├── .gitignore ├── LICENSE ├── README.md ├── app ├── .devcontainer │ ├── devcontainer.json │ └── docker-compose.yml ├── .dockerignore ├── Dockerfile ├── cli │ ├── .editorconfig │ ├── .gitignore │ ├── NOTE-DEBUG.md │ ├── README.md │ ├── bin │ │ ├── run │ │ └── run.cmd │ ├── config.query.yaml │ ├── config.subscription.yaml │ ├── package.json │ ├── src │ │ ├── commands │ │ │ ├── query.ts │ │ │ └── subscription.ts │ │ └── index.ts │ ├── test │ │ ├── commands │ │ │ └── hello.test.ts │ │ ├── mocha.opts │ │ └── tsconfig.json │ ├── tsconfig.json │ └── yarn.lock ├── docker-compose.yaml ├── hasura-bench-report.gif ├── package.json ├── queries │ ├── README.md │ ├── bin │ │ ├── k6 │ │ │ ├── k6 │ │ │ └── loadScript.js │ │ └── wrk │ │ │ ├── graphql-bench.lua │ │ │ ├── json.lua │ │ │ └── wrk │ ├── config.yaml │ ├── example-command.sh │ ├── package.json │ ├── reports │ │ ├── autocannon │ │ │ ├── SearchAlbumsWithArtist-autocannon-500rps.json │ │ │ └── SearchAlbumsWithArtist_2000rps.json │ │ ├── k6 │ │ │ ├── AlbumByPKMultiStage_multistage.json │ │ │ ├── AlbumsArtistTrackGenreAll_10s_max_requests.json │ │ │ ├── SearchAlbumsWithArtist-k6-500rps.json │ │ │ ├── SearchAlbumsWithArtist_10000_fixed_requests.json │ │ │ ├── SearchAlbumsWithArtist_2000rps.json │ │ │ └── SearchAlbumsWithArtist_500rps.json │ │ └── wrk2 │ │ │ └── SearchAlbumsWithArtist-wrk2-500rps.json │ ├── src │ │ ├── PreciseHdrHistogram.ts │ │ ├── executors │ │ │ ├── autocannon │ │ │ │ ├── index.ts │ │ │ │ └── types.ts │ │ │ ├── base │ │ │ │ ├── index.ts │ │ │ │ └── types.ts │ │ │ ├── k6 │ │ │ │ ├── index.ts │ │ │ │ └── types.ts │ │ │ ├── reports │ │ │ │ └── k6 │ │ │ │ │ └── SearchAlbumsWithArtist_2000rps.json │ │ │ └── wrk2 │ │ │ │ ├── index.ts │ │ │ │ └── types.ts │ │ ├── main.ts │ │ ├── tests.ts │ │ └── utils.ts │ ├── tsconfig.json │ └── yarn.lock ├── subscriptions │ ├── .gitignore │ ├── README.md │ ├── example-stdout-output.png │ ├── notes.md │ ├── package.json │ ├── src │ │ ├── example_latency_query.sql │ │ ├── main.ts │ │ ├── migrations │ │ │ └── events.ts │ │ ├── run.ts │ │ ├── schema.ts │ │ ├── tests.ts │ │ └── utils.ts │ ├── test.sh │ ├── tsconfig.json │ └── yarn.lock ├── web-app │ ├── README.md │ ├── index.html │ ├── index.js │ └── sample-output-report.json └── yarn.lock ├── containers ├── Chinook_SqlServer.sql ├── chinook_pg_serial_pk_proper_naming.sql ├── docker-compose.yaml ├── graphql-wait.sh ├── mssql-seed-chinook.sh ├── mssql-update-rows.sh ├── mssql_track_chinook_relationships.json ├── mssql_track_chinook_tables.json ├── psql-seed-chinook.sh ├── psql-setup-events-table.sh ├── psql-update-rows.sh ├── psql-wait.sh ├── psql_track_chinook_relationships.json ├── psql_track_chinook_tables.json ├── setup_events_table.sql └── update-rows.sh ├── docker-run-test ├── config.mssql.subscription.yaml ├── config.query.yaml ├── config.subscription.yaml ├── override-entrypoint-run-shell.sh ├── report.json ├── run-query-bench-docker.sh └── run-subscription-bench-docker.sh ├── makefile ├── readme_images ├── autocannon-output.png ├── autocannon-report.png ├── k6-output.png ├── k6s-report.png ├── npx-serve-output.png └── serve-index.png └── reports └── start-cadvisor.sh /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | lerna-debug.log* 8 | 9 | # Diagnostic reports (https://nodejs.org/api/report.html) 10 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 11 | 12 | # Runtime data 13 | pids 14 | *.pid 15 | *.seed 16 | *.pid.lock 17 
| 18 | # Directory for instrumented libs generated by jscoverage/JSCover 19 | lib-cov 20 | 21 | # Coverage directory used by tools like istanbul 22 | coverage 23 | *.lcov 24 | 25 | # nyc test coverage 26 | .nyc_output 27 | 28 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 29 | .grunt 30 | 31 | # Bower dependency directory (https://bower.io/) 32 | bower_components 33 | 34 | # node-waf configuration 35 | .lock-wscript 36 | 37 | # Compiled binary addons (https://nodejs.org/api/addons.html) 38 | build/Release 39 | 40 | # Dependency directories 41 | node_modules/ 42 | jspm_packages/ 43 | 44 | # Snowpack dependency directory (https://snowpack.dev/) 45 | web_modules/ 46 | 47 | # TypeScript cache 48 | *.tsbuildinfo 49 | 50 | # Optional npm cache directory 51 | .npm 52 | 53 | # Optional eslint cache 54 | .eslintcache 55 | 56 | # Microbundle cache 57 | .rpt2_cache/ 58 | .rts2_cache_cjs/ 59 | .rts2_cache_es/ 60 | .rts2_cache_umd/ 61 | 62 | # Optional REPL history 63 | .node_repl_history 64 | 65 | # Output of 'npm pack' 66 | *.tgz 67 | 68 | # Yarn Integrity file 69 | .yarn-integrity 70 | 71 | # dotenv environment variables file 72 | .env 73 | .env.test 74 | 75 | # parcel-bundler cache (https://parceljs.org/) 76 | .cache 77 | .parcel-cache 78 | 79 | # Next.js build output 80 | .next 81 | out 82 | 83 | # Nuxt.js build / generate output 84 | .nuxt 85 | dist 86 | 87 | # Gatsby files 88 | .cache/ 89 | # Comment in the public line in if your project uses Gatsby and not Next.js 90 | # https://nextjs.org/blog/next-9-1#public-directory-support 91 | # public 92 | 93 | # vuepress build output 94 | .vuepress/dist 95 | 96 | # Serverless directories 97 | .serverless/ 98 | 99 | # FuseBox cache 100 | .fusebox/ 101 | 102 | # DynamoDB Local files 103 | .dynamodb/ 104 | 105 | # TernJS port file 106 | .tern-port 107 | 108 | # Stores VSCode versions used for testing VSCode extensions 109 | .vscode-test 110 | 111 | # yarn v2 112 | .yarn/cache 113 | .yarn/unplugged 114 | .yarn/build-state.yml 115 | .yarn/install-state.gz 116 | .pnp.* -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /app/.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/vscode-remote/devcontainer.json or this file's README at: 2 | // https://github.com/microsoft/vscode-dev-containers/tree/v0.128.0/containers/docker-existing-docker-compose 3 | // If you want to run as a non-root user in the container, see .devcontainer/docker-compose.yml. 4 | { 5 | "name": "Existing Docker Compose (Extend)", 6 | 7 | // Update the 'dockerComposeFile' list if you have more compose files or use different names. 8 | // The .devcontainer/docker-compose.yml file contains any overrides you need/want to make. 9 | "dockerComposeFile": ["../docker-compose.yaml", "docker-compose.yml"], 10 | 11 | // The 'service' property is the name of the service for the container that VS Code should 12 | // use. Update this value and .devcontainer/docker-compose.yml to the real service name. 13 | "service": "graphql-bench", 14 | 15 | // The optional 'workspaceFolder' property is the path VS Code should open by default when 16 | // connected. This is typically a file mount in .devcontainer/docker-compose.yml 17 | "workspaceFolder": "/app", 18 | 19 | // Set *default* container specific settings.json values on container create. 20 | "settings": { 21 | "terminal.integrated.shell.linux": null 22 | }, 23 | 24 | // Add the IDs of extensions you want installed when the container is created. 25 | "extensions": [] 26 | 27 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 28 | // "forwardPorts": [], 29 | 30 | // Uncomment the next line if you want to start specific services in your Docker Compose config. 31 | // "runServices": [], 32 | 33 | // Uncomment the next line if you want to keep your containers running after VS Code shuts down. 34 | // "shutdownAction": "none", 35 | 36 | // Uncomment the next line to run commands after the container is created - for example installing curl. 37 | // "postCreateCommand": "apt-get update && apt-get install -y curl", 38 | 39 | // Uncomment to connect as a non-root user. See https://aka.ms/vscode-remote/containers/non-root. 40 | // "remoteUser": "vscode" 41 | } 42 | -------------------------------------------------------------------------------- /app/.devcontainer/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- #------------------------------------------------------------------------------------------------------------- 2 | 3 | #------------------------------------------------------------------------------------------------------------- 4 | # Copyright (c) Microsoft Corporation. All rights reserved. 5 | # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. 6 | version: '3.7' 7 | services: 8 | # Update this to the name of the service you want to work with in your docker-compose.yml file 9 | graphql-bench: 10 | # If you want to add a non-root user to your Dockerfile, you can use the "remoteUser" 11 | # property in devcontainer.json to cause VS Code and its sub-processes (terminals, tasks, 12 | # debugging) to execute as the user. Uncomment the next line if you want the entire 13 | # container to run as this user instead. Note that, on Linux, you may need to
15 | # See https://aka.ms/vscode-remote/containers/non-root for details. 16 | # 17 | # user: vscode 18 | 19 | # Uncomment if you want to override the service's Dockerfile to one in the .devcontainer 20 | # folder. Note that the path of the Dockerfile and context is relative to the *primary* 21 | # docker-compose.yml file (the first in the devcontainer.json "dockerComposeFile" 22 | # array). The sample below assumes your primary file is in the root of your project. 23 | # 24 | # build: 25 | # context: . 26 | # dockerfile: .devcontainer/Dockerfile 27 | 28 | # volumes: 29 | # Update this to wherever you want VS Code to mount the folder of your project 30 | # - .:/app:cached 31 | 32 | # Uncomment the next line to use Docker from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker-compose for details. 33 | # - /var/run/docker.sock:/var/run/docker.sock 34 | 35 | # Uncomment the next four lines if you will use a ptrace-based debugger like C++, Go, and Rust. 36 | # cap_add: 37 | # - SYS_PTRACE 38 | # security_opt: 39 | # - seccomp:unconfined 40 | 41 | # Overrides the default command so things don't shut down after the process ends. 42 | command: /bin/sh -c "while sleep 1000; do :; done" 43 | -------------------------------------------------------------------------------- /app/.dockerignore: -------------------------------------------------------------------------------- 1 | # No node_modules from any directory 2 | node_modules 3 | **/node_modules 4 | 5 | # Don't copy the local dev wrk and k6 binaries; the image should use the ones the Dockerfile installs at /usr/bin 6 | ./queries/bin/k6/k6 7 | ./queries/bin/wrk/wrk 8 | 9 | # No Markdown files besides README 10 | *.md 11 | !README*.md 12 | 13 | # No images (from README) 14 | *.gif 15 | *.png 16 | *.jpg 17 | -------------------------------------------------------------------------------- /app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:16-alpine AS base 2 | # Store ENV vars for where to place the k6 and wrk2 binaries in the custom-built container 3 | ENV k6_binary_path="/usr/bin/k6" 4 | ENV wrk_binary_path="/usr/bin/wrk" 5 | 6 | # Temporary layer which builds the wrk2 binary; only used to copy the binary into the final layer, keeping the image small 7 | FROM base AS wrk-builder 8 | RUN apk add --update alpine-sdk libgcc openssl-dev zlib-dev \ 9 | && apk add --no-cache git \ 10 | && git clone https://github.com/giltene/wrk2.git \ 11 | && cd wrk2 \ 12 | && make \ 13 | && cp wrk ${wrk_binary_path} \ 14 | && cd .. \ 15 | && rm -rf wrk2 \ 16 | && apk del git alpine-sdk 17 | 18 | # Final layer, copy both wrk2 and k6 binaries and the Node app source code 19 | FROM base AS node-builder 20 | WORKDIR /app 21 | ENV LUA_PATH="/app/queries/bin/wrk/?.lua;;" 22 | COPY --from=loadimpact/k6:0.34.0 /usr/bin/k6 ${k6_binary_path} 23 | COPY --from=wrk-builder ${wrk_binary_path} ${wrk_binary_path} 24 | COPY . .
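# A hypothetical build-and-run sketch (the image tag and config filename here are assumptions, not part of the repo;
# the ENTRYPOINT below forwards its arguments to the benchmark CLI, and config paths resolve relative to /app):
#   docker build -t graphql-bench .
#   docker run --net=host -v "$PWD/config.query.yaml":/app/config.query.yaml graphql-bench query --config=config.query.yaml --outfile=report.json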
25 | RUN yarn install 26 | 27 | ENTRYPOINT ["node", "./cli/bin/run"] 28 | -------------------------------------------------------------------------------- /app/cli/.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | indent_style = space 5 | indent_size = 2 6 | charset = utf-8 7 | trim_trailing_whitespace = true 8 | insert_final_newline = true 9 | 10 | [*.md] 11 | trim_trailing_whitespace = false 12 | -------------------------------------------------------------------------------- /app/cli/.gitignore: -------------------------------------------------------------------------------- 1 | *-debug.log 2 | *-error.log 3 | /.nyc_output 4 | /dist 5 | /lib 6 | /package-lock.json 7 | /tmp 8 | node_modules 9 | -------------------------------------------------------------------------------- /app/cli/NOTE-DEBUG.md: -------------------------------------------------------------------------------- 1 | Use this to make Oclif spit out actually useable errors: 2 | ```sh 3 | DEBUG=* ./bin/run query --config="./config.yaml" 4 | ``` -------------------------------------------------------------------------------- /app/cli/README.md: -------------------------------------------------------------------------------- 1 | graphql-bench 2 | ============= 3 | 4 | 5 | 6 | [![oclif](https://img.shields.io/badge/cli-oclif-brightgreen.svg)](https://oclif.io) 7 | [![Version](https://img.shields.io/npm/v/graphql-bench.svg)](https://npmjs.org/package/graphql-bench) 8 | [![Downloads/week](https://img.shields.io/npm/dw/graphql-bench.svg)](https://npmjs.org/package/graphql-bench) 9 | [![License](https://img.shields.io/npm/l/graphql-bench.svg)](https://github.com/GavinRay97/graphql-bench/blob/master/package.json) 10 | 11 | 12 | * [Usage](#usage) 13 | * [Commands](#commands) 14 | 15 | # Usage 16 | 17 | ```sh-session 18 | $ npm install -g graphql-bench 19 | $ graphql-bench COMMAND 20 | running command... 21 | $ graphql-bench (-v|--version|version) 22 | graphql-bench/0.0.0 linux-x64 node-v14.6.0 23 | $ graphql-bench --help [COMMAND] 24 | USAGE 25 | $ graphql-bench COMMAND 26 | ... 27 | ``` 28 | 29 | # Commands 30 | 31 | * [`graphql-bench hello [FILE]`](#graphql-bench-hello-file) 32 | * [`graphql-bench help [COMMAND]`](#graphql-bench-help-command) 33 | 34 | ## `graphql-bench hello [FILE]` 35 | 36 | describe the command here 37 | 38 | ``` 39 | USAGE 40 | $ graphql-bench hello [FILE] 41 | 42 | OPTIONS 43 | -f, --force 44 | -h, --help show CLI help 45 | -n, --name=name name to print 46 | 47 | EXAMPLE 48 | $ graphql-bench hello 49 | hello world from ./src/hello.ts! 
50 | ``` 51 | 52 | _See code: [src/commands/hello.ts](https://github.com/GavinRay97/graphql-bench/blob/v0.0.0/src/commands/hello.ts)_ 53 | 54 | ## `graphql-bench help [COMMAND]` 55 | 56 | display help for graphql-bench 57 | 58 | ``` 59 | USAGE 60 | $ graphql-bench help [COMMAND] 61 | 62 | ARGUMENTS 63 | COMMAND command to show help for 64 | 65 | OPTIONS 66 | --all see all commands in CLI 67 | ``` 68 | 69 | _See code: [@oclif/plugin-help](https://github.com/oclif/plugin-help/blob/v3.2.0/src/commands/help.ts)_ 70 | 71 | -------------------------------------------------------------------------------- /app/cli/bin/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | require('@oclif/command').run() 4 | .then(require('@oclif/command/flush')) 5 | .catch(require('@oclif/errors/handle')) 6 | -------------------------------------------------------------------------------- /app/cli/bin/run.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | node "%~dp0\run" %* 4 | -------------------------------------------------------------------------------- /app/cli/config.query.yaml: -------------------------------------------------------------------------------- 1 | url: http://localhost:8085/v1/graphql 2 | headers: 3 | X-Hasura-Admin-Secret: my-secret 4 | queries: 5 | - name: SearchAlbumsWithArtist 6 | tools: [k6, wrk2, autocannon] 7 | execution_strategy: REQUESTS_PER_SECOND 8 | rps: 500 9 | duration: 5s 10 | query: | 11 | query SearchAlbumsWithArtist { 12 | albums(where: {title: {_like: "%Rock%"}}) { 13 | id 14 | title 15 | artist { 16 | name 17 | id 18 | } 19 | } 20 | } 21 | # - name: AlbumByPK 22 | # tools: [autocannon, k6] 23 | # execution_strategy: FIXED_REQUEST_NUMBER 24 | # requests: 10000 25 | # query: | 26 | # query AlbumByPK { 27 | # albums_by_pk(id: 1) { 28 | # id 29 | # title 30 | # } 31 | # } 32 | # - name: AlbumByPKMultiStage 33 | # tools: [k6] 34 | # execution_strategy: MULTI_STAGE 35 | # initial_rps: 0 36 | # stages: 37 | # - duration: 5s 38 | # target: 100 39 | # - duration: 5s 40 | # target: 1000 41 | # query: | 42 | # query AlbumByPK { 43 | # albums_by_pk(id: 1) { 44 | # id 45 | # title 46 | # } 47 | # } 48 | -------------------------------------------------------------------------------- /app/cli/config.subscription.yaml: -------------------------------------------------------------------------------- 1 | url: http://localhost:8085/v1/graphql 2 | db_connection_string: postgres://postgres:postgrespassword@localhost:5430/postgres 3 | headers: 4 | X-Hasura-Admin-Secret: my-secret 5 | config: 6 | label: SearchAlbumsWithArtistUpdated 7 | max_connections: 20 8 | connections_per_second: 10 9 | insert_payload_data: true 10 | query: | 11 | subscription AlbumByIDSubscription($artistIds: [Int!]!) 
{ 12 | albums(where: {artist_id: { _in: $artistIds}}) { 13 | id 14 | title 15 | updated_at 16 | } 17 | } 18 | variables: 19 | # some_value: a_string 20 | # some_range: { start: 1, end: 10 } 21 | # another_range: { start: 50, end: 100 } 22 | # some_number: 10 23 | artistIds: [1, 2, 3, 4] 24 | # some_object: 25 | # a_key: a_value 26 | -------------------------------------------------------------------------------- /app/cli/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "graphql-bench", 3 | "version": "0.0.0", 4 | "author": "Gavin @GavinRay97", 5 | "bin": { 6 | "graphql-bench": "./bin/run" 7 | }, 8 | "bugs": "https://github.com/GavinRay97/graphql-bench/issues", 9 | "dependencies": { 10 | "@oclif/command": "^1", 11 | "@oclif/config": "^1", 12 | "@oclif/plugin-help": "^3", 13 | "tslib": "^1" 14 | }, 15 | "devDependencies": { 16 | "@oclif/dev-cli": "^1", 17 | "@oclif/test": "^1", 18 | "@types/chai": "^4", 19 | "@types/mocha": "^5", 20 | "@types/node": "^10", 21 | "chai": "^4", 22 | "globby": "^10", 23 | "mocha": "^5", 24 | "nyc": "^14", 25 | "ts-node": "^8", 26 | "typescript": "^3.3" 27 | }, 28 | "engines": { 29 | "node": ">=8.0.0" 30 | }, 31 | "files": [ 32 | "/bin", 33 | "/lib", 34 | "/npm-shrinkwrap.json", 35 | "/oclif.manifest.json" 36 | ], 37 | "homepage": "https://github.com/GavinRay97/graphql-bench", 38 | "keywords": [ 39 | "oclif" 40 | ], 41 | "license": "MIT", 42 | "main": "lib/index.js", 43 | "oclif": { 44 | "commands": "./lib/commands", 45 | "bin": "graphql-bench", 46 | "plugins": [ 47 | "@oclif/plugin-help" 48 | ] 49 | }, 50 | "repository": "GavinRay97/graphql-bench", 51 | "scripts": { 52 | "postpack": "rm -f oclif.manifest.json", 53 | "prepack": "rm -rf lib && tsc -b && oclif-dev manifest && oclif-dev readme", 54 | "test": "nyc --extension .ts mocha --forbid-only \"test/**/*.test.ts\"", 55 | "version": "oclif-dev readme && git add README.md" 56 | }, 57 | "types": "lib/index.d.ts" 58 | } 59 | -------------------------------------------------------------------------------- /app/cli/src/commands/query.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path' 2 | import * as fs from 'fs-extra' 3 | import * as yaml from 'js-yaml' 4 | 5 | import { Command, flags } from '@oclif/command' 6 | import { BenchmarkRunner } from '../../../queries/src/main' 7 | import type { GlobalConfig } from '../../../queries/src/executors/base/types' 8 | 9 | export default class Query extends Command { 10 | static description = 'benchmark queries or mutations' 11 | 12 | static examples = [ 13 | `$ graphql-bench query --config ./config.query.yaml --outfile results.json`, 14 | ] 15 | 16 | static flags = { 17 | help: flags.help({ char: 'h' }), 18 | config: flags.string({ 19 | char: 'c', 20 | required: true, 21 | multiple: false, 22 | description: 'Filepath to YAML config file for query benchmarks', 23 | parse: (filepath) => { 24 | const pathToFile = path.join(process.cwd(), filepath) 25 | const configFile = fs.readFileSync(pathToFile, 'utf-8') 26 | return yaml.load(configFile) 27 | }, 28 | }), 29 | outfile: flags.string({ 30 | char: 'o', 31 | required: false, 32 | multiple: false, 33 | description: 'Filepath to output JSON file containing benchmark stats', 34 | }), 35 | url: flags.string({ 36 | required: false, 37 | multiple: false, 38 | description: 'URL to direct graphql queries; may override \'url\' from the YAML config, which is optional if this flag is passed', 39 | }), 40 | query: 
flags.string({ 41 | required: false, 42 | multiple: false, 43 | description: 'A specific named query to run from the config; if omitted, all queries will be run', 44 | }), 45 | } 46 | 47 | async run() { 48 | const { flags } = this.parse(Query) 49 | 50 | // Oclif, can't figure out how to generically type flags =/ 51 | const config = (flags.config as unknown) as GlobalConfig 52 | if (flags.url) { 53 | // config.url may be omitted, else will be overridden: 54 | config.url = flags.url 55 | } 56 | const executor = new BenchmarkRunner(config) 57 | const results = await executor.runBenchmarks(flags.query) 58 | 59 | if (flags.outfile) { 60 | const pathToOutfile = path.join(process.cwd(), flags.outfile) 61 | fs.outputJSONSync(pathToOutfile, results, { 62 | spaces: 2, 63 | }) 64 | } 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /app/cli/src/commands/subscription.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path' 2 | import * as fs from 'fs-extra' 3 | import * as yaml from 'js-yaml' 4 | 5 | import { Command, flags } from '@oclif/command' 6 | import { main as runSubscriptionBenchmark } from '../../../subscriptions/src/main' 7 | import { SubscriptionBenchConfig } from '../../../subscriptions/src/utils' 8 | 9 | export default class Subscription extends Command { 10 | static description = 'benchmark subscriptions' 11 | 12 | static examples = [ 13 | `$ graphql-bench subscription --config ./config.subscription.yaml`, 14 | ] 15 | 16 | static flags = { 17 | help: flags.help({ char: 'h' }), 18 | config: flags.string({ 19 | char: 'c', 20 | required: true, 21 | multiple: false, 22 | description: 'Filepath to YAML config file for subscription benchmarks', 23 | parse: (filepath) => { 24 | const pathToFile = path.join(process.cwd(), filepath) 25 | const configFile = fs.readFileSync(pathToFile, 'utf-8') 26 | return yaml.load(configFile) 27 | }, 28 | }), 29 | } 30 | 31 | async run() { 32 | const { flags } = this.parse(Subscription) 33 | 34 | await runSubscriptionBenchmark( 35 | (flags.config as unknown) as SubscriptionBenchConfig 36 | ) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /app/cli/src/index.ts: -------------------------------------------------------------------------------- 1 | export { run } from '@oclif/command' 2 | -------------------------------------------------------------------------------- /app/cli/test/commands/hello.test.ts: -------------------------------------------------------------------------------- 1 | import {expect, test} from '@oclif/test' 2 | 3 | describe('hello', () => { 4 | test 5 | .stdout() 6 | .command(['hello']) 7 | .it('runs hello', ctx => { 8 | expect(ctx.stdout).to.contain('hello world') 9 | }) 10 | 11 | test 12 | .stdout() 13 | .command(['hello', '--name', 'jeff']) 14 | .it('runs hello --name jeff', ctx => { 15 | expect(ctx.stdout).to.contain('hello jeff') 16 | }) 17 | }) 18 | -------------------------------------------------------------------------------- /app/cli/test/mocha.opts: -------------------------------------------------------------------------------- 1 | --require ts-node/register 2 | --watch-extensions ts 3 | --recursive 4 | --reporter spec 5 | --timeout 5000 6 | -------------------------------------------------------------------------------- /app/cli/test/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../tsconfig", 3 | 
"compilerOptions": { 4 | "noEmit": true 5 | }, 6 | "references": [ 7 | {"path": ".."} 8 | ] 9 | } 10 | -------------------------------------------------------------------------------- /app/cli/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "declaration": true, 4 | "importHelpers": true, 5 | "module": "commonjs", 6 | "outDir": "lib", 7 | "rootDirs": ["src", "../queries/src", "../subscriptions/src"], 8 | //"rootDir": "src", 9 | "strict": true, 10 | "target": "es2017", 11 | "esModuleInterop": true 12 | }, 13 | "include": ["src/**/*"] 14 | } 15 | -------------------------------------------------------------------------------- /app/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | services: 4 | graphql-bench: 5 | build: 6 | context: . 7 | dockerfile: Dockerfile 8 | network_mode: "host" 9 | volumes: 10 | - ./queries:/app/queries 11 | - ./subscriptions:/app/subscriptions 12 | # - ./nodemon.json:/home/node/app/nodemon.json 13 | 14 | # expose: 15 | # - "8080" 16 | # ports: 17 | # - "8080:8080" 18 | # - "9229:9229" 19 | # command: npm start 20 | -------------------------------------------------------------------------------- /app/hasura-bench-report.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hasura/graphql-bench/2163a70a5324766521b8d6ceb7a73b8f0d12688d/app/hasura-bench-report.gif -------------------------------------------------------------------------------- /app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "private": true, 3 | "workspaces": [ 4 | "queries", 5 | "subscriptions", 6 | "cli" 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /app/queries/README.md: -------------------------------------------------------------------------------- 1 | # Example `config.yaml`: 2 | 3 | ```yaml 4 | url: 'http://localhost:8085/v1/graphql' 5 | headers: 6 | X-Hasura-Admin-Secret: my-secret 7 | queries: 8 | # Name: Unique name for the query 9 | - name: SearchAlbumsWithArtist 10 | # Tools: List of benchmarking tools to run: ['autocannon', 'k6', 'wrk2'] 11 | tools: [autocannon, k6] 12 | # Execution Strategy: the type of the benchmark to run. Options are: 13 | # REQUESTS_PER_SECOND: Fixed duration, fixed rps. Example parameters: 14 | # duration: 10s 15 | # rps: 500 16 | # FIXED_REQUEST_NUMBER: Complete requests as fast as possible, no duration. Example parameters: 17 | # requests: 10000 18 | # MAX_REQUESTS_IN_DURATION: Make as many requests as possible in duration. Example parameters: 19 | # duration: 10s 20 | # MULTI_STAGE: (K6 only currently) Several stages of REQUESTS_PER_SECOND benchmark. 
Example parameters: 21 | # initial_rps: 0 22 | # stages: 23 | # - duration: 5s 24 | # target: 100 25 | # - duration: 10s 26 | # target: 1000 27 | # CUSTOM: Pass completely custom options to each tool (see the full API spec for all supported options; it is very large) 28 | execution_strategy: REQUESTS_PER_SECOND 29 | rps: 2000 30 | duration: 10s 31 | connections: 50 32 | query: | 33 | query SearchAlbumsWithArtist { 34 | albums(where: {title: {_like: "%Rock%"}}) { 35 | id 36 | title 37 | artist { 38 | name 39 | id 40 | } 41 | } 42 | } 43 | - name: AlbumByPK 44 | tools: [autocannon, k6] 45 | execution_strategy: FIXED_REQUEST_NUMBER 46 | requests: 10000 47 | query: | 48 | query AlbumByPK { 49 | albums_by_pk(id: 1) { 50 | id 51 | title 52 | } 53 | } 54 | - name: AlbumByPKMultiStage 55 | tools: [k6] 56 | execution_strategy: MULTI_STAGE 57 | initial_rps: 0 58 | stages: 59 | - duration: 5s 60 | target: 100 61 | - duration: 5s 62 | target: 1000 63 | query: | 64 | query AlbumByPK { 65 | albums_by_pk(id: 1) { 66 | id 67 | title 68 | } 69 | } 70 | ``` 71 | 72 | # Custom benchmark config options: 73 | 74 | Each tool has its own set of unique config options. See below for the full spec and examples. 75 | ```yaml 76 | url: 'http://localhost:8085/v1/graphql' 77 | headers: 78 | X-Hasura-Admin-Secret: my-secret 79 | queries: 80 | - name: SearchAlbumsWithArtist 81 | execution_strategy: CUSTOM 82 | tools: [k6, autocannon] # list the tools that are configured under `options` below 83 | options: 84 | k6: 85 | # /** Discard response bodies. CAREFUL! This causes graphql errors to be ignored */ 86 | # discardResponseBodies?: boolean; 87 | 88 | # /** Third party collector configuration. */ 89 | # ext?: { [name: string]: CollectorOptions }; 90 | 91 | # /** Static hostname mapping. */ 92 | # hosts?: { [name: string]: string }; 93 | 94 | # /** Log all HTTP requests and responses. */ 95 | # httpDebug?: string; 96 | 97 | # /** Disable TLS verification. Insecure. */ 98 | # insecureSkipTLSVerify?: boolean; 99 | 100 | # /** Maximum HTTP redirects to follow. */ 101 | # maxRedirects?: number; 102 | 103 | # /** Minimum test iteration duration. */ 104 | # minIterationDuration?: string; 105 | 106 | # /** Disable keepalive connections. */ 107 | # noConnectionReuse?: boolean; 108 | 109 | # /** Disable usage reports. */ 110 | # noUsageReport?: boolean; 111 | 112 | # /** Disable cross-VU TCP connection reuse. */ 113 | # noVUConnectionReuse?: boolean; 114 | 115 | # /** Maximum requests per second across all VUs. */ 116 | # rps?: number; 117 | 118 | # /** Setup function timeout. */ 119 | # setupTimeout?: string; 120 | 121 | # /** Define stats for trend metrics. */ 122 | # summaryTrendStats?: string[]; 123 | 124 | # /** Which system tags to include in collected metrics. */ 125 | # systemTags?: string[]; 126 | 127 | # /** Tags to set test wide across all metrics. */ 128 | # tags?: { [name: string]: string }; 129 | 130 | # /** Teardown function timeout. */ 131 | # teardownTimeout?: string; 132 | 133 | # /** Threshold specifications. Defines pass and fail conditions. */ 134 | # thresholds?: { [name: string]: Threshold[] }; 135 | 136 | # /** Throw error on failed HTTP request. */ 137 | # throw?: boolean; 138 | 139 | # /** TLS client certificates. */ 140 | # tlsAuth?: Certificate[]; 141 | 142 | # /** Allowed TLS cipher suites. */ 143 | # tlsCipherSuites?: CipherSuite[]; 144 | 145 | # /** Allowed TLS version. Use `http.SSL_*` `http.TLS_*` constants.
*/ 146 | # tlsVersion?: string | { min: string; max: string }; 147 | 148 | # /** User agent string to include in HTTP requests. */ 149 | # userAgent?: string; 150 | 151 | # scenarios 152 | scenarios: 153 | main: { 154 | executor: 'constant-arrival-rate', 155 | rate: 200, timeUnit: '1s', # 200 requests per second 156 | duration: '30s', 157 | preAllocatedVUs: 10, # the size of the VU (i.e. worker) pool for this scenario 158 | } 159 | autocannon: 160 | # /** 161 | # * The number of concurrent connections. 162 | # * @default 10 163 | # */ 164 | # connections?: number; 165 | 166 | # /** 167 | # * The number of seconds to run the autocannon. 168 | # * Can be a [timestring](https://www.npmjs.com/package/timestring). 169 | # * @default 10 170 | # */ 171 | # duration?: number | string; 172 | 173 | # /** 174 | # * A `Number` stating the amount of requests to make before ending the test. 175 | # * This overrides duration and takes precedence, so the test won't end 176 | # * until the required number of requests has been completed. 177 | # */ 178 | # amount?: number; 179 | 180 | # /** 181 | # * The number of seconds to wait for a response before timeout. 182 | # * @default 10 183 | # */ 184 | # timeout?: number; 185 | 186 | # /** 187 | # * The number of [pipelined requests](https://en.wikipedia.org/wiki/HTTP_pipelining) for each connection. 188 | # * Will cause the `Client` API to throw when greater than 1 189 | # * @default 1 190 | # */ 191 | # pipelining?: number; 192 | 193 | # /** 194 | # * The threshold of the number of errors when making the requests to the server before this instance bails out. 195 | # * This instance will take all existing results so far and aggregate them into the results. 196 | # * If none passed here, the instance will ignore errors and never bail out. 197 | # */ 198 | # bailout?: number; 199 | 200 | # /** 201 | # * The http method to use. 202 | # * @default 'GET' 203 | # */ 204 | # method?: Request['method']; 205 | 206 | # /** 207 | # * A `String` to be added to the results for identification. 208 | # */ 209 | # title?: string; 210 | 211 | # /** 212 | # * An `Object` containing the headers of the request. 213 | # * @default {} 214 | # */ 215 | # headers?: Request['headers']; 216 | 217 | # /** 218 | # * A `Number` stating the max requests to make per connection. 219 | # * `amount` takes precedence if both are set. 220 | # */ 221 | # maxConnectionRequests?: number; 222 | 223 | # /** 224 | # * A `Number` stating the max requests to make overall. 225 | # * Can't be less than `connections`. 226 | # */ 227 | # maxOverallRequests?: number; 228 | 229 | # /** 230 | # * A `Number` stating the rate of requests to make per second from each individual connection. 231 | # * No rate limiting by default. 232 | # */ 233 | # connectionRate?: number; 234 | 235 | # /** 236 | # * A `Number` stating the rate of requests to make per second from all connections. 237 | # * `connectionRate` takes precedence if both are set. 238 | # * No rate limiting by default. 239 | # */ 240 | # overallRate?: number; 241 | 242 | # /** 243 | # * A `Number` which makes the individual connections disconnect and reconnect to the server 244 | # * whenever it has sent that number of requests. 245 | # */ 246 | # reconnectRate?: number; 247 | 248 | # /** 249 | # * An `Array` of `Objects` which represents the sequence of requests to make while benchmarking. 250 | # * Can be used in conjunction with the `body`, `headers` and `method` params above.
251 | # * 252 | # * The `Objects` in this array can have `body`, `headers`, `method`, or `path` attributes, which overwrite those that are passed in this `opts` object. 253 | # * Therefore, the ones in this (`opts`) object take precedence and should be viewed as defaults. 254 | # */ 255 | # requests?: Request[]; 256 | 257 | # /** 258 | # * A `Boolean` which enables the replacement of `[<id>]` tags within the request body with a randomly generated ID, 259 | # * allowing for unique fields to be sent with requests. 260 | # * @default false 261 | # */ 262 | # idReplacement?: boolean; 263 | 264 | # /** 265 | # * A `Boolean` which allows you to set up an instance of autocannon that restarts indefinitely after emitting results with the `done` event. 266 | # * Useful for efficiently restarting your instance. To stop running forever, you must cause a `SIGINT` or call the `.stop()` function on your instance. 267 | # * @default false 268 | # */ 269 | # forever?: boolean; 270 | 271 | # /** 272 | # * A `String` identifying the server name for the SNI (Server Name Indication) TLS extension. 273 | # */ 274 | # servername?: string; 275 | 276 | # /** 277 | # * A `Boolean` which allows you to disable tracking non 2xx code responses in latency and bytes per second calculations. 278 | # * @default false 279 | # */ 280 | # excludeErrorStats?: boolean; 281 | query: | 282 | query SearchAlbumsWithArtist { 283 | albums(where: {title: {_like: "%Rock%"}}) { 284 | id 285 | title 286 | artist { 287 | name 288 | id 289 | } 290 | } 291 | } 292 | ``` 293 | -------------------------------------------------------------------------------- /app/queries/bin/k6/k6: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hasura/graphql-bench/2163a70a5324766521b8d6ceb7a73b8f0d12688d/app/queries/bin/k6/k6 -------------------------------------------------------------------------------- /app/queries/bin/k6/loadScript.js: -------------------------------------------------------------------------------- 1 | import http from 'k6/http' 2 | import { check } from 'k6' 3 | 4 | export default function () { 5 | let { url, headers, query, variables } = __ENV 6 | 7 | // Nested JSON can't be passed through the config file, so parse the stringified values here 8 | if (headers) headers = JSON.parse(headers) 9 | if (variables) variables = JSON.parse(variables) 10 | 11 | // Prepare query & variables (if provided) 12 | let body = JSON.stringify({ query, variables }) 13 | 14 | // Send the request 15 | let res = http.post(url, body, { headers }) 16 | 17 | // Run assertions on status, errors in body, optionally results count 18 | let check_cxt = { 'is status 200': (r) => r.status === 200, } 19 | if (res.body) { // unavailable if discardResponseBodies: true 20 | check_cxt['no error in body'] = (r) => Boolean(r.json('errors')) == false 21 | } 22 | check(res, check_cxt) 23 | } 24 | -------------------------------------------------------------------------------- /app/queries/bin/wrk/graphql-bench.lua: -------------------------------------------------------------------------------- 1 | -- init = function(args) 2 | -- request = function() 3 | -- response = function(status, headers, body) 4 | -- done = function(summary, latency, requests) 5 | json = require "json" 6 | 7 | -- Set the default HTTP method and empty "headers" table here 8 | wrk.method = "POST" 9 | wrk.headers = {} 10 | 11 | function tprint (tbl, indent) 12 | if not indent then indent = 0 end 13 | for k, v in pairs(tbl) do 14 | formatting = string.rep(" ",
indent) .. k .. ": " 15 | if type(v) == "table" then 16 | print(formatting) 17 | tprint(v, indent+1) 18 | elseif type(v) == 'boolean' then 19 | print(formatting .. tostring(v)) 20 | else 21 | print(formatting .. v) 22 | end 23 | end 24 | end 25 | 26 | function print_params(params_table) 27 | print('========') 28 | print('[PARAMS]') 29 | tprint(params_table) 30 | print('========') 31 | end 32 | 33 | function print_wrk_config() 34 | local _wrk = { 35 | scheme = wrk.scheme, 36 | host = wrk.host, 37 | port = wrk.port, 38 | method = wrk.method, 39 | headers = wrk.headers, 40 | body = wrk.body 41 | } 42 | print('-----') 43 | print('[WRK CONFIG]') 44 | tprint(_wrk) 45 | print('-----') 46 | end 47 | 48 | function init(args) 49 | url, params = args[0], args[1] 50 | -- print('url', url) 51 | -- print('params', params) 52 | if not params then print('ERROR: NO PARAMS PASSED TO WRK2') end 53 | 54 | params = json.decode(params) 55 | --print_params(params) 56 | 57 | if params['headers'] ~= nil then 58 | for header, val in pairs(params['headers']) do 59 | wrk.headers[header] = val 60 | end 61 | end 62 | 63 | wrk.body = json.encode({ 64 | query = params['query'], 65 | variables = params['variables'], 66 | }) 67 | 68 | --print_wrk_config() 69 | end 70 | 71 | function request() 72 | return wrk.request() 73 | end 74 | 75 | function format_summary_to_json(summary, latency) 76 | local stats = { 77 | requests = summary.requests, 78 | duration_in_milliseconds = summary.duration / 1000, 79 | bytes = summary.bytes, 80 | -- 1e6 = 1,000,000 81 | requests_per_second = (summary.requests/summary.duration) * 1e6, 82 | bytes_transfer_per_second = (summary.bytes/summary.duration) * 1e6, 83 | } 84 | 85 | local latency_aggregate = { 86 | min = latency.min / 1000, 87 | max = latency.max / 1000, 88 | mean = latency.mean / 1000, 89 | stdev = latency.stdev / 1000, 90 | } 91 | 92 | local latency_distribution = {} 93 | for idx, p in ipairs({ 50, 75, 90, 95, 97.5, 99, 99.9, 99.99, 99.999, 100 }) do 94 | n = latency:percentile(p) 95 | latency_distribution[idx] = { percentile = p, latency_in_milliseconds = n / 1000 } 96 | end 97 | 98 | stats.latency_aggregate = latency_aggregate 99 | stats.latency_distribution = latency_distribution 100 | 101 | json_stats = json.encode(stats) 102 | return stats, json_stats 103 | end 104 | 105 | function file_exists(filename) 106 | local file = io.open(filename, "rb") 107 | if file then file:close() end 108 | return file ~= nil 109 | end 110 | 111 | function read_file(filename) 112 | local file = assert(io.open(filename, "r")) 113 | local text = file:read("*all") 114 | file:close() return text 115 | end 116 | 117 | function write_file(filename, content) 118 | local file = assert(io.open(filename, "w")) 119 | file:write(content) 120 | file:close() 121 | end 122 | 123 | function done(summary, latency, requests) 124 | stats_table, json_stats = format_summary_to_json(summary, latency) 125 | io.stderr:write(json_stats) 126 | -- Commenting out this file write, just grab it and parse it from stderr for now 127 | -- write_file('/tmp/wrk2-stats.json', json_stats) 128 | end 129 | 130 | -- function wrk.format(method, path, headers, body) 131 | -- wrk.format returns a HTTP request string containing the passed 132 | -- parameters merged with values from the wrk table.
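-- For reference, a sketch of the params JSON this script expects as its second argument
-- (field names follow init() above; the example values are illustrative only):
--   {
--     "query": "query SearchAlbumsWithArtist { ... }",
--     "variables": { "artistIds": [1, 2, 3] },
--     "headers": { "X-Hasura-Admin-Secret": "my-secret" }
--   }
-- init() merges 'headers' into wrk.headers and JSON-encodes 'query'/'variables' into wrk.body.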
133 | 134 | -- global init -- function called when the thread is initialized 135 | -- global request -- function returning the HTTP message for each request 136 | -- global response -- optional function called with HTTP response data 137 | -- global done -- optional function called with results of run 138 | -------------------------------------------------------------------------------- /app/queries/bin/wrk/json.lua: -------------------------------------------------------------------------------- 1 | -- 2 | -- json.lua 3 | -- 4 | -- Copyright (c) 2020 rxi 5 | -- 6 | -- Permission is hereby granted, free of charge, to any person obtaining a copy of 7 | -- this software and associated documentation files (the "Software"), to deal in 8 | -- the Software without restriction, including without limitation the rights to 9 | -- use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 10 | -- of the Software, and to permit persons to whom the Software is furnished to do 11 | -- so, subject to the following conditions: 12 | -- 13 | -- The above copyright notice and this permission notice shall be included in all 14 | -- copies or substantial portions of the Software. 15 | -- 16 | -- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | -- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | -- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | -- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | -- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | -- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | -- SOFTWARE. 23 | -- 24 | 25 | local json = { _version = "0.1.2" } 26 | 27 | ------------------------------------------------------------------------------- 28 | -- Encode 29 | ------------------------------------------------------------------------------- 30 | 31 | local encode 32 | 33 | local escape_char_map = { 34 | [ "\\" ] = "\\", 35 | [ "\"" ] = "\"", 36 | [ "\b" ] = "b", 37 | [ "\f" ] = "f", 38 | [ "\n" ] = "n", 39 | [ "\r" ] = "r", 40 | [ "\t" ] = "t", 41 | } 42 | 43 | local escape_char_map_inv = { [ "/" ] = "/" } 44 | for k, v in pairs(escape_char_map) do 45 | escape_char_map_inv[v] = k 46 | end 47 | 48 | 49 | local function escape_char(c) 50 | return "\\" .. (escape_char_map[c] or string.format("u%04x", c:byte())) 51 | end 52 | 53 | 54 | local function encode_nil(val) 55 | return "null" 56 | end 57 | 58 | 59 | local function encode_table(val, stack) 60 | local res = {} 61 | stack = stack or {} 62 | 63 | -- Circular reference? 64 | if stack[val] then error("circular reference") end 65 | 66 | stack[val] = true 67 | 68 | if rawget(val, 1) ~= nil or next(val) == nil then 69 | -- Treat as array -- check keys are valid and it is not sparse 70 | local n = 0 71 | for k in pairs(val) do 72 | if type(k) ~= "number" then 73 | error("invalid table: mixed or invalid key types") 74 | end 75 | n = n + 1 76 | end 77 | if n ~= #val then 78 | error("invalid table: sparse array") 79 | end 80 | -- Encode 81 | for i, v in ipairs(val) do 82 | table.insert(res, encode(v, stack)) 83 | end 84 | stack[val] = nil 85 | return "[" .. table.concat(res, ",") .. "]" 86 | 87 | else 88 | -- Treat as an object 89 | for k, v in pairs(val) do 90 | if type(k) ~= "string" then 91 | error("invalid table: mixed or invalid key types") 92 | end 93 | table.insert(res, encode(k, stack) .. ":" .. 
encode(v, stack)) 94 | end 95 | stack[val] = nil 96 | return "{" .. table.concat(res, ",") .. "}" 97 | end 98 | end 99 | 100 | 101 | local function encode_string(val) 102 | return '"' .. val:gsub('[%z\1-\31\\"]', escape_char) .. '"' 103 | end 104 | 105 | 106 | local function encode_number(val) 107 | -- Check for NaN, -inf and inf 108 | if val ~= val or val <= -math.huge or val >= math.huge then 109 | error("unexpected number value '" .. tostring(val) .. "'") 110 | end 111 | return string.format("%.14g", val) 112 | end 113 | 114 | 115 | local type_func_map = { 116 | [ "nil" ] = encode_nil, 117 | [ "table" ] = encode_table, 118 | [ "string" ] = encode_string, 119 | [ "number" ] = encode_number, 120 | [ "boolean" ] = tostring, 121 | } 122 | 123 | 124 | encode = function(val, stack) 125 | local t = type(val) 126 | local f = type_func_map[t] 127 | if f then 128 | return f(val, stack) 129 | end 130 | error("unexpected type '" .. t .. "'") 131 | end 132 | 133 | 134 | function json.encode(val) 135 | return ( encode(val) ) 136 | end 137 | 138 | 139 | ------------------------------------------------------------------------------- 140 | -- Decode 141 | ------------------------------------------------------------------------------- 142 | 143 | local parse 144 | 145 | local function create_set(...) 146 | local res = {} 147 | for i = 1, select("#", ...) do 148 | res[ select(i, ...) ] = true 149 | end 150 | return res 151 | end 152 | 153 | local space_chars = create_set(" ", "\t", "\r", "\n") 154 | local delim_chars = create_set(" ", "\t", "\r", "\n", "]", "}", ",") 155 | local escape_chars = create_set("\\", "/", '"', "b", "f", "n", "r", "t", "u") 156 | local literals = create_set("true", "false", "null") 157 | 158 | local literal_map = { 159 | [ "true" ] = true, 160 | [ "false" ] = false, 161 | [ "null" ] = nil, 162 | } 163 | 164 | 165 | local function next_char(str, idx, set, negate) 166 | for i = idx, #str do 167 | if set[str:sub(i, i)] ~= negate then 168 | return i 169 | end 170 | end 171 | return #str + 1 172 | end 173 | 174 | 175 | local function decode_error(str, idx, msg) 176 | local line_count = 1 177 | local col_count = 1 178 | for i = 1, idx - 1 do 179 | col_count = col_count + 1 180 | if str:sub(i, i) == "\n" then 181 | line_count = line_count + 1 182 | col_count = 1 183 | end 184 | end 185 | error( string.format("%s at line %d col %d", msg, line_count, col_count) ) 186 | end 187 | 188 | 189 | local function codepoint_to_utf8(n) 190 | -- http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&id=iws-appendixa 191 | local f = math.floor 192 | if n <= 0x7f then 193 | return string.char(n) 194 | elseif n <= 0x7ff then 195 | return string.char(f(n / 64) + 192, n % 64 + 128) 196 | elseif n <= 0xffff then 197 | return string.char(f(n / 4096) + 224, f(n % 4096 / 64) + 128, n % 64 + 128) 198 | elseif n <= 0x10ffff then 199 | return string.char(f(n / 262144) + 240, f(n % 262144 / 4096) + 128, 200 | f(n % 4096 / 64) + 128, n % 64 + 128) 201 | end 202 | error( string.format("invalid unicode codepoint '%x'", n) ) 203 | end 204 | 205 | 206 | local function parse_unicode_escape(s) 207 | local n1 = tonumber( s:sub(1, 4), 16 ) 208 | local n2 = tonumber( s:sub(7, 10), 16 ) 209 | -- Surrogate pair? 
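-- A UTF-16 surrogate pair encodes a codepoint above U+FFFF: n1 is the high surrogate
-- (0xD800-0xDBFF), n2 the low surrogate (0xDC00-0xDFFF), and the combined codepoint is
-- (n1 - 0xD800) * 0x400 + (n2 - 0xDC00) + 0x10000.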
210 | if n2 then 211 | return codepoint_to_utf8((n1 - 0xd800) * 0x400 + (n2 - 0xdc00) + 0x10000) 212 | else 213 | return codepoint_to_utf8(n1) 214 | end 215 | end 216 | 217 | 218 | local function parse_string(str, i) 219 | local res = "" 220 | local j = i + 1 221 | local k = j 222 | 223 | while j <= #str do 224 | local x = str:byte(j) 225 | 226 | if x < 32 then 227 | decode_error(str, j, "control character in string") 228 | 229 | elseif x == 92 then -- `\`: Escape 230 | res = res .. str:sub(k, j - 1) 231 | j = j + 1 232 | local c = str:sub(j, j) 233 | if c == "u" then 234 | local hex = str:match("^[dD][89aAbB]%x%x\\u%x%x%x%x", j + 1) 235 | or str:match("^%x%x%x%x", j + 1) 236 | or decode_error(str, j - 1, "invalid unicode escape in string") 237 | res = res .. parse_unicode_escape(hex) 238 | j = j + #hex 239 | else 240 | if not escape_chars[c] then 241 | decode_error(str, j - 1, "invalid escape char '" .. c .. "' in string") 242 | end 243 | res = res .. escape_char_map_inv[c] 244 | end 245 | k = j + 1 246 | 247 | elseif x == 34 then -- `"`: End of string 248 | res = res .. str:sub(k, j - 1) 249 | return res, j + 1 250 | end 251 | 252 | j = j + 1 253 | end 254 | 255 | decode_error(str, i, "expected closing quote for string") 256 | end 257 | 258 | 259 | local function parse_number(str, i) 260 | local x = next_char(str, i, delim_chars) 261 | local s = str:sub(i, x - 1) 262 | local n = tonumber(s) 263 | if not n then 264 | decode_error(str, i, "invalid number '" .. s .. "'") 265 | end 266 | return n, x 267 | end 268 | 269 | 270 | local function parse_literal(str, i) 271 | local x = next_char(str, i, delim_chars) 272 | local word = str:sub(i, x - 1) 273 | if not literals[word] then 274 | decode_error(str, i, "invalid literal '" .. word .. "'") 275 | end 276 | return literal_map[word], x 277 | end 278 | 279 | 280 | local function parse_array(str, i) 281 | local res = {} 282 | local n = 1 283 | i = i + 1 284 | while 1 do 285 | local x 286 | i = next_char(str, i, space_chars, true) 287 | -- Empty / end of array? 288 | if str:sub(i, i) == "]" then 289 | i = i + 1 290 | break 291 | end 292 | -- Read token 293 | x, i = parse(str, i) 294 | res[n] = x 295 | n = n + 1 296 | -- Next token 297 | i = next_char(str, i, space_chars, true) 298 | local chr = str:sub(i, i) 299 | i = i + 1 300 | if chr == "]" then break end 301 | if chr ~= "," then decode_error(str, i, "expected ']' or ','") end 302 | end 303 | return res, i 304 | end 305 | 306 | 307 | local function parse_object(str, i) 308 | local res = {} 309 | i = i + 1 310 | while 1 do 311 | local key, val 312 | i = next_char(str, i, space_chars, true) 313 | -- Empty / end of object? 
314 | if str:sub(i, i) == "}" then 315 | i = i + 1 316 | break 317 | end 318 | -- Read key 319 | if str:sub(i, i) ~= '"' then 320 | decode_error(str, i, "expected string for key") 321 | end 322 | key, i = parse(str, i) 323 | -- Read ':' delimiter 324 | i = next_char(str, i, space_chars, true) 325 | if str:sub(i, i) ~= ":" then 326 | decode_error(str, i, "expected ':' after key") 327 | end 328 | i = next_char(str, i + 1, space_chars, true) 329 | -- Read value 330 | val, i = parse(str, i) 331 | -- Set 332 | res[key] = val 333 | -- Next token 334 | i = next_char(str, i, space_chars, true) 335 | local chr = str:sub(i, i) 336 | i = i + 1 337 | if chr == "}" then break end 338 | if chr ~= "," then decode_error(str, i, "expected '}' or ','") end 339 | end 340 | return res, i 341 | end 342 | 343 | 344 | local char_func_map = { 345 | [ '"' ] = parse_string, 346 | [ "0" ] = parse_number, 347 | [ "1" ] = parse_number, 348 | [ "2" ] = parse_number, 349 | [ "3" ] = parse_number, 350 | [ "4" ] = parse_number, 351 | [ "5" ] = parse_number, 352 | [ "6" ] = parse_number, 353 | [ "7" ] = parse_number, 354 | [ "8" ] = parse_number, 355 | [ "9" ] = parse_number, 356 | [ "-" ] = parse_number, 357 | [ "t" ] = parse_literal, 358 | [ "f" ] = parse_literal, 359 | [ "n" ] = parse_literal, 360 | [ "[" ] = parse_array, 361 | [ "{" ] = parse_object, 362 | } 363 | 364 | 365 | parse = function(str, idx) 366 | local chr = str:sub(idx, idx) 367 | local f = char_func_map[chr] 368 | if f then 369 | return f(str, idx) 370 | end 371 | decode_error(str, idx, "unexpected character '" .. chr .. "'") 372 | end 373 | 374 | 375 | function json.decode(str) 376 | if type(str) ~= "string" then 377 | error("expected argument of type string, got " .. type(str)) 378 | end 379 | local res, idx = parse(str, next_char(str, 1, space_chars, true)) 380 | idx = next_char(str, idx, space_chars, true) 381 | if idx <= #str then 382 | decode_error(str, idx, "trailing garbage") 383 | end 384 | return res 385 | end 386 | 387 | 388 | return json -------------------------------------------------------------------------------- /app/queries/bin/wrk/wrk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hasura/graphql-bench/2163a70a5324766521b8d6ceb7a73b8f0d12688d/app/queries/bin/wrk/wrk -------------------------------------------------------------------------------- /app/queries/config.yaml: -------------------------------------------------------------------------------- 1 | url: http://localhost:8085/v1/graphql 2 | headers: 3 | X-Hasura-Admin-Secret: my-secret 4 | queries: 5 | - name: SearchAlbumsWithArtist 6 | tools: [wrk2, k6, autocannon] 7 | execution_strategy: REQUESTS_PER_SECOND 8 | rps: 500 9 | duration: 60s 10 | query: | 11 | query SearchAlbumsWithArtist { 12 | albums(where: {title: {_like: "%Rock%"}}) { 13 | id 14 | title 15 | artist { 16 | name 17 | id 18 | } 19 | } 20 | } 21 | # - name: AlbumByPK 22 | # tools: [autocannon, k6] 23 | # execution_strategy: FIXED_REQUEST_NUMBER 24 | # requests: 10000 25 | # query: | 26 | # query AlbumByPK { 27 | # albums_by_pk(id: 1) { 28 | # id 29 | # title 30 | # } 31 | # } 32 | # - name: AlbumByPKMultiStage 33 | # tools: [k6] 34 | # execution_strategy: MULTI_STAGE 35 | # initial_rps: 0 36 | # stages: 37 | # - duration: 5s 38 | # target: 100 39 | # - duration: 5s 40 | # target: 1000 41 | # query: | 42 | # query AlbumByPK { 43 | # albums_by_pk(id: 1) { 44 | # id 45 | # title 46 | # } 47 | # } 48 | 
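Each entry under `queries` above picks its load generators via `tools` and an `execution_strategy`, whose companion fields (`rps` and `duration` here; `requests` for FIXED_REQUEST_NUMBER, `stages` for MULTI_STAGE) vary by strategy. A minimal sketch of driving this config through one of the executor classes defined later in `src/executors` (the standalone driver below is illustrative only, not the repo's actual entry point):

```typescript
import { K6Executor } from './src/executors/k6'

async function main() {
  // With no constructor arguments, BenchmarkExecutor loads the config.yaml
  // shown above (resolved relative to src/executors/base).
  const executor = new K6Executor()
  for (const benchmark of executor.config.queries) {
    // Each query names the tools it should run under; skip the others.
    if (!benchmark.tools.includes(executor.tool)) continue
    // runBenchmark dispatches on execution_strategy; REQUESTS_PER_SECOND
    // becomes a k6 constant-arrival-rate scenario at `rps` for `duration`.
    const metrics = await executor.runBenchmark(benchmark)
    console.log(JSON.stringify(metrics, null, 2))
  }
}

main()
```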
-------------------------------------------------------------------------------- /app/queries/example-command.sh: -------------------------------------------------------------------------------- 1 | yarn --silent run-benchmarks >test.json 2 | -------------------------------------------------------------------------------- /app/queries/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "graphql-query-bench", 3 | "version": "1.0.0", 4 | "main": "index.js", 5 | "author": "Gavin", 6 | "license": "MIT", 7 | "scripts": { 8 | "run-benchmarks": "ts-node src/benchmarkExecutor.ts >/dev/tty && cat combined_output.json" 9 | }, 10 | "dependencies": { 11 | "@types/fs-extra": "^9.0.1", 12 | "@types/js-yaml": "^3.12.5", 13 | "@types/k6": "^0.26.1", 14 | "@types/node-fetch": "^2.5.10", 15 | "autocannon": "^4.6.0", 16 | "execa": "^4.0.3", 17 | "fs-extra": "^9.0.1", 18 | "hdr-histogram-js": "^2.0.0-beta6", 19 | "js-yaml": "^3.14.0", 20 | "lookpath": "^1.1.0", 21 | "node-fetch": "^2.5.10" 22 | }, 23 | "devDependencies": { 24 | "@types/autocannon": "^4.1.0" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /app/queries/reports/autocannon/SearchAlbumsWithArtist-autocannon-500rps.json: -------------------------------------------------------------------------------- 1 | {"url":"http://localhost:8085/v1/graphql","requests":{"average":502.2,"mean":502.2,"stddev":1.47,"min":501,"max":505,"total":2511,"p0_001":501,"p0_01":501,"p0_1":501,"p1":501,"p2_5":501,"p10":501,"p25":501,"p50":502,"p75":502,"p90":505,"p97_5":505,"p99":505,"p99_9":505,"p99_99":505,"p99_999":505,"sent":3011},"latency":{"average":1.28,"mean":1.28,"stddev":1.41,"min":0,"max":12.607803,"p0_001":0,"p0_01":0,"p0_1":0,"p1":0,"p2_5":0,"p10":0,"p25":1,"p50":1,"p75":1,"p90":3,"p97_5":5,"p99":7,"p99_9":11,"p99_99":12,"p99_999":12},"throughput":{"average":451968,"mean":451968,"stddev":1325.28,"min":450900,"max":454500,"total":2259900,"p0_001":451071,"p0_01":451071,"p0_1":451071,"p1":451071,"p2_5":451071,"p10":451071,"p25":451071,"p50":451839,"p75":451839,"p90":454655,"p97_5":454655,"p99":454655,"p99_9":454655,"p99_99":454655,"p99_999":454655},"errors":0,"timeouts":0,"mismatches":0,"duration":5.05,"start":"2020-08-12T15:21:31.686Z","finish":"2020-08-12T15:21:36.736Z","connections":10,"pipelining":1,"non2xx":0,"1xx":0,"2xx":2511,"3xx":0,"4xx":0,"5xx":0} 2 | -------------------------------------------------------------------------------- /app/queries/reports/autocannon/SearchAlbumsWithArtist_2000rps.json: -------------------------------------------------------------------------------- 1 | 
{"url":"http://localhost:8085/v1/graphql","requests":{"average":2000.4,"mean":2000.4,"stddev":0.49,"min":2000,"max":2001,"total":10002,"p0_001":2000,"p0_01":2000,"p0_1":2000,"p1":2000,"p2_5":2000,"p10":2000,"p25":2000,"p50":2000,"p75":2001,"p90":2001,"p97_5":2001,"p99":2001,"p99_9":2001,"p99_99":2001,"p99_999":2001,"sent":12002},"latency":{"average":1.03,"mean":1.03,"stddev":1.1,"min":0,"max":12.719859,"p0_001":0,"p0_01":0,"p0_1":0,"p1":0,"p2_5":0,"p10":0,"p25":1,"p50":1,"p75":1,"p90":2,"p97_5":4,"p99":6,"p99_9":10,"p99_99":11,"p99_999":12},"throughput":{"average":1800089.6,"mean":1800089.6,"stddev":501.66,"min":1800000,"max":1800900,"total":9001800,"p0_001":1800191,"p0_01":1800191,"p0_1":1800191,"p1":1800191,"p2_5":1800191,"p10":1800191,"p25":1800191,"p50":1800191,"p75":1801215,"p90":1801215,"p97_5":1801215,"p99":1801215,"p99_9":1801215,"p99_99":1801215,"p99_999":1801215},"errors":0,"timeouts":0,"mismatches":0,"duration":5.05,"start":"2020-08-11T19:52:54.095Z","finish":"2020-08-11T19:52:59.142Z","connections":10,"pipelining":1,"non2xx":0,"1xx":0,"2xx":10002,"3xx":0,"4xx":0,"5xx":0} 2 | -------------------------------------------------------------------------------- /app/queries/reports/k6/AlbumByPKMultiStage_multistage.json: -------------------------------------------------------------------------------- 1 | { 2 | "metrics": { 3 | "checks": { 4 | "fails": 0, 5 | "passes": 5998, 6 | "value": 0 7 | }, 8 | "data_received": { 9 | "count": 956681, 10 | "rate": 95305.56626062887 11 | }, 12 | "data_sent": { 13 | "count": 683772, 14 | "rate": 68118.08497624885 15 | }, 16 | "http_req_blocked": { 17 | "avg": 0.00771433244414805, 18 | "max": 0.622213, 19 | "med": 0.004984, 20 | "min": 0.001106, 21 | "p(90)": 0.007701600000000001, 22 | "p(95)": 0.008756199999999997 23 | }, 24 | "http_req_connecting": { 25 | "avg": 0.0008909646548849617, 26 | "max": 0.311468, 27 | "med": 0, 28 | "min": 0, 29 | "p(90)": 0, 30 | "p(95)": 0 31 | }, 32 | "http_req_duration": { 33 | "avg": 1.17462324474825, 34 | "max": 13.023869, 35 | "med": 1.281998, 36 | "min": 0.319598, 37 | "p(90)": 1.8437460000000003, 38 | "p(95)": 2.0806244 39 | }, 40 | "http_req_receiving": { 41 | "avg": 0.06438281027009, 42 | "max": 0.673957, 43 | "med": 0.070488, 44 | "min": 0.014075, 45 | "p(90)": 0.10387300000000002, 46 | "p(95)": 0.1138705 47 | }, 48 | "http_req_sending": { 49 | "avg": 0.0740931853951316, 50 | "max": 0.250178, 51 | "med": 0.080945, 52 | "min": 0.009322, 53 | "p(90)": 0.1233104, 54 | "p(95)": 0.1321586 55 | }, 56 | "http_req_tls_handshaking": { 57 | "avg": 0, 58 | "max": 0, 59 | "med": 0, 60 | "min": 0, 61 | "p(90)": 0, 62 | "p(95)": 0 63 | }, 64 | "http_req_waiting": { 65 | "avg": 1.0361472490830275, 66 | "max": 12.931826, 67 | "med": 1.115125, 68 | "min": 0.277615, 69 | "p(90)": 1.6357176000000004, 70 | "p(95)": 1.8451715000000002 71 | }, 72 | "http_reqs": { 73 | "count": 2999, 74 | "rate": 298.76353059758264 75 | }, 76 | "iteration_duration": { 77 | "avg": 1.5130963311103687, 78 | "max": 13.314211, 79 | "med": 1.674519, 80 | "min": 0.424206, 81 | "p(90)": 2.3625624000000003, 82 | "p(95)": 2.6613125 83 | }, 84 | "iterations": { 85 | "count": 2999, 86 | "rate": 298.76353059758264 87 | }, 88 | "vus": { 89 | "max": 10, 90 | "min": 10, 91 | "value": 10 92 | }, 93 | "vus_max": { 94 | "max": 10, 95 | "min": 10, 96 | "value": 10 97 | } 98 | }, 99 | "root_group": { 100 | "name": "", 101 | "path": "", 102 | "id": "d41d8cd98f00b204e9800998ecf8427e", 103 | "groups": {}, 104 | "checks": { 105 | "is status 200": { 106 | "name": "is status 
200", 107 | "path": "::is status 200", 108 | "id": "548d37ca5f33793206f7832e7cea54fb", 109 | "passes": 2999, 110 | "fails": 0 111 | }, 112 | "no error in body": { 113 | "name": "no error in body", 114 | "path": "::no error in body", 115 | "id": "2304d1de9b435292b5c06f40b4167043", 116 | "passes": 2999, 117 | "fails": 0 118 | } 119 | } 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /app/queries/reports/k6/AlbumsArtistTrackGenreAll_10s_max_requests.json: -------------------------------------------------------------------------------- 1 | { 2 | "metrics": { 3 | "checks": { 4 | "fails": 0, 5 | "passes": 7900, 6 | "value": 0 7 | }, 8 | "data_received": { 9 | "count": 1044885600, 10 | "rate": 103885572.66969061 11 | }, 12 | "data_sent": { 13 | "count": 1678750, 14 | "rate": 166906.21932127603 15 | }, 16 | "http_req_blocked": { 17 | "avg": 0.010237960000000015, 18 | "max": 12.030175, 19 | "med": 0.0034915, 20 | "min": 0.001364, 21 | "p(90)": 0.006101399999999999, 22 | "p(95)": 0.008300849999999999 23 | }, 24 | "http_req_connecting": { 25 | "avg": 0.001402926075949367, 26 | "max": 0.576152, 27 | "med": 0, 28 | "min": 0, 29 | "p(90)": 0, 30 | "p(95)": 0 31 | }, 32 | "http_req_duration": { 33 | "avg": 47.10204647696199, 34 | "max": 164.681963, 35 | "med": 43.806523999999996, 36 | "min": 10.266506, 37 | "p(90)": 75.83881269999999, 38 | "p(95)": 87.43657969999997 39 | }, 40 | "http_req_receiving": { 41 | "avg": 0.7949058774683533, 42 | "max": 31.681142, 43 | "med": 0.209752, 44 | "min": 0.085177, 45 | "p(90)": 1.5640298999999998, 46 | "p(95)": 4.11842319999999 47 | }, 48 | "http_req_sending": { 49 | "avg": 0.08290836000000018, 50 | "max": 26.35348, 51 | "med": 0.0192755, 52 | "min": 0.008655, 53 | "p(90)": 0.032625999999999995, 54 | "p(95)": 0.04252109999999994 55 | }, 56 | "http_req_tls_handshaking": { 57 | "avg": 0, 58 | "max": 0, 59 | "med": 0, 60 | "min": 0, 61 | "p(90)": 0, 62 | "p(95)": 0 63 | }, 64 | "http_req_waiting": { 65 | "avg": 46.224232239493716, 66 | "max": 164.477496, 67 | "med": 42.8990105, 68 | "min": 10.056975, 69 | "p(90)": 74.4910581, 70 | "p(95)": 86.74447079999997 71 | }, 72 | "http_reqs": { 73 | "count": 3950, 74 | "rate": 392.72051605006124 75 | }, 76 | "iteration_duration": { 77 | "avg": 50.698209274683514, 78 | "max": 183.637207, 79 | "med": 47.4730175, 80 | "min": 13.117581, 81 | "p(90)": 79.9288169, 82 | "p(95)": 91.90212169999998 83 | }, 84 | "iterations": { 85 | "count": 3950, 86 | "rate": 392.72051605006124 87 | }, 88 | "vus": { 89 | "max": 20, 90 | "min": 20, 91 | "value": 20 92 | }, 93 | "vus_max": { 94 | "max": 20, 95 | "min": 20, 96 | "value": 20 97 | } 98 | }, 99 | "root_group": { 100 | "name": "", 101 | "path": "", 102 | "id": "d41d8cd98f00b204e9800998ecf8427e", 103 | "groups": {}, 104 | "checks": { 105 | "is status 200": { 106 | "name": "is status 200", 107 | "path": "::is status 200", 108 | "id": "548d37ca5f33793206f7832e7cea54fb", 109 | "passes": 3950, 110 | "fails": 0 111 | }, 112 | "no error in body": { 113 | "name": "no error in body", 114 | "path": "::no error in body", 115 | "id": "2304d1de9b435292b5c06f40b4167043", 116 | "passes": 3950, 117 | "fails": 0 118 | } 119 | } 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /app/queries/reports/k6/SearchAlbumsWithArtist-k6-500rps.json: -------------------------------------------------------------------------------- 1 | { 2 | "metrics": { 3 | "checks": { 4 | "fails": 0, 5 | "passes": 5002, 6 | "value": 0 7 
| }, 8 | "data_received": { 9 | "count": 2250900, 10 | "rate": 445922.48824429885 11 | }, 12 | "data_sent": { 13 | "count": 772809, 14 | "rate": 153100.05429720925 15 | }, 16 | "http_req_blocked": { 17 | "avg": 0.005304063174730112, 18 | "max": 0.272195, 19 | "med": 0.003229, 20 | "min": 0.001306, 21 | "p(90)": 0.006288, 22 | "p(95)": 0.007895 23 | }, 24 | "http_req_connecting": { 25 | "avg": 0.00030792123150739703, 26 | "max": 0.15573, 27 | "med": 0, 28 | "min": 0, 29 | "p(90)": 0, 30 | "p(95)": 0 31 | }, 32 | "http_req_duration": { 33 | "avg": 1.4181152187125134, 34 | "max": 11.731741, 35 | "med": 1.271705, 36 | "min": 0.582012, 37 | "p(90)": 2.375526, 38 | "p(95)": 2.591692 39 | }, 40 | "http_req_receiving": { 41 | "avg": 0.05093108436625352, 42 | "max": 1.466971, 43 | "med": 0.042884, 44 | "min": 0.016852, 45 | "p(90)": 0.087178, 46 | "p(95)": 0.098844 47 | }, 48 | "http_req_sending": { 49 | "avg": 0.05888744742103165, 50 | "max": 1.153073, 51 | "med": 0.052606, 52 | "min": 0.010255, 53 | "p(90)": 0.09874, 54 | "p(95)": 0.110653 55 | }, 56 | "http_req_tls_handshaking": { 57 | "avg": 0, 58 | "max": 0, 59 | "med": 0, 60 | "min": 0, 61 | "p(90)": 0, 62 | "p(95)": 0 63 | }, 64 | "http_req_waiting": { 65 | "avg": 1.3082966869252317, 66 | "max": 11.552538, 67 | "med": 1.171217, 68 | "min": 0.536906, 69 | "p(90)": 2.177364, 70 | "p(95)": 2.388689 71 | }, 72 | "http_reqs": { 73 | "count": 2501, 74 | "rate": 495.46943138255426 75 | }, 76 | "iteration_duration": { 77 | "avg": 1.686898317073171, 78 | "max": 12.237789, 79 | "med": 1.505621, 80 | "min": 0.697433, 81 | "p(90)": 2.852051, 82 | "p(95)": 3.141238 83 | }, 84 | "iterations": { 85 | "count": 2501, 86 | "rate": 495.46943138255426 87 | }, 88 | "vus": { 89 | "max": 10, 90 | "min": 10, 91 | "value": 10 92 | }, 93 | "vus_max": { 94 | "max": 10, 95 | "min": 10, 96 | "value": 10 97 | } 98 | }, 99 | "root_group": { 100 | "name": "", 101 | "path": "", 102 | "id": "d41d8cd98f00b204e9800998ecf8427e", 103 | "groups": {}, 104 | "checks": { 105 | "is status 200": { 106 | "name": "is status 200", 107 | "path": "::is status 200", 108 | "id": "548d37ca5f33793206f7832e7cea54fb", 109 | "passes": 2501, 110 | "fails": 0 111 | }, 112 | "no error in body": { 113 | "name": "no error in body", 114 | "path": "::no error in body", 115 | "id": "2304d1de9b435292b5c06f40b4167043", 116 | "passes": 2501, 117 | "fails": 0 118 | } 119 | } 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /app/queries/reports/k6/SearchAlbumsWithArtist_10000_fixed_requests.json: -------------------------------------------------------------------------------- 1 | { 2 | "metrics": { 3 | "checks": { 4 | "fails": 0, 5 | "passes": 20000, 6 | "value": 0 7 | }, 8 | "data_received": { 9 | "count": 9000000, 10 | "rate": 2406792.291962614 11 | }, 12 | "data_sent": { 13 | "count": 3090000, 14 | "rate": 826332.0202404974 15 | }, 16 | "http_req_blocked": { 17 | "avg": 0.0038030054000000083, 18 | "max": 1.674306, 19 | "med": 0.001907, 20 | "min": 0.000901, 21 | "p(90)": 0.003542, 22 | "p(95)": 0.004756099999999998 23 | }, 24 | "http_req_connecting": { 25 | "avg": 0.0001304426, 26 | "max": 0.459505, 27 | "med": 0, 28 | "min": 0, 29 | "p(90)": 0, 30 | "p(95)": 0 31 | }, 32 | "http_req_duration": { 33 | "avg": 3.363693480600015, 34 | "max": 26.317045, 35 | "med": 2.6075635, 36 | "min": 0.570608, 37 | "p(90)": 6.277045800000001, 38 | "p(95)": 8.27420325 39 | }, 40 | "http_req_receiving": { 41 | "avg": 0.07108121310000025, 42 | "max": 11.225144, 43 | "med": 
0.030287, 44 | "min": 0.013314, 45 | "p(90)": 0.07014650000000001, 46 | "p(95)": 0.12427534999999992 47 | }, 48 | "http_req_sending": { 49 | "avg": 0.02152808040000004, 50 | "max": 5.998871, 51 | "med": 0.011833, 52 | "min": 0.006439, 53 | "p(90)": 0.021368300000000003, 54 | "p(95)": 0.028180249999999997 55 | }, 56 | "http_req_tls_handshaking": { 57 | "avg": 0, 58 | "max": 0, 59 | "med": 0, 60 | "min": 0, 61 | "p(90)": 0, 62 | "p(95)": 0 63 | }, 64 | "http_req_waiting": { 65 | "avg": 3.2710841871000023, 66 | "max": 24.350404, 67 | "med": 2.527785, 68 | "min": 0.540073, 69 | "p(90)": 6.133292600000001, 70 | "p(95)": 8.090355699999996 71 | }, 72 | "http_reqs": { 73 | "count": 10000, 74 | "rate": 2674.2136577362376 75 | }, 76 | "iteration_duration": { 77 | "avg": 3.658402213299991, 78 | "max": 48.671102, 79 | "med": 2.8530295, 80 | "min": 0.675137, 81 | "p(90)": 6.6975635, 82 | "p(95)": 8.747789699999984 83 | }, 84 | "iterations": { 85 | "count": 10000, 86 | "rate": 2674.2136577362376 87 | }, 88 | "vus": { 89 | "max": 10, 90 | "min": 10, 91 | "value": 10 92 | }, 93 | "vus_max": { 94 | "max": 10, 95 | "min": 10, 96 | "value": 10 97 | } 98 | }, 99 | "root_group": { 100 | "name": "", 101 | "path": "", 102 | "id": "d41d8cd98f00b204e9800998ecf8427e", 103 | "groups": {}, 104 | "checks": { 105 | "is status 200": { 106 | "name": "is status 200", 107 | "path": "::is status 200", 108 | "id": "548d37ca5f33793206f7832e7cea54fb", 109 | "passes": 10000, 110 | "fails": 0 111 | }, 112 | "no error in body": { 113 | "name": "no error in body", 114 | "path": "::no error in body", 115 | "id": "2304d1de9b435292b5c06f40b4167043", 116 | "passes": 10000, 117 | "fails": 0 118 | } 119 | } 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /app/queries/reports/k6/SearchAlbumsWithArtist_2000rps.json: -------------------------------------------------------------------------------- 1 | { 2 | "metrics": { 3 | "checks": { 4 | "fails": 0, 5 | "passes": 19490, 6 | "value": 0 7 | }, 8 | "data_received": { 9 | "count": 8770500, 10 | "rate": 1737564.6271244546 11 | }, 12 | "data_sent": { 13 | "count": 3011205, 14 | "rate": 596563.8553127295 15 | }, 16 | "dropped_iterations": { 17 | "count": 255, 18 | "rate": 50.51923834635836 19 | }, 20 | "http_req_blocked": { 21 | "avg": 0.003697827090815773, 22 | "max": 0.815286, 23 | "med": 0.002188, 24 | "min": 0.001043, 25 | "p(90)": 0.00318, 26 | "p(95)": 0.004219599999999999 27 | }, 28 | "http_req_connecting": { 29 | "avg": 0.00013315700359158546, 30 | "max": 0.314842, 31 | "med": 0, 32 | "min": 0, 33 | "p(90)": 0, 34 | "p(95)": 0 35 | }, 36 | "http_req_duration": { 37 | "avg": 1.1437795475628523, 38 | "max": 14.399807, 39 | "med": 0.869279, 40 | "min": 0.516293, 41 | "p(90)": 1.7828484000000002, 42 | "p(95)": 2.572891599999993 43 | }, 44 | "http_req_receiving": { 45 | "avg": 0.04665413925089808, 46 | "max": 13.119592, 47 | "med": 0.027616, 48 | "min": 0.012053, 49 | "p(90)": 0.04538100000000001, 50 | "p(95)": 0.06497119999999985 51 | }, 52 | "http_req_sending": { 53 | "avg": 0.05086028927655199, 54 | "max": 2.726133, 55 | "med": 0.035664, 56 | "min": 0.008239, 57 | "p(90)": 0.052032600000000026, 58 | "p(95)": 0.08047019999999994 59 | }, 60 | "http_req_tls_handshaking": { 61 | "avg": 0, 62 | "max": 0, 63 | "med": 0, 64 | "min": 0, 65 | "p(90)": 0, 66 | "p(95)": 0 67 | }, 68 | "http_req_waiting": { 69 | "avg": 1.0462651190354044, 70 | "max": 13.775726, 71 | "med": 0.801086, 72 | "min": 0.467876, 73 | "p(90)": 1.592883, 74 | "p(95)": 
2.281105999999998 75 | }, 76 | "http_reqs": { 77 | "count": 9745, 78 | "rate": 1930.6273634716163 79 | }, 80 | "iteration_duration": { 81 | "avg": 1.3472799263211903, 82 | "max": 19.281404, 83 | "med": 1.038459, 84 | "min": 0.621622, 85 | "p(90)": 2.1249544000000005, 86 | "p(95)": 2.9531039999999984 87 | }, 88 | "iterations": { 89 | "count": 9745, 90 | "rate": 1930.6273634716163 91 | }, 92 | "vus": { 93 | "max": 10, 94 | "min": 10, 95 | "value": 10 96 | }, 97 | "vus_max": { 98 | "max": 10, 99 | "min": 10, 100 | "value": 10 101 | } 102 | }, 103 | "root_group": { 104 | "name": "", 105 | "path": "", 106 | "id": "d41d8cd98f00b204e9800998ecf8427e", 107 | "groups": {}, 108 | "checks": { 109 | "is status 200": { 110 | "name": "is status 200", 111 | "path": "::is status 200", 112 | "id": "548d37ca5f33793206f7832e7cea54fb", 113 | "passes": 9745, 114 | "fails": 0 115 | }, 116 | "no error in body": { 117 | "name": "no error in body", 118 | "path": "::no error in body", 119 | "id": "2304d1de9b435292b5c06f40b4167043", 120 | "passes": 9745, 121 | "fails": 0 122 | } 123 | } 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /app/queries/reports/k6/SearchAlbumsWithArtist_500rps.json: -------------------------------------------------------------------------------- 1 | { 2 | "metrics": { 3 | "checks": { 4 | "fails": 0, 5 | "passes": 5002, 6 | "value": 0 7 | }, 8 | "data_received": { 9 | "count": 2250900, 10 | "rate": 445863.36350222316 11 | }, 12 | "data_sent": { 13 | "count": 772809, 14 | "rate": 153079.75480242996 15 | }, 16 | "http_req_blocked": { 17 | "avg": 0.005110173930427835, 18 | "max": 0.880602, 19 | "med": 0.003, 20 | "min": 0.001167, 21 | "p(90)": 0.005952, 22 | "p(95)": 0.007475 23 | }, 24 | "http_req_connecting": { 25 | "avg": 0.0003276389444222311, 26 | "max": 0.127106, 27 | "med": 0, 28 | "min": 0, 29 | "p(90)": 0, 30 | "p(95)": 0 31 | }, 32 | "http_req_duration": { 33 | "avg": 1.4410978984406257, 34 | "max": 11.884647, 35 | "med": 1.278623, 36 | "min": 0.59625, 37 | "p(90)": 2.293808, 38 | "p(95)": 2.574437 39 | }, 40 | "http_req_receiving": { 41 | "avg": 0.04664178288684532, 42 | "max": 0.680105, 43 | "med": 0.040958, 44 | "min": 0.013442, 45 | "p(90)": 0.081817, 46 | "p(95)": 0.092152 47 | }, 48 | "http_req_sending": { 49 | "avg": 0.05393678168732506, 50 | "max": 1.649232, 51 | "med": 0.048011, 52 | "min": 0.010127, 53 | "p(90)": 0.091537, 54 | "p(95)": 0.103614 55 | }, 56 | "http_req_tls_handshaking": { 57 | "avg": 0, 58 | "max": 0, 59 | "med": 0, 60 | "min": 0, 61 | "p(90)": 0, 62 | "p(95)": 0 63 | }, 64 | "http_req_waiting": { 65 | "avg": 1.3405193338664565, 66 | "max": 11.812562, 67 | "med": 1.193677, 68 | "min": 0.549902, 69 | "p(90)": 2.116992, 70 | "p(95)": 2.372789 71 | }, 72 | "http_reqs": { 73 | "count": 2501, 74 | "rate": 495.4037372246924 75 | }, 76 | "iteration_duration": { 77 | "avg": 1.6957547449020385, 78 | "max": 12.064122, 79 | "med": 1.509561, 80 | "min": 0.706435, 81 | "p(90)": 2.723371, 82 | "p(95)": 3.047241 83 | }, 84 | "iterations": { 85 | "count": 2501, 86 | "rate": 495.4037372246924 87 | }, 88 | "vus": { 89 | "max": 10, 90 | "min": 10, 91 | "value": 10 92 | }, 93 | "vus_max": { 94 | "max": 10, 95 | "min": 10, 96 | "value": 10 97 | } 98 | }, 99 | "root_group": { 100 | "name": "", 101 | "path": "", 102 | "id": "d41d8cd98f00b204e9800998ecf8427e", 103 | "groups": {}, 104 | "checks": { 105 | "is status 200": { 106 | "name": "is status 200", 107 | "path": "::is status 200", 108 | "id": 
"548d37ca5f33793206f7832e7cea54fb", 109 | "passes": 2501, 110 | "fails": 0 111 | }, 112 | "no error in body": { 113 | "name": "no error in body", 114 | "path": "::no error in body", 115 | "id": "2304d1de9b435292b5c06f40b4167043", 116 | "passes": 2501, 117 | "fails": 0 118 | } 119 | } 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /app/queries/reports/wrk2/SearchAlbumsWithArtist-wrk2-500rps.json: -------------------------------------------------------------------------------- 1 | { 2 | "latency_aggregate": { 3 | "max": 21.776, 4 | "stdev": 1.2739275682516, 5 | "min": 0.737, 6 | "mean": 3.1087296756107 7 | }, 8 | "duration_in_milliseconds": 5002.475, 9 | "requests": 2499, 10 | "bytes_transfer_per_second": 449597.44926262, 11 | "latency_distribution": [ 12 | { 13 | "percentile": 50, 14 | "latency_in_milliseconds": 3.147 15 | }, 16 | { 17 | "percentile": 75, 18 | "latency_in_milliseconds": 3.559 19 | }, 20 | { 21 | "percentile": 90, 22 | "latency_in_milliseconds": 3.985 23 | }, 24 | { 25 | "percentile": 95, 26 | "latency_in_milliseconds": 4.203 27 | }, 28 | { 29 | "percentile": 97.5, 30 | "latency_in_milliseconds": 4.539 31 | }, 32 | { 33 | "percentile": 99, 34 | "latency_in_milliseconds": 5.067 35 | }, 36 | { 37 | "percentile": 99.9, 38 | "latency_in_milliseconds": 20.191 39 | }, 40 | { 41 | "percentile": 99.99, 42 | "latency_in_milliseconds": 21.791 43 | }, 44 | { 45 | "percentile": 99.999, 46 | "latency_in_milliseconds": 21.791 47 | }, 48 | { 49 | "percentile": 100, 50 | "latency_in_milliseconds": 21.791 51 | } 52 | ], 53 | "bytes": 2249100, 54 | "requests_per_second": 499.55272140291 55 | } 56 | -------------------------------------------------------------------------------- /app/queries/src/PreciseHdrHistogram.ts: -------------------------------------------------------------------------------- 1 | import * as hdr from 'hdr-histogram-js' 2 | import { parseHdrHistogramText } from './executors/base/index' 3 | import { HDRHistogramParsedStats } from './executors/base/types' 4 | 5 | /* A wrapper for Histogram that gets us more precision. See 6 | * https://github.com/HdrHistogram/HdrHistogramJS/issues/35 7 | * 8 | * Values inserted will be truncated to `logBase 10 scalingFactor` (i.e. 4) 9 | * decimal places. 10 | */ 11 | export class PreciseHdrHistogram { 12 | // We'll need to multiply by scalingFactor anything we insert, and divide by scalingFactor 13 | // anything we output from here: 14 | private _histogramDirty: hdr.Histogram 15 | static scalingFactor: number = 10000 16 | 17 | constructor( 18 | request: hdr.BuildRequest 19 | ) { 20 | this._histogramDirty = hdr.build(request) 21 | } 22 | 23 | //// On inputs we multiply... 
24 | public recordValue(value: number) { 25 | this._histogramDirty.recordValue(value*PreciseHdrHistogram.scalingFactor) 26 | } 27 | 28 | public recordValueWithCount(value: number, count: number): void { 29 | this._histogramDirty.recordValueWithCount(value*PreciseHdrHistogram.scalingFactor, count) 30 | } 31 | 32 | //// ...and on outputs we divide: 33 | public toJSON(): hdr.HistogramSummary { 34 | let summary = this._histogramDirty.summary 35 | for (let key in summary) { 36 | // scale mean and percentiles (but not counts) back down: 37 | if (key == "totalCount") continue 38 | summary[key] /= PreciseHdrHistogram.scalingFactor 39 | } 40 | 41 | return summary 42 | } 43 | get mean(): number { 44 | return (this._histogramDirty.mean / PreciseHdrHistogram.scalingFactor) 45 | } 46 | get min(): number { 47 | // NOTE: 'minNonZeroValue' is already just 'min' since 0 can't be recorded 48 | return (this._histogramDirty.minNonZeroValue / PreciseHdrHistogram.scalingFactor) 49 | } 50 | get stdDeviation(): number { 51 | return (this._histogramDirty.stdDeviation / PreciseHdrHistogram.scalingFactor) 52 | } 53 | // This is our own helper, where formerly we called: 54 | // parseHdrHistogramText(histogram.outputPercentileDistribution()) 55 | get parsedStats(): HDRHistogramParsedStats[] { 56 | let parsedDirty = parseHdrHistogramText( 57 | this._histogramDirty.outputPercentileDistribution()) 58 | 59 | // scale mean and percentiles (but not counts) back down: 60 | parsedDirty.forEach(function (line) { 61 | // i.e. line.value /= PreciseHdrHistogram.scalingFactor 62 | line.value = String((Number(line.value) / PreciseHdrHistogram.scalingFactor)) 63 | }) 64 | return parsedDirty 65 | } 66 | 67 | // Don't leak implementation in debugging output, which might be confusing: 68 | [Symbol.for("nodejs.util.inspect.custom")]() { 69 | return JSON.stringify(this.toJSON(), null, 2) 70 | } 71 | } 72 | 73 | // Copy-pasted from 'hdr-histogram-js', since this isn't exported 74 | export const defaultRequest: hdr.BuildRequest = { 75 | bitBucketSize: 32, 76 | autoResize: true, 77 | lowestDiscernibleValue: 1, 78 | highestTrackableValue: 2, 79 | // NOTE: the default 'hdr-histogram-js' is 3, but we'll set it to the max of 5 here: 80 | numberOfSignificantValueDigits: 5, 81 | useWebAssembly: false, 82 | } 83 | 84 | export const build = (request = defaultRequest): PreciseHdrHistogram => { 85 | return new PreciseHdrHistogram(request) 86 | } 87 | -------------------------------------------------------------------------------- /app/queries/src/executors/autocannon/index.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * ======================== 3 | * AUTOCANNON 4 | * ======================== 5 | */ 6 | 7 | import { makeBenchmarkMetrics, BenchmarkExecutor } from '../base' 8 | 9 | import { 10 | BenchmarkMetrics, 11 | Benchmark, 12 | BenchmarkTool, 13 | CustomBenchmark, 14 | FixedRequestNumberBenchmark, 15 | MaxRequestsInDurationBenchmark, 16 | MultiStageBenchmark, 17 | RequestsPerSecondBenchmark, 18 | } from '../base/types' 19 | 20 | import { RunAutocannonMetadata } from './types' 21 | 22 | import autocannon from 'autocannon' 23 | import { Options as AutocannonOptions } from 'autocannon' 24 | 25 | import fs from 'fs-extra' 26 | import path from 'path' 27 | import * as hdr from '../../PreciseHdrHistogram' 28 | 29 | export class AutocannonExecutor extends BenchmarkExecutor { 30 | public tool = BenchmarkTool.AUTOCANNON 31 | private reportPath = path.join(this.baseReportPath, 'autocannon') 32 | 33 | 
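/**
 * Options shared by every strategy below: a GraphQL request is always an
 * HTTP POST whose body is {"query": ..., "variables": ...}. Each strategy
 * then layers its own knobs on top: REQUESTS_PER_SECOND adds {duration,
 * overallRate}, FIXED_REQUEST_NUMBER adds {amount}, and
 * MAX_REQUESTS_IN_DURATION adds {duration}.
 */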
private _makeSharedFields(bench: Benchmark): AutocannonOptions { 34 | return { 35 | url: this.config.url, 36 | headers: this.config.headers, 37 | method: 'POST', 38 | connections: bench.connections || 10, 39 | body: JSON.stringify({ 40 | query: bench.query, 41 | variables: bench.variables, 42 | }), 43 | } 44 | } 45 | 46 | public runCustomBench(bench: CustomBenchmark) { 47 | const baseOpts = this._makeSharedFields(bench) 48 | const queryName = this._makeBenchmarkName(bench) 49 | const metadata = { 50 | queryName, 51 | outputFile: `${queryName}.json`, 52 | } 53 | return this._runAutocannon(metadata, { 54 | ...baseOpts, 55 | ...bench.options.autocannon, 56 | }) 57 | } 58 | 59 | public runMultiStageBench(bench: MultiStageBenchmark): never { 60 | throw new Error('Not Implemented') 61 | } 62 | 63 | public runRequestsPerSecondBench(bench: RequestsPerSecondBenchmark) { 64 | const baseOpts = this._makeSharedFields(bench) 65 | const queryName = this._makeBenchmarkName(bench) 66 | const metadata = { 67 | queryName, 68 | outputFile: `${queryName}.json`, 69 | } 70 | return this._runAutocannon(metadata, { 71 | ...baseOpts, 72 | duration: bench.duration, 73 | overallRate: bench.rps, 74 | }) 75 | } 76 | 77 | public runFixedRequestNumberBench(bench: FixedRequestNumberBenchmark) { 78 | const baseOpts = this._makeSharedFields(bench) 79 | const queryName = this._makeBenchmarkName(bench) 80 | const metadata = { 81 | queryName, 82 | outputFile: `${queryName}.json`, 83 | } 84 | return this._runAutocannon(metadata, { 85 | ...baseOpts, 86 | amount: bench.requests, 87 | }) 88 | } 89 | 90 | public runMaxRequestsInDurationBench(bench: MaxRequestsInDurationBenchmark) { 91 | const baseOpts = this._makeSharedFields(bench) 92 | const queryName = this._makeBenchmarkName(bench) 93 | const metadata = { 94 | queryName, 95 | outputFile: `${queryName}.json`, 96 | } 97 | return this._runAutocannon(metadata, { 98 | ...baseOpts, 99 | duration: bench.duration, 100 | }) 101 | } 102 | 103 | private async _runAutocannon( 104 | metadata: RunAutocannonMetadata, 105 | config: AutocannonOptions 106 | ) { 107 | const { queryName, outputFile } = metadata 108 | // If debug, log each response body 109 | if (this.config.debug) 110 | config.setupClient = (client) => client.on('body', console.log) 111 | 112 | const instance = autocannon(config, (err, results) => { 113 | if (err) throw err 114 | }) 115 | 116 | const histogram = hdr.build() 117 | instance.on('response', (client, statusCode, resBytes, responseTime) => { 118 | histogram.recordValue(responseTime) 119 | }) 120 | 121 | autocannon.track(instance, { 122 | outputStream: this.config.writeStream || process.stdout, 123 | renderProgressBar: true, 124 | renderLatencyTable: true, 125 | renderResultsTable: true, 126 | }) 127 | 128 | // Wrap this in a Promise to force waiting for Autocannon run to finish 129 | return new Promise((resolve) => { 130 | instance.on('done', (results) => { 131 | // Write Autocannon results object to output file 132 | const outfile = path.join(this.reportPath, outputFile) 133 | fs.outputJSONSync(outfile, results) 134 | // Build and return Metrics object 135 | const metrics = makeBenchmarkMetrics({ 136 | name: metadata.queryName, 137 | histogram, 138 | response: { 139 | totalBytes: results.throughput.total, 140 | bytesPerSecond: results.throughput.average, 141 | }, 142 | time: { 143 | start: results.start, 144 | end: results.finish, 145 | }, 146 | requests: { 147 | count: results.requests.total, 148 | average: results.requests.average, 149 | }, 150 | }) 151 | 
resolve(metrics) 152 | }) 153 | }) 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /app/queries/src/executors/autocannon/types.ts: -------------------------------------------------------------------------------- 1 | export interface RunAutocannonMetadata { 2 | queryName: string 3 | outputFile: string 4 | } 5 | -------------------------------------------------------------------------------- /app/queries/src/executors/base/index.ts: -------------------------------------------------------------------------------- 1 | import fs from 'fs-extra' 2 | import path from 'path' 3 | import yaml from 'js-yaml' 4 | 5 | import { 6 | HDRHistogramParsedStats, 7 | BenchmarkMetrics, 8 | BenchmarkMetricParams, 9 | Benchmark, 10 | BenchmarkTool, 11 | CustomBenchmark, 12 | FixedRequestNumberBenchmark, 13 | GlobalConfig, 14 | MaxRequestsInDurationBenchmark, 15 | MultiStageBenchmark, 16 | RequestsPerSecondBenchmark, 17 | } from './types' 18 | 19 | export function parseHdrHistogramText(text: string): HDRHistogramParsedStats[] { 20 | let results: HDRHistogramParsedStats[] = [] 21 | const lines = text.split('\n') 22 | for (let line of lines) { 23 | let entries = line.trim().split(/\s+/) 24 | // Careful of truthiness when parsing zero here: 25 | let valid = entries.length == 4 && entries.every((x) => !(isNaN(Number(x)))) 26 | if (!valid) continue 27 | let [value, percentile, totalCount, ofOnePercentile] = entries 28 | results.push({ value, percentile, totalCount, ofOnePercentile }) 29 | } 30 | return results 31 | } 32 | 33 | export function makeBenchmarkMetrics( 34 | params: BenchmarkMetricParams 35 | ): BenchmarkMetrics { 36 | const { name, histogram, basicHistogram, time, requests, response, geoMean, p501stHalf, p501stQuarter, p501stEighth, geoMean1stHalf, geoMean1stQuarter, geoMean1stEighth } = params 37 | return { 38 | name, 39 | time, 40 | requests, 41 | response, 42 | histogram: { 43 | json: { 44 | ...histogram.toJSON(), 45 | mean: histogram.mean, 46 | geoMean, 47 | p501stHalf, 48 | p501stQuarter, 49 | p501stEighth, 50 | geoMean1stHalf, 51 | geoMean1stQuarter, 52 | geoMean1stEighth, 53 | min: histogram.min, 54 | stdDeviation: histogram.stdDeviation, 55 | }, 56 | parsedStats: histogram.parsedStats, 57 | }, 58 | basicHistogram, 59 | } 60 | } 61 | 62 | /** 63 | * ======================== 64 | * BASE CLASS 65 | * ======================== 66 | */ 67 | 68 | /** 69 | * An abstract class which specific benchmark executors must implement 70 | * Provides shared functionality, common configuration options, and abstract function definitions 71 | */ 72 | export abstract class BenchmarkExecutor { 73 | // Each benchmark executor must mark their tool, used to check whether they're enabled for a query in "runBenchmarks" 74 | public abstract tool: BenchmarkTool 75 | public config: GlobalConfig 76 | 77 | /** Path to the configuration file to load */ 78 | private configPath = path.join(__dirname, '../../config.yaml') 79 | /** Path to the reports directory */ 80 | public baseReportPath = path.join(__dirname, '../../../reports') 81 | 82 | public localBinaryFolder = path.join(__dirname, '../../../bin') 83 | 84 | constructor(config?: GlobalConfig, configFilePath?: string) { 85 | this.config = config || this.readConfigFile(configFilePath) 86 | } 87 | 88 | // Returns "${queryName}-{toolName}" for marking benchmark result metrics 89 | // protected _getBrandedBenchmarkQueryName(queryName: string) { 90 | // return `${queryName}-${this.tool}` 91 | // } 92 | 93 | protected 
_makeBenchmarkName(benchmark: Benchmark) { 94 | const baseName = `${benchmark.name}-${this.tool}` 95 | switch (benchmark.execution_strategy) { 96 | case 'CUSTOM': 97 | return `${baseName}-custom` 98 | case 'FIXED_REQUEST_NUMBER': 99 | return `${baseName}-${benchmark.requests}-fixed-requests` 100 | case 'REQUESTS_PER_SECOND': 101 | return `${baseName}-${benchmark.rps}rps` 102 | case 'MAX_REQUESTS_IN_DURATION': 103 | return `${baseName}-${benchmark.duration}-max-requests` 104 | case 'MULTI_STAGE': 105 | return `${baseName}-multistage` 106 | } 107 | } 108 | 109 | public readConfigFile(pathTo?: string): GlobalConfig { 110 | const configFile = fs.readFileSync(pathTo || this.configPath, 'utf-8') 111 | return yaml.load(configFile) 112 | } 113 | 114 | public async runBenchmark(benchmark: Benchmark) { 115 | switch (benchmark.execution_strategy) { 116 | case 'CUSTOM': 117 | return this.runCustomBench(benchmark) 118 | case 'REQUESTS_PER_SECOND': 119 | return this.runRequestsPerSecondBench(benchmark) 120 | case 'FIXED_REQUEST_NUMBER': 121 | return this.runFixedRequestNumberBench(benchmark) 122 | case 'MAX_REQUESTS_IN_DURATION': 123 | return this.runMaxRequestsInDurationBench(benchmark) 124 | // Catch and discard "NOT IMPLEMENTED" error, only available in k6 125 | case 'MULTI_STAGE': { 126 | try { 127 | return this.runMultiStageBench(benchmark) 128 | } catch (error) { 129 | break 130 | } 131 | } 132 | } 133 | } 134 | 135 | abstract runCustomBench(bench: CustomBenchmark): Promise 136 | abstract runMultiStageBench( 137 | bench: MultiStageBenchmark 138 | ): Promise | never 139 | abstract runRequestsPerSecondBench( 140 | bench: RequestsPerSecondBenchmark 141 | ): Promise 142 | abstract runFixedRequestNumberBench( 143 | bench: FixedRequestNumberBenchmark 144 | ): Promise 145 | abstract runMaxRequestsInDurationBench( 146 | bench: MaxRequestsInDurationBenchmark 147 | ): Promise 148 | } 149 | -------------------------------------------------------------------------------- /app/queries/src/executors/base/types.ts: -------------------------------------------------------------------------------- 1 | import type * as stream from 'stream' 2 | 3 | import type Histogram from 'hdr-histogram-js/src/Histogram' 4 | import type { HistogramSummary } from 'hdr-histogram-js/src/Histogram' 5 | import * as precise_hdr from '../../PreciseHdrHistogram' 6 | 7 | import type { K6Options } from '../k6/types' 8 | import type { Stage as K6Stage } from 'k6/options' 9 | import type { Options as AutocannonOptions } from 'autocannon' 10 | 11 | /** 12 | * ======================== 13 | * MAIN TYPES 14 | * ======================== 15 | */ 16 | 17 | /** 18 | * The type of the query benchmark test being run 19 | */ 20 | export type ExecutionStrategy = 21 | | 'REQUESTS_PER_SECOND' 22 | | 'FIXED_REQUEST_NUMBER' 23 | | 'MAX_REQUESTS_IN_DURATION' 24 | | 'MULTI_STAGE' 25 | | 'CUSTOM' 26 | 27 | /** 28 | * Shared configuration between all benchmark runs 29 | */ 30 | export interface GlobalConfig { 31 | url: string 32 | /** When true assume the target is a hasura instance and run some additional checks */ 33 | extended_hasura_checks?: boolean 34 | headers?: Record 35 | queries: Benchmark[] 36 | /** When true, will log all HTTP responses */ 37 | debug?: boolean 38 | /** 39 | * Optional writable stream, can be used to stream results from process to HTML responses 40 | * @example 41 | * app.get('/path', (req, res) => { 42 | * var child = spawn('ls', ['-al']) 43 | * child.stdout.pipe(res) 44 | * }) 45 | */ 46 | writeStream?: stream.Writable 47 | } 48 | 49 
| export enum BenchmarkTool { 50 | AUTOCANNON = 'autocannon', 51 | K6 = 'k6', 52 | WRK2 = 'wrk2', 53 | } 54 | 55 | /** 56 | * Unused base class with shared attributes that specific types of benchmark configs extend 57 | */ 58 | interface _BenchmarkRunConfig { 59 | name: string 60 | tools: BenchmarkTool[] 61 | execution_strategy: ExecutionStrategy 62 | query: string 63 | variables?: Record 64 | connections?: number 65 | } 66 | 67 | /** 68 | * Config for a benchmark with requests per second for a duration 69 | */ 70 | export interface RequestsPerSecondBenchmark extends _BenchmarkRunConfig { 71 | execution_strategy: 'REQUESTS_PER_SECOND' 72 | rps: number 73 | duration: string 74 | } 75 | 76 | /** 77 | * Config for a benchmark that makes a fixed number of requests with no other constraints 78 | */ 79 | export interface FixedRequestNumberBenchmark extends _BenchmarkRunConfig { 80 | execution_strategy: 'FIXED_REQUEST_NUMBER' 81 | requests: number 82 | } 83 | 84 | /** 85 | * Config for a benchmark that makes the maximum number of requests in a duration 86 | */ 87 | export interface MaxRequestsInDurationBenchmark extends _BenchmarkRunConfig { 88 | execution_strategy: 'MAX_REQUESTS_IN_DURATION' 89 | duration: string 90 | } 91 | 92 | /** 93 | * Config for a benchmark with multiple stages of requests per seconds and durations 94 | */ 95 | export interface MultiStageBenchmark extends _BenchmarkRunConfig { 96 | execution_strategy: 'MULTI_STAGE' 97 | initial_rps: number 98 | stages: K6Stage[] 99 | } 100 | 101 | /** 102 | * Config for a benchmark with fully custom attributes 103 | */ 104 | export interface CustomBenchmark extends _BenchmarkRunConfig { 105 | execution_strategy: 'CUSTOM' 106 | options: { 107 | k6?: K6Options 108 | autocannon?: AutocannonOptions 109 | } 110 | } 111 | 112 | /** 113 | * Supertype that represents the union of all possible benchmark configs 114 | */ 115 | export type Benchmark = 116 | | RequestsPerSecondBenchmark 117 | | FixedRequestNumberBenchmark 118 | | MaxRequestsInDurationBenchmark 119 | | MultiStageBenchmark 120 | | CustomBenchmark 121 | 122 | /** 123 | * ======================== 124 | * UTILITY TYPES 125 | * ======================== 126 | */ 127 | 128 | // see 'parseHdrHistogramText()' 129 | export interface HDRHistogramParsedStats { 130 | value: string 131 | percentile: string 132 | totalCount: string 133 | ofOnePercentile: string 134 | } 135 | 136 | // add some extra statistics: 137 | export interface HistogramSummaryWithEtc extends HistogramSummary { 138 | mean: number 139 | geoMean?: number 140 | // The medians/geomeans for different sized prefixes of the samples, letting 141 | // us validate the results / get a sense if performance skewed over time 142 | p501stHalf?: number 143 | p501stQuarter?: number 144 | p501stEighth?: number 145 | geoMean1stHalf?: number 146 | geoMean1stQuarter?: number 147 | geoMean1stEighth?: number 148 | min: number 149 | stdDeviation: number 150 | } 151 | 152 | export interface BenchmarkMetrics { 153 | name: string 154 | time: { 155 | start: string | Date 156 | end: string | Date 157 | } 158 | requests: { 159 | count: number 160 | average: number 161 | } 162 | response: { 163 | totalBytes: number 164 | bytesPerSecond: number 165 | } 166 | histogram: { 167 | json: HistogramSummaryWithEtc 168 | parsedStats: HDRHistogramParsedStats[] 169 | } 170 | // A basic histogram with equal size buckets (the hdr histogram above predates this). 
171 | // (In fact this is 2 histograms: one for the entire set of data, and another 172 | // count for just the first half of the data) 173 | basicHistogram?: BasicHistogram 174 | // These are available when 'extended_hasura_checks: true' in the config yaml: 175 | extended_hasura_checks?: { 176 | bytes_allocated_per_request: number 177 | // memory residency stats, both before and after the benchmark runs: 178 | // see: https://hackage.haskell.org/package/base-4.15.0.0/docs/GHC-Stats.html 179 | live_bytes_before: number 180 | live_bytes_after: number 181 | mem_in_use_bytes_before: number 182 | mem_in_use_bytes_after: number 183 | } 184 | } 185 | 186 | export interface BenchmarkMetricParams { 187 | name: string 188 | histogram: precise_hdr.PreciseHdrHistogram 189 | basicHistogram?: BasicHistogram 190 | time: { 191 | start: Date | string 192 | end: Date | string 193 | } 194 | requests: { 195 | count: number 196 | average: number 197 | } 198 | response: { 199 | totalBytes: number 200 | bytesPerSecond: number 201 | }, 202 | // geometric mean of service times 203 | geoMean?: number 204 | // The medians/geomeans for different sized prefixes of the samples, letting 205 | // us validate the results / get a sense if performance skewed over time 206 | p501stHalf?: number 207 | p501stQuarter?: number 208 | p501stEighth?: number 209 | geoMean1stHalf?: number 210 | geoMean1stQuarter?: number 211 | geoMean1stEighth?: number 212 | } 213 | 214 | // See histogram() 215 | // 216 | // There are 'count' values in the bucket greater than 'gte'. 'count1stHalf' 217 | // is the bucket count just looking at the first half of the data (this might 218 | // help us determine whether the results skew over the course of the benchmark 219 | // run) 220 | export interface HistBucket { 221 | gte: number 222 | count: number 223 | count1stHalf: number 224 | } 225 | export interface BasicHistogram { 226 | buckets: HistBucket[] 227 | outliersRemoved: number 228 | } 229 | -------------------------------------------------------------------------------- /app/queries/src/executors/k6/index.ts: -------------------------------------------------------------------------------- 1 | import * as fs from 'fs-extra' 2 | import * as path from 'path' 3 | import * as cp from 'child_process' 4 | import * as hdr from '../../PreciseHdrHistogram' 5 | import readline from 'readline' 6 | 7 | import { 8 | BenchmarkTool, 9 | Benchmark, 10 | CustomBenchmark, 11 | FixedRequestNumberBenchmark, 12 | MaxRequestsInDurationBenchmark, 13 | MultiStageBenchmark, 14 | RequestsPerSecondBenchmark, 15 | HistBucket, 16 | BasicHistogram, 17 | } from '../base/types' 18 | 19 | import { 20 | K6Options, 21 | RampingArrivalRateExecutor, 22 | ConstantArrivalRateExecutor, 23 | PerVUIterationsExecutor, 24 | ConstantVUExecutor, 25 | K6Metric, 26 | K6Point, 27 | K6Summary, 28 | } from './types' 29 | 30 | import { BenchmarkExecutor, makeBenchmarkMetrics } from '../base' 31 | 32 | import execa from 'execa' 33 | import { lookpath } from 'lookpath' 34 | 35 | interface RunK6Metadata { 36 | queryName: string 37 | outputFile: string 38 | } 39 | 40 | export class K6Executor extends BenchmarkExecutor { 41 | public tool = BenchmarkTool.K6 42 | 43 | private k6BinaryPath = path.join(__dirname, 'k6', 'k6') 44 | private reportPath = path.join(this.baseReportPath, 'k6') 45 | 46 | public runCustomBench(bench: CustomBenchmark) { 47 | // Need to set the url, headers, query, and variables ENV or it won't work 48 | if (bench.options.k6?.scenarios) { 49 | for (let scenario in 
bench.options.k6?.scenarios) { 50 | bench.options.k6.scenarios[scenario].env = this._makeScenarioEnv(bench) 51 | } 52 | } 53 | 54 | const queryName = this._makeBenchmarkName(bench) 55 | const metadata = { 56 | queryName, 57 | outputFile: `${queryName}.json`, 58 | } 59 | 60 | return this._runK6(metadata, bench.options.k6 as K6Options) 61 | } 62 | 63 | public runMultiStageBench(bench: MultiStageBenchmark) { 64 | const scenario: RampingArrivalRateExecutor = { 65 | executor: 'ramping-arrival-rate', 66 | startRate: bench.initial_rps, 67 | timeUnit: '1s', 68 | preAllocatedVUs: bench.connections || 10, 69 | stages: bench.stages, 70 | env: this._makeScenarioEnv(bench), 71 | } 72 | 73 | const queryName = this._makeBenchmarkName(bench) 74 | const metadata = { 75 | queryName, 76 | outputFile: `${queryName}.json`, 77 | } 78 | 79 | return this._runK6(metadata, { 80 | scenarios: { 81 | [bench.name]: scenario, 82 | }, 83 | }) 84 | } 85 | 86 | public runRequestsPerSecondBench(bench: RequestsPerSecondBenchmark) { 87 | const scenario: ConstantArrivalRateExecutor = { 88 | executor: 'constant-arrival-rate', 89 | rate: bench.rps, 90 | timeUnit: '1s', 91 | duration: bench.duration, 92 | preAllocatedVUs: bench.connections || 10, 93 | env: this._makeScenarioEnv(bench), 94 | } 95 | 96 | const queryName = this._makeBenchmarkName(bench) 97 | const metadata = { 98 | queryName, 99 | outputFile: `${queryName}.json`, 100 | } 101 | 102 | return this._runK6(metadata, { 103 | scenarios: { 104 | [bench.name]: scenario, 105 | }, 106 | }) 107 | } 108 | 109 | public runFixedRequestNumberBench(bench: FixedRequestNumberBenchmark) { 110 | const scenario: PerVUIterationsExecutor = { 111 | executor: 'per-vu-iterations', 112 | iterations: bench.requests / (bench.connections || 10), 113 | vus: bench.connections || 10, 114 | env: this._makeScenarioEnv(bench), 115 | } 116 | 117 | const queryName = this._makeBenchmarkName(bench) 118 | const metadata = { 119 | queryName, 120 | outputFile: `${queryName}.json`, 121 | } 122 | 123 | return this._runK6(metadata, { 124 | scenarios: { 125 | [bench.name]: scenario, 126 | }, 127 | }) 128 | } 129 | 130 | public runMaxRequestsInDurationBench(bench: MaxRequestsInDurationBenchmark) { 131 | const scenario: ConstantVUExecutor = { 132 | executor: 'constant-vus', 133 | duration: bench.duration, 134 | vus: bench.connections || 10, 135 | env: this._makeScenarioEnv(bench), 136 | } 137 | 138 | const queryName = this._makeBenchmarkName(bench) 139 | const metadata = { 140 | queryName, 141 | outputFile: `${queryName}.json`, 142 | } 143 | 144 | return this._runK6(metadata, { 145 | scenarios: { 146 | [bench.name]: scenario, 147 | }, 148 | }) 149 | } 150 | 151 | /** 152 | * Must return non-nested JSON for k6, hence the need to stringify headers and variables 153 | */ 154 | private _makeScenarioEnv(bench: Benchmark) { 155 | return { 156 | url: this.config.url, 157 | query: bench.query, 158 | headers: JSON.stringify(this.config.headers), 159 | variables: JSON.stringify(bench.variables), 160 | } 161 | } 162 | 163 | private async getBinaryPath() { 164 | const defaultPath = await lookpath('k6') 165 | if (defaultPath) return defaultPath 166 | const localK6Binary = path.join(this.localBinaryFolder, 'k6/k6') 167 | const localBinaryExists = await fs.pathExists(localK6Binary) 168 | if (localBinaryExists) return localK6Binary 169 | throw new Error( 170 | 'Could not find K6 binary either globally in $PATH or in local ./bin/k6 folder' 171 | ) 172 | } 173 | 174 | /** 175 | * Internal method for calling the actual benchmark run, 
dispatched from the more specific benchmark functions 176 | */ 177 | private async _runK6(metadata: RunK6Metadata, config: K6Options) { 178 | const { queryName, outputFile } = metadata 179 | 180 | // If "debug" true, log all HTTP responses 181 | if (this.config.debug) config.httpDebug = 'full' 182 | 183 | // Write the K6 configuration JSON to a temp file, to pass as CLI flag 184 | const tmpDir = path.join(__dirname, 'tmp') 185 | const tmpConfig = path.join(tmpDir, `${queryName}_config.json`) 186 | await fs.outputJSON(tmpConfig, config) 187 | 188 | // outPath is where the JSON report stats will go, scriptFile points K6 to the JS script for the load test 189 | const outPath = path.join(this.reportPath, outputFile) 190 | const scriptFile = path.join(this.localBinaryFolder, 'k6/loadScript.js') 191 | const rawStatsFilePath = path.join(tmpDir, 'k6_raw_stats.json') 192 | 193 | // Make sure the directory exists, or K6 will fail when writing 194 | await fs.ensureFile(outPath) 195 | 196 | // Invoke 'k6 run --config --summary-export ' 197 | const baseOpts: string[] = [] 198 | baseOpts.push('run', scriptFile) 199 | baseOpts.push('--config', tmpConfig) 200 | baseOpts.push('--out', 'json=' + rawStatsFilePath) 201 | baseOpts.push('--summary-export', outPath) 202 | 203 | const k6Binary = await this.getBinaryPath() 204 | 205 | const benchmarkStart = new Date() 206 | await execa(k6Binary, baseOpts, { stdio: 'inherit' }) 207 | const benchmarkEnd = new Date() 208 | 209 | // Create a line-reader to read each line of the JSONL format K6 logs 210 | // Note: we use the crlfDelay option to recognize all instances of CR LF 211 | // ('\r\n') in input.txt as a single line break 212 | const fileStream = fs.createReadStream(rawStatsFilePath) 213 | const rl = readline.createInterface({ 214 | input: fileStream, 215 | crlfDelay: Infinity, 216 | }) 217 | 218 | // We'll build an hdr histogram of HTTP Request durations 219 | const hdrHistogram = hdr.build() 220 | // ...and record raw durations for processing: 221 | var reqDurations: number[] = [] 222 | 223 | for await (const line of rl) { 224 | const stat: K6Metric | K6Point = JSON.parse(line) 225 | // filter for just service time of successful queries: 226 | if (stat.type != 'Point') continue 227 | if (stat.metric != 'http_req_duration') continue 228 | if (Number(stat.data.tags.status) < 200) continue 229 | hdrHistogram.recordValue(stat.data.value) 230 | 231 | reqDurations.push(stat.data.value) 232 | } 233 | 234 | // Remove the temp config file with the K6 run parameters, and logging stats 235 | await fs.remove(tmpConfig) 236 | await fs.remove(rawStatsFilePath) 237 | 238 | // Return the JSON output stats produced by K6 for bench 239 | const jsonStats: K6Summary = fs.readJSONSync(outPath) 240 | const metrics = makeBenchmarkMetrics({ 241 | name: metadata.queryName, 242 | histogram: hdrHistogram, 243 | // filter some outliers to get better granularity for bulk of samples: 244 | basicHistogram: histogram(100, reqDurations, hdrHistogram.toJSON().p99), 245 | time: { 246 | start: benchmarkStart.toISOString(), 247 | end: benchmarkEnd.toISOString(), 248 | }, 249 | requests: { 250 | count: jsonStats.metrics.http_reqs.count, 251 | average: jsonStats.metrics.http_reqs.rate, 252 | }, 253 | response: { 254 | totalBytes: jsonStats.metrics.data_received.count, 255 | bytesPerSecond: jsonStats.metrics.data_received.rate, 256 | }, 257 | geoMean: geoMean(reqDurations), 258 | p501stHalf: median(reqDurations.slice(0,reqDurations.length/2)), 259 | p501stQuarter: 
222 | 223 | for await (const line of rl) { 224 | const stat: K6Metric | K6Point = JSON.parse(line) 225 | // filter for just service time of successful queries: 226 | if (stat.type != 'Point') continue 227 | if (stat.metric != 'http_req_duration') continue 228 | if (Number(stat.data.tags.status) < 200) continue 229 | hdrHistogram.recordValue(stat.data.value) 230 | 231 | reqDurations.push(stat.data.value) 232 | } 233 | 234 | // Remove the temp config file with the K6 run parameters, and logging stats 235 | await fs.remove(tmpConfig) 236 | await fs.remove(rawStatsFilePath) 237 | 238 | // Return the JSON output stats produced by K6 for bench 239 | const jsonStats: K6Summary = fs.readJSONSync(outPath) 240 | const metrics = makeBenchmarkMetrics({ 241 | name: metadata.queryName, 242 | histogram: hdrHistogram, 243 | // filter some outliers to get better granularity for bulk of samples: 244 | basicHistogram: histogram(100, reqDurations, hdrHistogram.toJSON().p99), 245 | time: { 246 | start: benchmarkStart.toISOString(), 247 | end: benchmarkEnd.toISOString(), 248 | }, 249 | requests: { 250 | count: jsonStats.metrics.http_reqs.count, 251 | average: jsonStats.metrics.http_reqs.rate, 252 | }, 253 | response: { 254 | totalBytes: jsonStats.metrics.data_received.count, 255 | bytesPerSecond: jsonStats.metrics.data_received.rate, 256 | }, 257 | geoMean: geoMean(reqDurations), 258 | p501stHalf: median(reqDurations.slice(0,reqDurations.length/2)), 259 | p501stQuarter: median(reqDurations.slice(0,reqDurations.length/4)), 260 | p501stEighth: median(reqDurations.slice(0,reqDurations.length/8)), 261 | geoMean1stHalf: geoMean(reqDurations.slice(0,reqDurations.length/2)), 262 | geoMean1stQuarter: geoMean(reqDurations.slice(0,reqDurations.length/4)), 263 | geoMean1stEighth: geoMean(reqDurations.slice(0,reqDurations.length/8)), 264 | }) 265 | 266 | return metrics 267 | } 268 | } 269 | 270 | // geometric mean, with exponent distributed over product so we don't overflow 271 | function geoMean(xs: number[]): number { 272 | return xs.map(x => Math.pow(x, 1/xs.length)).reduce((acc, x) => acc * x) 273 | } 274 | 275 | // Generate a double histogram of the input numbers: one containing all the 276 | // data, and the other just the first half of the data. Optionally filtering 277 | // out outliers >= maxValue 278 | // 279 | // NOTE: To save space and aid readability we'll filter out any buckets with a 280 | // count of 0 that follow a bucket with a count of 0. This can still be graphed 281 | // fine without extra accommodations using a stepped line plot, as we plan to do 282 | function histogram(numBuckets: number, xs: number[], maxValue: number = Number.MAX_SAFE_INTEGER): BasicHistogram { 283 | if (numBuckets < 1 || xs.length < 2) { throw "We need at least one bucket and xs.length > 1" } 284 | 285 | // sort list, keeping track of original index so we can determine which 286 | // part of the data we're looking at 287 | var outliersRemoved = 0 288 | var xsSorted = xs.map((x, ix) => [x,ix]) 289 | .filter(([x,_]) => { 290 | let ok = x < maxValue 291 | if (!ok) outliersRemoved++ 292 | return ok 293 | }) 294 | xsSorted.sort((a,b) => a[0] - b[0]) 295 | // index of last element in the first half of data: 296 | const ix1stHalfLast = xs.length/2 - 1 297 | 298 | const bucketWidth = (xsSorted[xsSorted.length - 1][0] - xsSorted[0][0]) / numBuckets 299 | 300 | var buckets: HistBucket[] = [] 301 | for (let gte = xsSorted[0][0] ; true ; gte+=bucketWidth) { 302 | // Last bucket; add remaining and stop 303 | if (buckets.length === (numBuckets-1)) { 304 | var count1stHalf = 0 305 | xsSorted.forEach( ([_,ixOrig]) => { if (ixOrig <= ix1stHalfLast) count1stHalf++ }) 306 | buckets.push({gte, count: xsSorted.length, count1stHalf}) 307 | break 308 | } 309 | var count = 0 310 | var count1stHalf = 0 311 | var ixNext 312 | // this should always consume at least one value: 313 | xsSorted.some(([x, ixOrig], ix) => { 314 | if (x < (gte+bucketWidth)) { 315 | count++ 316 | if (ixOrig <= ix1stHalfLast) count1stHalf++ 317 | return false // i.e. keep looping 318 | } else { 319 | ixNext = ix 320 | return true 321 | } 322 | }) 323 | if (ixNext === undefined) {throw "Bugs in histogram!"} 324 | xsSorted = xsSorted.slice(ixNext) 325 | buckets.push({gte, count, count1stHalf}) 326 | } 327 | // Keep at most one 0 bucket in a row, i.e. `{gte: n, count: 0}` means 328 | // "this and all following buckets are empty, up to the next recorded bucket"
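// Illustrative example (not from the original source): raw bucket counts of
//   [4, 0, 0, 0, 7, 2, 0, 0]
// are emitted sparsely below as
//   [4, 0, 7, 2, 0]
// i.e. each run of empty buckets collapses into a single zero marker.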
329 | var bucketsSparse: HistBucket[] = [] 330 | var inAZeroSpan = false 331 | buckets.forEach( b => { 332 | if (inAZeroSpan && b.count === 0) { 333 | // drop this bucket 334 | } else if (!inAZeroSpan && b.count === 0) { 335 | // include this zero bucket but not subsequent zero buckets 336 | bucketsSparse.push(b) 337 | inAZeroSpan = true 338 | } else { 339 | inAZeroSpan = false 340 | bucketsSparse.push(b) 341 | } 342 | }) 343 | 344 | return {buckets: bucketsSparse, outliersRemoved} 345 | } 346 | 347 | // Copy paste: https://stackoverflow.com/a/53660837/176841 348 | function median(numbers: number[]): number { 349 | if (numbers.length === 0) return 0 // I guess 350 | 351 | const sorted = Float64Array.from(numbers).sort(); 352 | const middle = Math.floor(sorted.length / 2); 353 | 354 | if (sorted.length % 2 === 0) { 355 | return (sorted[middle - 1] + sorted[middle]) / 2; 356 | } 357 | 358 | return sorted[middle]; 359 | } 360 | -------------------------------------------------------------------------------- /app/queries/src/executors/reports/k6/SearchAlbumsWithArtist_2000rps.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hasura/graphql-bench/2163a70a5324766521b8d6ceb7a73b8f0d12688d/app/queries/src/executors/reports/k6/SearchAlbumsWithArtist_2000rps.json -------------------------------------------------------------------------------- /app/queries/src/executors/wrk2/index.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * ======================== 3 | * WRK2 4 | * ======================== 5 | */ 6 | 7 | import { 8 | RunWrk2Metadata, 9 | Wrk2BinaryArgs, 10 | WrkStatsToBenchmarkMetricParams, 11 | } from './types' 12 | 13 | import { 14 | BenchmarkTool, 15 | CustomBenchmark, 16 | FixedRequestNumberBenchmark, 17 | MaxRequestsInDurationBenchmark, 18 | MultiStageBenchmark, 19 | RequestsPerSecondBenchmark, 20 | BenchmarkMetrics, 21 | } from '../base/types' 22 | 23 | import { parseHdrHistogramText, BenchmarkExecutor } from '../base' 24 | 25 | import path from 'path' 26 | import fs from 'fs-extra' 27 | import execa from 'execa' 28 | import { lookpath } from 'lookpath' 29 | 30 | export class Wrk2Executor extends BenchmarkExecutor { 31 | public tool = BenchmarkTool.WRK2 32 | private reportPath = path.join(this.baseReportPath, 'wrk2') 33 | 34 | private _wrk2OutputToBenchmarkMetric( 35 | params: WrkStatsToBenchmarkMetricParams 36 | ): BenchmarkMetrics { 37 | const { name, stats, start, end, hdrHistogramStdout } = params 38 | const findPercentile = (stats, number) => 39 | stats.latency_distribution.find((it) => it.percentile == number) 40 | return { 41 | name, 42 | requests: { 43 | average: stats.requests_per_second, 44 | count: stats.requests, 45 | }, 46 | response: { 47 | totalBytes: stats.bytes, 48 | bytesPerSecond: stats.bytes_transfer_per_second, 49 | }, 50 | time: { 51 | start: start.toISOString(), 52 | end: end.toISOString(), 53 | }, 54 | histogram: { 55 | parsedStats: parseHdrHistogramText(hdrHistogramStdout), 56 | json: { 57 | totalCount: stats.requests, 58 | max: stats.latency_aggregate.max, 59 | mean: stats.latency_aggregate.mean, 60 | min: stats.latency_aggregate.min, 61 | stdDeviation: stats.latency_aggregate.stdev, 62 | p50: findPercentile(stats, 50).latency_in_milliseconds, 63 | p75: findPercentile(stats, 75).latency_in_milliseconds, 64 | p90: findPercentile(stats, 90).latency_in_milliseconds, 65 | p97_5: findPercentile(stats,
97.5).latency_in_milliseconds, 66 | p99: findPercentile(stats, 99).latency_in_milliseconds, 67 | p99_9: findPercentile(stats, 99.9).latency_in_milliseconds, 68 | p99_99: findPercentile(stats, 99.99).latency_in_milliseconds, 69 | p99_999: findPercentile(stats, 99.999).latency_in_milliseconds, 70 | }, 71 | }, 72 | } 73 | } 74 | 75 | /** 76 | * Returns the HDR Histogram text produced by wrk from its stdout 77 | */ 78 | private _getHdrHistogramFromWrkStdout(stdout: string) { 79 | const startStr = ' Value Percentile TotalCount 1/(1-Percentile)' 80 | const endStr = '----------------------------------------------------------' 81 | const start = stdout.indexOf(startStr) 82 | const end = stdout.indexOf(endStr) 83 | const hdrHistogram = stdout.substring(start, end) 84 | return hdrHistogram 85 | } 86 | 87 | public runCustomBench(bench: CustomBenchmark): never { 88 | throw new Error('Work in progress') 89 | } 90 | 91 | public runFixedRequestNumberBench(bench: FixedRequestNumberBenchmark): never { 92 | throw new Error('Fixed request benchmark not possible with wrk2') 93 | } 94 | 95 | public runMultiStageBench(bench: MultiStageBenchmark): never { 96 | throw new Error('Multi stage benchmark not possible with wrk2') 97 | } 98 | 99 | public async runMaxRequestsInDurationBench( 100 | bench: MaxRequestsInDurationBenchmark 101 | ) { 102 | const queryName = this._makeBenchmarkName(bench) 103 | const metadata = { 104 | queryName, 105 | outputFile: `${queryName}.json`, 106 | } 107 | 108 | return this._runWrk2(metadata, { 109 | url: this.config.url, 110 | options: { 111 | script: './graphql-bench.lua', 112 | latency: true, 113 | duration: bench.duration, 114 | // Ludicrously high rate, to emulate an unbounded max request rate, since wrk2 doesn't support this natively 115 | rate: 1000000, 116 | }, 117 | config: { 118 | query: bench.query, 119 | variables: bench.variables, 120 | headers: this.config.headers, 121 | }, 122 | }) 123 | } 124 | 125 | public async runRequestsPerSecondBench(bench: RequestsPerSecondBenchmark) { 126 | const queryName = this._makeBenchmarkName(bench) 127 | console.log('Wrk2, runRequestsPerSecondBench:') 128 | console.log('Bench:', bench) 129 | console.log('queryName:', queryName) 130 | const metadata = { 131 | queryName, 132 | outputFile: `${queryName}.json`, 133 | } 134 | return this._runWrk2(metadata, { 135 | url: this.config.url, 136 | options: { 137 | script: path.join(this.localBinaryFolder, '/wrk/graphql-bench.lua'), 138 | latency: true, 139 | duration: bench.duration, 140 | rate: bench.rps, 141 | }, 142 | config: { 143 | query: bench.query, 144 | variables: bench.variables, 145 | headers: this.config.headers, 146 | }, 147 | }) 148 | } 149 | 150 | /** 151 | * Makes the `spawn` CLI input argument array for "wrk (options) (url) -- (args)" 152 | */ 153 | private _makeWrk2CmdArgs(params: Wrk2BinaryArgs) { 154 | let args = [] 155 | for (let [key, val] of Object.entries(params.options)) { 156 | // If it's a boolean value, it's a flag and we should just pass the flag itself 157 | if (typeof val == 'boolean' && val == true) args.push('--' + key) 158 | // Else pass the flag and the value 159 | else args.push('--' + key, val) 160 | } 161 | args.push(params.url, '--', JSON.stringify(params.config)) 162 | return args 163 | }
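// Hypothetical example (not in the original source): options of
//   { latency: true, duration: '10s', rate: 500 }
// together with a url and a config object produce an argv array like
//   ['--latency', '--duration', '10s', '--rate', 500, <url>, '--', '<JSON-encoded config>']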
164 | 165 | private async getBinaryPath() { 166 | const defaultPath = await lookpath('wrk') 167 | if (defaultPath) return defaultPath 168 | const localWrkBinary = path.join(this.localBinaryFolder, 'wrk/wrk') 169 | const localBinaryExists = await fs.pathExists(localWrkBinary) 170 | if (localBinaryExists) return localWrkBinary 171 | throw new Error( 172 | 'Could not find wrk binary either globally in $PATH or in local ./bin/wrk folder' 173 | ) 174 | } 175 | 176 | private async _runWrk2(metadata: RunWrk2Metadata, config: Wrk2BinaryArgs) { 177 | const wrkPath = await this.getBinaryPath() 178 | const start = new Date() 179 | const wrk = execa(wrkPath, this._makeWrk2CmdArgs(config), { 180 | env: { 181 | // This is a bit hairy. We need the "graphql-bench.lua" script to be able to call require("json"). 182 | // By default, Lua files can "require" any other file in the same directory the program was run from. The "module context" is "./". 183 | // The way that Lua modules work is that it tries to substitute the name of the require()'d module with any "?" character in each pattern in LUA_PATH. 184 | // (Patterns are separated by semicolons) 185 | // So what we need to do is call wrk and set up LUA_PATH with a pattern that when "?" is filled in with "json" it points to the "json.lua" file. 186 | // To do this, we need to point it to the absolute path of: 187 | // ./app/queries/bin/wrk/json.lua 188 | // So, to accomplish this we set LUA_PATH to: 189 | // ./app/queries/bin/wrk/?.lua 190 | LUA_PATH: path.join(this.localBinaryFolder, 'wrk', '?.lua;;'), 191 | }, 192 | }) 193 | wrk.stdout.pipe(process.stdout) 194 | const output = await wrk 195 | const end = new Date() 196 | 197 | const stats = JSON.parse(output.stderr) 198 | // Also emits these same stats to stderr, so we could make the script not write the stats file and just read from there 199 | // const stats: Wrk2StatsOutputJson = await fs.readJSON('/tmp/wrk2-stats.json') 200 | if (!stats) throw new Error('Failed reading stats output from wrk stderr') 201 | 202 | // Write wrk2 results object to report file in folder 203 | const outfile = path.join(this.reportPath, metadata.outputFile) 204 | fs.outputJSONSync(outfile, stats, { spaces: 2 }) 205 | 206 | const hdrHistogramStdout = this._getHdrHistogramFromWrkStdout(output.stdout) 207 | const metrics = this._wrk2OutputToBenchmarkMetric({ 208 | name: metadata.queryName, 209 | start, 210 | end, 211 | stats, 212 | hdrHistogramStdout, 213 | }) 214 | 215 | return metrics 216 | } 217 | } 218 | -------------------------------------------------------------------------------- /app/queries/src/executors/wrk2/types.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Type for the invocation of wrk2 CLI: 3 | * "Usage: wrk <options> <url> -- <args>" 4 | */ 5 | export interface Wrk2BinaryArgs { 6 | url: string 7 | options: Wrk2Options 8 | config: { 9 | query: string 10 | variables?: Record<string, any> 11 | headers?: Record<string, string> 12 | } 13 | } 14 | 15 | /** 16 | * Numeric arguments may include an SI unit (1k, 1M, 1G) 17 | * Time arguments may include a time unit (2s, 2m, 2h) 18 | */ 19 | export interface Wrk2Options { 20 | /** 21 | * -c, --connections <N> Connections to keep open 22 | * @default 10 23 | */ 24 | connections?: string | number 25 | /** 26 | * -d, --duration <T> Duration of test 27 | * @default '10s' 28 | */ 29 | duration?: string | number 30 | /** 31 | * -t, --threads <N> Number of threads to use 32 | * @default 2 33 | */ 34 | threads?: string | number 35 | /** 36 | * Path to Lua script on disk 37 | * -s, --script <S> Load Lua script file 38 | */ 39 | script?: string 40 | /** 41 | * -H, --header <H> Add header to request 42 | */ 43 | header?: string 44 | /** 45 | * -L --latency Print latency statistics 46 | */ 47 | latency?: boolean 48 | /** 49 | * -U --u_latency Print
uncorrected latency statistics 50 | */ 51 | u_latency?: boolean 52 | /** 53 | * --timeout <T> Socket/request timeout 54 | */ 55 | timeout?: string | number 56 | /** 57 | * -B, --batch_latency Measure latency of whole batches of pipelined ops (as opposed to each op) 58 | */ 59 | batch_latency?: boolean 60 | /** 61 | * -v, --version Print version details 62 | */ 63 | version?: boolean 64 | /** 65 | * -R, --rate <T> work rate (throughput) in requests/sec (total) 66 | */ 67 | rate: string | number 68 | } 69 | 70 | /** 71 | * Output type 72 | */ 73 | export interface Wrk2StatsOutputJson { 74 | duration_in_milliseconds: number 75 | requests: number 76 | bytes_transfer_per_second: number 77 | latency_aggregate: LatencyAggregate 78 | latency_distribution: LatencyDistribution[] 79 | bytes: number 80 | requests_per_second: number 81 | } 82 | 83 | export interface LatencyAggregate { 84 | min: number 85 | max: number 86 | mean: number 87 | stdev: number 88 | } 89 | 90 | export interface LatencyDistribution { 91 | percentile: 50 | 75 | 90 | 95 | 97.5 | 99 | 99.9 | 99.99 | 99.999 | 100 92 | latency_in_milliseconds: number 93 | } 94 | 95 | export interface WrkStatsToBenchmarkMetricParams { 96 | name: string 97 | stats: Wrk2StatsOutputJson 98 | start: Date 99 | end: Date 100 | hdrHistogramStdout: string 101 | } 102 | 103 | export interface RunWrk2Metadata { 104 | queryName: string 105 | outputFile?: string 106 | } 107 | -------------------------------------------------------------------------------- /app/queries/src/main.ts: -------------------------------------------------------------------------------- 1 | import { AutocannonExecutor } from './executors/autocannon/index' 2 | import { K6Executor } from './executors/k6/index' 3 | import { Wrk2Executor } from './executors/wrk2/index' 4 | import fetch from 'node-fetch' 5 | import { RequestInfo } from 'node-fetch' 6 | 7 | // Helper http client: 8 | async function http( 9 | request: RequestInfo 10 | ): Promise<any> { 11 | const response = await fetch(request); 12 | const body = await response.json(); 13 | return body; 14 | } 15 | 16 | import { 17 | GlobalConfig, 18 | BenchmarkTool, 19 | BenchmarkMetrics, 20 | } from './executors/base/types' 21 | 22 | export class BenchmarkRunner { 23 | constructor(public config: GlobalConfig) {} 24 | 25 | public async runBenchmarks(only_query?: string) { 26 | let results: BenchmarkMetrics[] = [] 27 | 28 | for (let query of this.config.queries) { 29 | // Maybe run just a single requested benchmark from the config: 30 | if (only_query && query.name != only_query) continue 31 | 32 | for (let tool of query.tools) { 33 | // Get RTS stats before benchmarks: 34 | let bare_url = this.config.url.match("http.*//[^\/]*")[0] 35 | let allocated_bytes_before 36 | let live_bytes_before 37 | let mem_in_use_bytes_before 38 | if (this.config.extended_hasura_checks) { 39 | const stats = await http(bare_url+'/dev/rts_stats') 40 | allocated_bytes_before = stats.allocated_bytes 41 | live_bytes_before = stats.gc.gcdetails_live_bytes 42 | mem_in_use_bytes_before = stats.gc.gcdetails_mem_in_use_bytes 43 | } 44 | 45 | switch (tool) { 46 | case BenchmarkTool.AUTOCANNON: { 47 | const executor = new AutocannonExecutor(this.config) 48 | var metrics = await executor.runBenchmark(query) 49 | break 50 | } 51 | case BenchmarkTool.K6: { 52 | const executor = new K6Executor(this.config) 53 | var metrics = await executor.runBenchmark(query) 54 | break 55 | } 56 | case BenchmarkTool.WRK2: { 57 | const executor = new Wrk2Executor(this.config) 58 | var metrics = await
executor.runBenchmark(query) 59 | break 60 | } 61 | } 62 | 63 | // Get RTS stats after: 64 | if (this.config.extended_hasura_checks) { 65 | const stats = await http(bare_url+'/dev/rts_stats') 66 | let allocated_bytes_after = stats.allocated_bytes 67 | let live_bytes_after = stats.gc.gcdetails_live_bytes 68 | let mem_in_use_bytes_after = stats.gc.gcdetails_mem_in_use_bytes 69 | 70 | metrics.extended_hasura_checks = { 71 | 'bytes_allocated_per_request': 72 | (allocated_bytes_after - allocated_bytes_before) / metrics.requests.count, 73 | live_bytes_before, 74 | live_bytes_after, 75 | mem_in_use_bytes_before, 76 | mem_in_use_bytes_after, 77 | } 78 | } 79 | 80 | results.push(metrics) 81 | } 82 | } 83 | 84 | return results 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /app/queries/src/tests.ts: -------------------------------------------------------------------------------- 1 | import { BenchmarkRunner } from './main' 2 | 3 | import { 4 | GlobalConfig, 5 | BenchmarkTool, 6 | MaxRequestsInDurationBenchmark, 7 | FixedRequestNumberBenchmark, 8 | RequestsPerSecondBenchmark, 9 | MultiStageBenchmark, 10 | CustomBenchmark, 11 | } from './executors/base/types' 12 | 13 | /** 14 | * Declare some queries 15 | */ 16 | 17 | const queries = { 18 | searchAlbumsWithArtist: ` 19 | query SearchAlbumsWithArtist { 20 | albums(where: {title: {_like: "%Rock%"}}) { 21 | id 22 | title 23 | artist { 24 | name 25 | id 26 | } 27 | } 28 | }`, 29 | albumsArtistTracksGenreAll: ` 30 | query AlbumsArtistTracksGenreAll { 31 | albums { 32 | id 33 | title 34 | artist { 35 | id 36 | name 37 | } 38 | tracks { 39 | id 40 | name 41 | genre { 42 | name 43 | } 44 | } 45 | } 46 | }`, 47 | } 48 | 49 | const rpsBench: RequestsPerSecondBenchmark = { 50 | tools: [BenchmarkTool.AUTOCANNON, BenchmarkTool.K6, BenchmarkTool.WRK2], 51 | name: 'AlbumsArtistTrackGenreAll', 52 | execution_strategy: 'REQUESTS_PER_SECOND', 53 | duration: '3s', 54 | rps: 500, 55 | query: queries.albumsArtistTracksGenreAll, 56 | } 57 | 58 | // wrk2 can't handle a fixed request number benchmark 59 | const fixedReqBench: FixedRequestNumberBenchmark = { 60 | tools: [BenchmarkTool.AUTOCANNON, BenchmarkTool.K6], 61 | name: 'AlbumsArtistTrackGenreAll', 62 | execution_strategy: 'FIXED_REQUEST_NUMBER', 63 | requests: 1000, 64 | query: queries.albumsArtistTracksGenreAll, 65 | } 66 | 67 | const maxReqInDurationBench: MaxRequestsInDurationBenchmark = { 68 | tools: [BenchmarkTool.AUTOCANNON, BenchmarkTool.K6, BenchmarkTool.WRK2], 69 | name: 'AlbumsArtistTrackGenreAll', 70 | execution_strategy: 'MAX_REQUESTS_IN_DURATION', 71 | duration: '10s', 72 | query: queries.albumsArtistTracksGenreAll, 73 | } 74 | 75 | const multiStageBench: MultiStageBenchmark = { 76 | tools: [BenchmarkTool.K6], 77 | name: 'SearchAlbumsWithArtist', 78 | execution_strategy: 'MULTI_STAGE', 79 | query: queries.searchAlbumsWithArtist, 80 | initial_rps: 0, 81 | stages: [ 82 | { 83 | duration: '5s', 84 | target: 100, 85 | }, 86 | { 87 | duration: '5s', 88 | target: 1000, 89 | }, 90 | { 91 | duration: '3s', 92 | target: 300, 93 | }, 94 | { 95 | duration: '5s', 96 | target: 0, 97 | }, 98 | ], 99 | } 100 | 101 | /** 102 | * Set up the global benchmark config 103 | */ 104 | 105 | const tests: GlobalConfig = { 106 | url: 'http://localhost:8085/v1/graphql', 107 | headers: { 'X-Hasura-Admin-Secret': 'my-secret' }, 108 | queries: [rpsBench, fixedReqBench, maxReqInDurationBench, multiStageBench], 109 | } 110 | 111 | async function main() { 112 | const runner = new 
BenchmarkRunner(tests) 113 | const results = await runner.runBenchmarks() 114 | console.log('Test results:', results) 115 | } 116 | 117 | main().catch((err) => { 118 | console.log('Error running tests:', err) 119 | }) 120 | -------------------------------------------------------------------------------- /app/queries/src/utils.ts: -------------------------------------------------------------------------------- 1 | import * as fs from 'fs' 2 | import readline from 'readline' 3 | 4 | import * as hdr from './PreciseHdrHistogram' 5 | 6 | async function* parseNDJSON(filepath: string) { 7 | const filestream = fs.createReadStream(filepath) 8 | const lines = readline.createInterface({ 9 | input: filestream, 10 | crlfDelay: Infinity, 11 | }) 12 | 13 | for await (const line of lines) { 14 | yield JSON.parse(line) 15 | } 16 | } 17 | 18 | interface ParsedHDRHistogramSummary { 19 | buckets: number 20 | count: number 21 | max: number 22 | mean: number 23 | stddev: number 24 | sub_buckets: number 25 | } 26 | 27 | interface ParsedHDRHistogramValue { 28 | value: number 29 | percentile: number 30 | total_count: number 31 | of_one_percentile: number 32 | } 33 | 34 | interface ParsedHDRHistogram { 35 | summary: ParsedHDRHistogramSummary 36 | values: ParsedHDRHistogramValue[] 37 | } 38 | 39 | type Primitive = 40 | | StringConstructor 41 | | NumberConstructor 42 | | DateConstructor 43 | | BooleanConstructor 44 | 45 | function convertPropertiesTo(type: Primitive, obj: any) { 46 | for (let k in obj) obj[k] = type(obj[k]) 47 | return obj 48 | } 49 | 50 | function parseHdrHistogram(text: string): ParsedHDRHistogram { 51 | let valuesRegex = new RegExp( 52 | /(?<value>\d+\.?\d*)[ ]+(?<percentile>\d+\.?\d*)[ ]+(?<total_count>\d+\.?\d*)([ ]+(?<of_one_percentile>\d+\.?\d*))?/g 53 | ) 54 | 55 | // prettier-ignore 56 | let summaryRegex = new RegExp( 57 | /#\[Mean = (?<mean>\d+\.?\d*), StdDeviation = (?<stddev>\d+\.?\d*)]/.source + "\n" + 58 | /#\[Max = (?<max>\d+\.?\d*), Total count = (?<count>\d+\.?\d*)]/.source + "\n" + 59 | /#\[Buckets = (?<buckets>\d+\.?\d*), SubBuckets = (?<sub_buckets>\d+\.?\d*)]/.source, 60 | 'g' 61 | ) 62 | 63 | // prettier-ignore 64 | const values: ParsedHDRHistogramValue[] = [...text.matchAll(valuesRegex)] 65 | .flatMap((it) => convertPropertiesTo(Number, it.groups as any)) 66 | 67 | const summary: ParsedHDRHistogramSummary = [...text.matchAll(summaryRegex)] 68 | .flatMap((it) => convertPropertiesTo(Number, it.groups as any)) 69 | .pop() 70 | 71 | return { summary, values } 72 | } 73 | 74 | let testString = 75 | // prettier-ignore 76 | ` Value Percentile TotalCount 1/(1-Percentile) 77 | 7.000 0.000000000000 9 1.00 78 | 10.000 0.100000000000 152 1.11 79 | 12.000 0.200000000000 408 1.25 80 | 12.000 0.300000000000 408 1.43 81 | 13.000 0.400000000000 538 1.67 82 | 14.000 0.500000000000 647 2.00 83 | 15.000 0.550000000000 736 2.22 84 | 16.000 0.600000000000 823 2.50 85 | 17.000 0.650000000000 879 2.86 86 | 18.000 0.700000000000 930 3.33 87 | 19.000 0.750000000000 982 4.00 88 | 20.000 0.775000000000 1029 4.44 89 | 20.000 0.800000000000 1029 5.00 90 | 21.000 0.825000000000 1059 5.71 91 | 22.000 0.850000000000 1083 6.67 92 | 24.000 0.875000000000 1144 8.00 93 | 24.000 0.887500000000 1144 8.89 94 | 25.000 0.900000000000 1165 10.00 95 | 25.000 0.912500000000 1165 11.43 96 | 26.000 0.925000000000 1185 13.33 97 | 27.000 0.937500000000 1200 16.00 98 | 28.000 0.943750000000 1212 17.78 99 | 28.000 0.950000000000 1212 20.00 100 | 29.000 0.956250000000 1219 22.86 101 | 31.000 0.962500000000 1229 26.67 102 | 32.000 0.968750000000 1236 32.00 103 | 33.000 0.971875000000 1247 35.56 104 | 33.000 0.975000000000 1247 40.00
105 | 33.000 0.978125000000 1247 45.71 106 | 35.000 0.981250000000 1253 53.33 107 | 36.000 0.984375000000 1255 64.00 108 | 37.000 0.985937500000 1258 71.11 109 | 37.000 0.987500000000 1258 80.00 110 | 38.000 0.989062500000 1262 91.43 111 | 38.000 0.990625000000 1262 106.67 112 | 39.000 0.992187500000 1266 128.00 113 | 39.000 0.992968750000 1266 142.22 114 | 39.000 0.993750000000 1266 160.00 115 | 41.000 0.994531250000 1268 182.86 116 | 41.000 0.995312500000 1268 213.33 117 | 42.000 0.996093750000 1271 256.00 118 | 42.000 0.996484375000 1271 284.44 119 | 42.000 0.996875000000 1271 320.00 120 | 42.000 0.997265625000 1271 365.71 121 | 42.000 0.997656250000 1271 426.67 122 | 42.000 0.998046875000 1271 512.00 123 | 42.000 0.998242187500 1271 568.89 124 | 48.000 0.998437500000 1272 640.00 125 | 48.000 0.998632812500 1272 731.43 126 | 48.000 0.998828125000 1272 853.33 127 | 48.000 0.999023437500 1272 1024.00 128 | 48.000 0.999121093750 1272 1137.78 129 | 54.000 0.999218750000 1273 1280.00 130 | 54.000 1.000000000000 1273 131 | #[Mean = 16.174, StdDeviation = 6.268] 132 | #[Max = 54.614, Total count = 1273] 133 | #[Buckets = 1, SubBuckets = 2048] 134 | ` 135 | 136 | function calculateHistogramIntervalCounts(values: ParsedHDRHistogramValue[]) { 137 | type HistogramPoint = { amount: number; value: number } 138 | let res: HistogramPoint[] = [] 139 | 140 | let lastCount = 0 141 | for (let entry of values) { 142 | let amount = entry.total_count - lastCount 143 | let value = entry.value 144 | res.push({ amount, value }) 145 | lastCount = entry.total_count 146 | } 147 | 148 | return res 149 | } 150 | 151 | function reconstructHdrHistogramFromParsed( 152 | parsedHistogram: ParsedHDRHistogram 153 | ) { 154 | const histogram = hdr.build() 155 | const intervals = calculateHistogramIntervalCounts(parsedHistogram.values) 156 | for (let entry of intervals) 157 | histogram.recordValueWithCount(entry.value, entry.amount) 158 | return histogram 159 | } 160 | 161 | const reconstructHdrHistogramFromText = (text: string) => 162 | reconstructHdrHistogramFromParsed(parseHdrHistogram(text)) 163 | -------------------------------------------------------------------------------- /app/queries/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | //"include": ["src"], 3 | "compilerOptions": { 4 | /* Basic Options */ 5 | // "incremental": true, /* Enable incremental compilation */ 6 | "target": "es5" /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017', 'ES2018', 'ES2019', 'ES2020', or 'ESNEXT'. */, 7 | "module": "commonjs" /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', 'es2020', or 'ESNext'. */, 8 | "lib": [ 9 | "ES2019", 10 | "ES2020.String" 11 | ] /* Specify library files to be included in the compilation. */, 12 | // "allowJs": true, /* Allow javascript files to be compiled. */ 13 | // "checkJs": true, /* Report errors in .js files. */ 14 | // "jsx": "preserve", /* Specify JSX code generation: 'preserve', 'react-native', or 'react'. */ 15 | // "declaration": true, /* Generates corresponding '.d.ts' file. */ 16 | // "declarationMap": true, /* Generates a sourcemap for each corresponding '.d.ts' file. */ 17 | // "sourceMap": true, /* Generates corresponding '.map' file. */ 18 | // "outFile": "./", /* Concatenate and emit output to single file. */ 19 | // "outDir": "./", /* Redirect output structure to the directory. */ 20 | // "rootDir": "./", /* Specify the root directory of input files. 
Use to control the output directory structure with --outDir. */ 21 | // "composite": true, /* Enable project compilation */ 22 | // "tsBuildInfoFile": "./", /* Specify file to store incremental compilation information */ 23 | // "removeComments": true, /* Do not emit comments to output. */ 24 | // "noEmit": true, /* Do not emit outputs. */ 25 | // "importHelpers": true, /* Import emit helpers from 'tslib'. */ 26 | "downlevelIteration": true /* Provide full support for iterables in 'for-of', spread, and destructuring when targeting 'ES5' or 'ES3'. */, 27 | // "isolatedModules": true, /* Transpile each file as a separate module (similar to 'ts.transpileModule'). */ 28 | 29 | /* Strict Type-Checking Options */ 30 | "strict": false /* Enable all strict type-checking options. */, 31 | // "noImplicitAny": true, /* Raise error on expressions and declarations with an implied 'any' type. */ 32 | // "strictNullChecks": true, /* Enable strict null checks. */ 33 | // "strictFunctionTypes": true, /* Enable strict checking of function types. */ 34 | // "strictBindCallApply": true, /* Enable strict 'bind', 'call', and 'apply' methods on functions. */ 35 | // "strictPropertyInitialization": true, /* Enable strict checking of property initialization in classes. */ 36 | // "noImplicitThis": true, /* Raise error on 'this' expressions with an implied 'any' type. */ 37 | // "alwaysStrict": true, /* Parse in strict mode and emit "use strict" for each source file. */ 38 | 39 | /* Additional Checks */ 40 | // "noUnusedLocals": true, /* Report errors on unused locals. */ 41 | // "noUnusedParameters": true, /* Report errors on unused parameters. */ 42 | // "noImplicitReturns": true, /* Report error when not all code paths in function return a value. */ 43 | // "noFallthroughCasesInSwitch": true, /* Report errors for fallthrough cases in switch statement. */ 44 | 45 | /* Module Resolution Options */ 46 | "moduleResolution": "node" /* Specify module resolution strategy: 'node' (Node.js) or 'classic' (TypeScript pre-1.6). */, 47 | // "baseUrl": "./", /* Base directory to resolve non-absolute module names. */ 48 | // "paths": {}, /* A series of entries which re-map imports to lookup locations relative to the 'baseUrl'. */ 49 | // "rootDirs": [], /* List of root folders whose combined content represents the structure of the project at runtime. */ 50 | // "typeRoots": [], /* List of folders to include type definitions from. */ 51 | // "types": [], /* Type declaration files to be included in compilation. */ 52 | // "allowSyntheticDefaultImports": true, /* Allow default imports from modules with no default export. This does not affect code emit, just typechecking. */ 53 | "esModuleInterop": true /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */, 54 | // "preserveSymlinks": true, /* Do not resolve the real path of symlinks. */ 55 | // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */ 56 | 57 | /* Source Map Options */ 58 | // "sourceRoot": "", /* Specify the location where debugger should locate TypeScript files instead of source locations. */ 59 | // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */ 60 | // "inlineSourceMap": true, /* Emit a single file with source maps instead of having a separate file. 
*/ 61 | // "inlineSources": true, /* Emit the source alongside the sourcemaps within a single file; requires '--inlineSourceMap' or '--sourceMap' to be set. */ 62 | 63 | /* Experimental Options */ 64 | // "experimentalDecorators": true, /* Enables experimental support for ES7 decorators. */ 65 | // "emitDecoratorMetadata": true, /* Enables experimental support for emitting type metadata for decorators. */ 66 | 67 | /* Advanced Options */ 68 | "forceConsistentCasingInFileNames": true /* Disallow inconsistently-cased references to the same file. */ 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /app/subscriptions/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | *.sqlite 3 | dist/ 4 | -------------------------------------------------------------------------------- /app/subscriptions/README.md: -------------------------------------------------------------------------------- 1 | # subscription-benchmark 2 | 3 | This is a load generator for GraphQL subscriptions. 4 | 5 | ### Configuration: 6 | 7 | ```yaml 8 | # Hasura Endpoint 9 | url: http://localhost:8085/v1/graphql 10 | # Postgres Connection string (can also set ENV variable PG_CONNECTION_STRING) 11 | db_connection_string: postgres://postgres:postgrespassword@localhost:5430/postgres 12 | # (Optional) Headers Object 13 | headers: 14 | X-Hasura-Admin-Secret: my-secret 15 | # Subscription Config 16 | config: 17 | # Label for the subscription events in database (MUST BE CHANGED AFTER EVERY RUN) 18 | label: SearchAlbumsWithArtist01 19 | # Whether to insert the payload data for subscriptions into the "event_data" column in DB 20 | insert_payload_data: true 21 | # Maximum number of websocket connections 22 | max_connections: 100 23 | # Number of sockets to spawn per second until max reached 24 | connections_per_second: 10 25 | # Subscription query string 26 | query: | 27 | subscription AlbumByIDSubscription($artistIds: [Int!]!) { 28 | albums(where: {artist_id: { _in: $artistIds}}) { 29 | id 30 | title 31 | updated_at 32 | } 33 | } 34 | # Optional variables 35 | variables: 36 | # Can be an array 37 | artistIds: [1, 2, 3, 4] 38 | # Or a string 39 | some_value: a_string 40 | # A number 41 | some_number: 10 42 | # Or a range object with start & end 43 | some_range: { start: 1, end: 10 } 44 | another_range: { start: 50, end: 100 } 45 | # Also an object 46 | some_object: 47 | a_key: a_value 48 | ``` 49 | 50 | **Note about ranges:** Ranges work through generators/iterators. Making a range 1-10 will take the `next()` value of each range variable on every iteration, and mix it in with normal variables as an object. If you have multiple ranges, they each advance by one increment every subscription. When the range reaches the end, it loops back around. 51 | 52 | Example with ranges: 53 | 54 | ```yaml 55 | artistIds: [1, 2, 3] 56 | some_number: 10 57 | some_range: { start: 1, end: 10 } 58 | another_range: { start: 50, end: 100 } 59 | ``` 60 | 61 | The first subscription will get variables like this: 62 | 63 | ```json 64 | { 65 | "artistIds": [1, 2, 3], 66 | "some_number": 10, 67 | "some_range": 1, 68 | "another_range": 50 69 | } 70 | ``` 71 | 72 | The next one will get `some_range: 2` and `another_range: 51`, and so on. When a range reaches the end of its values, it wraps around and starts from the beginning again.
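For intuition, the wrap-around behavior of a range variable can be sketched roughly like this (a minimal illustration of the behavior described above, not necessarily the tool's exact implementation):

```ts
// Minimal sketch of a wrapping range iterator (illustrative only)
function* rangeIterator(start: number, end: number) {
  let current = start
  while (true) {
    yield current++
    if (current > end) current = start
  }
}

const someRange = rangeIterator(1, 10)
someRange.next().value // 1
someRange.next().value // 2
// ...after 10, the next value wraps back around to 1
```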
73 | 74 | ### Setup: 75 | 76 | - Install the dependencies with Yarn or NPM 77 | - `yarn install` 78 | - `npm install` 79 | - Create the `events` table using knex migrate with Yarn or NPM 80 | - `yarn knex:migrate:latest` 81 | - `npm run knex:migrate:latest` 82 | - Confirm that an `events` table has been created inside your database 83 | - Run the benchmark tool 84 | ```bash 85 | # With Yarn 86 | yarn start 87 | # With NPM 88 | npm run start 89 | ``` 90 | - Create events by making changes in the subscribed table 91 | - As you create changes, you should notice the number of data events increasing in `stdout` output: 92 | ![](example-stdout-output.png) 93 | - Stop the benchmark with `ctrl + c` 94 | - The script should say it has inserted the event data: 95 | 96 | ``` 97 | ❯ Executing Teardown Process 98 | ❯ Starting to close socket connections 99 | ❯ Sockets closed, attempting to insert event data 100 | ❯ Inserted a total of 10 events for label SearchAlbumsWithArtistUpdated 101 | ❯ Trying to close DB connection pool 102 | ❯ Database connection destroyed 103 | ❯ Now exiting the process 104 | ``` 105 | 106 | Latency data can be calculated by subscribing to the `updated_at` field in your query, and then calculating the difference between `event_time` and the record's `updated_at` time. 107 | 108 | Note that the 0th index of the returned data may not be the most recently updated item, so simply doing `data[0].updated_at` may be incorrect. 109 | 110 | ### Benchmark Table: 111 | 112 | ```js 113 | exports.up = function (knex, Promise) { 114 | return knex.schema.createTable('events', (table) => { 115 | table.string('label').notNullable() // unique label to identify benchmark 116 | table.integer('connection_id').notNullable() // connection_id represents the nth connection 117 | table.integer('operation_id').notNullable() 118 | table.integer('event_number').notNullable() // event_number represents the nth event that was received by the client 119 | table.jsonb('event_data').notNullable() // event_data stores the data that was received; this can be used to validate 120 | table.timestamp('event_time', { useTz: true }).notNullable() // event_time stores the time at which the event was received by the client. 121 | table.boolean('is_error').notNullable() // is_error represents whether the event was an error or not. 122 | table.integer('latency') // latency is not populated by the benchmark tool, but can be populated by calculating `event_time - event_triggered_time` 123 | 124 | table.unique(['label', 'connection_id', 'operation_id', 'event_number']) 125 | }) 126 | } 127 | ``` 128 | -------------------------------------------------------------------------------- /app/subscriptions/example-stdout-output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hasura/graphql-bench/2163a70a5324766521b8d6ceb7a73b8f0d12688d/app/subscriptions/example-stdout-output.png -------------------------------------------------------------------------------- /app/subscriptions/notes.md: -------------------------------------------------------------------------------- 1 | When querying for latency, you must make sure to have ordered the fields by "updated_at", or else the item at index "0" may not be the most recently updated item. 2 | This would give very inaccurate results; for example, subscribing to an array of 10 items, updating one, but finding that the 0th index of the response is an item whose "updated_at" value is several days old.
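One way to guard against this (a sketch, assuming Hasura's `order_by` argument and the example subscription from the README) is to order the results by `updated_at` in the subscription itself, so the freshest row is always first:

```graphql
subscription AlbumByIDSubscription($artistIds: [Int!]!) {
  albums(
    where: {artist_id: {_in: $artistIds}}
    order_by: {updated_at: desc}
  ) {
    id
    title
    updated_at
  }
}
```

With this ordering, `data[0].updated_at` refers to the most recent change.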
3 | -------------------------------------------------------------------------------- /app/subscriptions/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "subscription-benchmark", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1", 8 | "compile": "tsc", 9 | "start": "ts-node ./src/run.ts", 10 | "knex:migrate:make": "ts-node ./node_modules/knex/bin/cli.js --knexfile ./knexfile.ts migrate:make -x ts", 11 | "knex:migrate:latest": "ts-node ./node_modules/knex/bin/cli.js --knexfile ./knexfile.ts migrate:latest", 12 | "knex:migrate:rollback": "ts-node ./node_modules/knex/bin/cli.js --knexfile ./knexfile.ts migrate:rollback" 13 | }, 14 | "author": "", 15 | "license": "ISC", 16 | "dependencies": { 17 | "@nx-js/observer-util": "^4.2.2", 18 | "@types/js-yaml": "^3.12.1", 19 | "@types/lodash": "^4.14.154", 20 | "@types/node": "^11.9.5", 21 | "dotenv": "^8.2.0", 22 | "js-yaml": "^3.13.1", 23 | "knex": "^0.21.1", 24 | "lodash": "^4.17.15", 25 | "log-update": "^4.0.0", 26 | "objection": "^1.6.3", 27 | "pg": "^8.2.1", 28 | "reattempt": "^0.1.1", 29 | "websocket-as-promised": "^1.0.1", 30 | "ws": "^7.3.0" 31 | }, 32 | "devDependencies": { 33 | "@types/graphql": "^14.0.0", 34 | "@types/ws": "^7.2.4", 35 | "graphql": "^14.0.2", 36 | "ts-node": "^8.10.2", 37 | "typescript": "^3.4.1" 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /app/subscriptions/src/example_latency_query.sql: -------------------------------------------------------------------------------- 1 | WITH album_ids_cte AS ( 2 | SELECT *, event_data->'data'->'albums'->0->'id' as album_id FROM events 3 | ) 4 | 5 | SELECT *, now() - event_time AS latency 6 | FROM album_ids_cte 7 | INNER JOIN albums 8 | ON albums.id = album_ids_cte.album_id::int 9 | LIMIT 10 -------------------------------------------------------------------------------- /app/subscriptions/src/main.ts: -------------------------------------------------------------------------------- 1 | import { 2 | argumentGenerator, 3 | GQL, 4 | SubscriptionBenchConfig, 5 | yamlConfigToSocketManagerParams, 6 | COLORS, 7 | } from './utils' 8 | import Knex = require('knex') 9 | import { Model } from 'objection' 10 | import Reattempt from 'reattempt/dist/decorator' 11 | import WebSocket from 'ws' 12 | import WebSocketAsPromised from 'websocket-as-promised' 13 | import { Events } from './schema' 14 | import { observable, observe } from '@nx-js/observer-util' 15 | import logUpdate from 'log-update' 16 | 17 | const DEBUG = process.env.DEBUG 18 | 19 | /** 20 | * ================= 21 | * Program Contents 22 | * ================= 23 | * - SocketManager: 24 | * Holds config for the benchmark parameters and controls spawning/orchestrating Socket connections. 25 | * Also performs DB insert at the end. 26 | * 27 | * - Connection: 28 | * An individual Websocket, maintains an internal record of received events. 29 | * Message handlers are registered here; these push to events. 30 | * 31 | * - main(): 32 | * Reads from config file, instantiates a SocketManager based on params in file. 33 | * Spawns more sockets at configured number per second until target reached. 34 | * Listens for ctrl + c to kill, which invokes exit() 35 | * 36 | * - exit(): 37 | * Teardown handler. Awaits closing all socket connections, writing data to DB, and destroying DB connection.
38 | */ 39 | 40 | /** 41 | * Global Stat Observables 42 | */ 43 | 44 | const STATS_OBSERVABLE = observable({ 45 | DATA_EVENT_COUNT: 0, 46 | ERROR_EVENT_COUNT: 0, 47 | CONNECTED_SOCKETS: new Set(), 48 | }) 49 | 50 | function updateEventStatsStdout() { 51 | logUpdate( 52 | COLORS.FG_CYAN + 53 | `Socket count: ${STATS_OBSERVABLE.CONNECTED_SOCKETS.size} | ` + 54 | COLORS.RESET + 55 | COLORS.FG_GREEN + 56 | `Data Events Received: ${STATS_OBSERVABLE.DATA_EVENT_COUNT} | ` + 57 | COLORS.RESET + 58 | COLORS.FG_RED + 59 | `Error Events Received: ${STATS_OBSERVABLE.ERROR_EVENT_COUNT} ` + 60 | COLORS.RESET 61 | ) 62 | } 63 | 64 | /** 65 | * ===================== 66 | * SOCKET MANAGER CLASS 67 | * ===================== 68 | */ 69 | 70 | export interface SockerManagerConfig { 71 | label: string 72 | endpoint: string 73 | variables: object 74 | headers?: Record<string, string> 75 | maxConnections: number 76 | insertPayloadData: boolean 77 | connectionsPerSecond: number 78 | pgConnectionString: string 79 | subscriptionString: string 80 | } 81 | 82 | export class SocketManager { 83 | private nextSocketId = 1 84 | public connections: { [id: number]: Connection } = {} 85 | public config: SockerManagerConfig 86 | public queryArgGenerator: Iterator<any> 87 | 88 | constructor(config: SockerManagerConfig) { 89 | this.config = config 90 | this.queryArgGenerator = argumentGenerator(config.variables) 91 | } 92 | 93 | public closeSockets() { 94 | return Promise.all( 95 | Object.values(this.connections).map((conn) => { 96 | conn.socket.sendPacked({ type: GQL.CONNECTION_TERMINATE }) 97 | conn.socket.close() 98 | }) 99 | ) 100 | } 101 | 102 | public get allEvents() { 103 | return Object.values(this.connections).flatMap((conn) => conn.events) 104 | } 105 | 106 | public async insertEvents() { 107 | return Events.query() 108 | .allowInsert('[connection_id, event_number, event_data, event_time]') 109 | .insertGraph(this.allEvents) 110 | } 111 | 112 | public async spawnConnection() { 113 | const socketId = this.nextSocketId++ 114 | const socketManagerConfig = this.config 115 | const queryVariables = this.queryArgGenerator.next().value 116 | try { 117 | const connection = new Connection({ 118 | id: socketId, 119 | queryVariables, 120 | socketManagerConfig, 121 | }) 122 | connection.startSubscription() 123 | this.connections[socketId] = connection 124 | return connection 125 | } catch (err) { 126 | console.log('Caught error when calling spawnConnection(), exiting', err) 127 | process.exit(1) 128 | } 129 | } 130 | } 131 | 132 | /** 133 | * ======================= 134 | * SOCKET CONNECTION CLASS 135 | * ======================= 136 | */ 137 | 138 | export type FormatedError = Error & { 139 | originalError?: any 140 | } 141 | 142 | interface ConnectionParams { 143 | id: number 144 | socketManagerConfig: SockerManagerConfig 145 | queryVariables: object 146 | } 147 | 148 | class Connection { 149 | public eventNumber = 1 150 | public events: Array<any> = [] 151 | public socket: WebSocketAsPromised 152 | public isReconnecting: boolean = false 153 | 154 | constructor(public props: ConnectionParams) { 155 | this.socket = this.makeSocket() 156 | this.configureMessageHandlers() 157 | } 158 | 159 | private makeSocket() { 160 | const { endpoint, headers } = this.props.socketManagerConfig 161 | return new WebSocketAsPromised(endpoint, { 162 | createWebSocket: (url) => new WebSocket(url, 'graphql-ws', { headers }), 163 | extractMessageData: (event) => event, 164 | packMessage: (data) => JSON.stringify(data), 165 | unpackMessage: (data) => JSON.parse(data as string),
166 | } as any) 167 | } 168 | 169 | // TODO: Make the retry configurable through config.yaml 170 | @Reattempt({ times: 60, delay: 1000 }) 171 | public async startSubscription() { 172 | const socket = this.socket 173 | const { id, queryVariables } = this.props 174 | const { headers, subscriptionString } = this.props.socketManagerConfig 175 | 176 | if (DEBUG) console.log('Socket ID', id, 'attempting to start subscription') 177 | 178 | await socket.open() 179 | socket.sendPacked({ 180 | type: GQL.CONNECTION_INIT, 181 | payload: { headers }, 182 | }) 183 | socket.sendPacked({ 184 | id: String(id), 185 | type: GQL.START, 186 | payload: { 187 | query: subscriptionString, 188 | variables: queryVariables, 189 | }, 190 | }) 191 | } 192 | 193 | private makeEventRow({ payload, err }) { 194 | const { label, insertPayloadData } = this.props.socketManagerConfig 195 | return { 196 | label, 197 | is_error: err, 198 | operation_id: 1, 199 | connection_id: this.props.id, 200 | event_number: this.eventNumber++, 201 | event_data: insertPayloadData ? payload : { data: null }, 202 | event_time: new Date().toISOString(), 203 | } 204 | } 205 | 206 | private configureMessageHandlers() { 207 | // On socket close: 208 | this.socket.onClose.addListener(() => { 209 | // Print debug message if enabled 210 | if (DEBUG) console.log('Socket ID', this.props.id, 'closed') 211 | // Remove socket ID from Observable ES6 Set of connected sockets 212 | STATS_OBSERVABLE.CONNECTED_SOCKETS.delete(this.props.id) 213 | // If the socket is not currently trying to reconnect, begin trying 214 | if (!this.isReconnecting) this.startSubscription() 215 | // Set reconnecting state to true 216 | this.isReconnecting = true 217 | }) 218 | 219 | // On socket open: 220 | this.socket.onOpen.addListener(() => { 221 | // Print debug message if enabled 222 | if (DEBUG) console.log('Socket ID', this.props.id, 'connected') 223 | // Add the socket ID to ES6 Set of connected sockets 224 | STATS_OBSERVABLE.CONNECTED_SOCKETS.add(this.props.id) 225 | // Set reconnecting state to false 226 | this.isReconnecting = false 227 | }) 228 | 229 | // If debug mode enabled, also set up generalized data logger 230 | if (DEBUG) { 231 | this.socket.onSend.addListener((data) => console.log('sent', data)) 232 | } 233 | 234 | this.socket.onUnpackedMessage.addListener((data) => { 235 | switch (data.type) { 236 | case GQL.DATA: 237 | const event = this.makeEventRow({ payload: data.payload, err: false }) 238 | if (DEBUG) console.log('CALLED GQL.DATA CASE, GOT EVENT ROW', event) 239 | STATS_OBSERVABLE.DATA_EVENT_COUNT++ 240 | this.events.push(event) 241 | break 242 | case GQL.ERROR: 243 | const error = this.makeEventRow({ payload: data.payload, err: true }) 244 | if (DEBUG) console.log('CALLED GQL.ERROR CASE, GOT ERROR ROW', data) 245 | STATS_OBSERVABLE.ERROR_EVENT_COUNT++ 246 | this.events.push(error) 247 | break 248 | } 249 | }) 250 | } 251 | } 252 | 253 | /** 254 | * ===================== 255 | * UTILS & MISC 256 | * ===================== 257 | */ 258 | 259 | /** 260 | * Knex connection object. Actual connection creation happens in the main method. 261 | */ 262 | let knexConnection: Knex 263 | 264 | async function assertDatabaseConnection() { 265 | return knexConnection.raw('select 1+1 as result').catch((err: any) => { 266 | console.log('Failed to establish connection to database!
Exiting...') 267 | console.log(err) 268 | process.exit(1) 269 | }) 270 | } 271 | 272 | function prettyPrintConfig(options) { 273 | console.table({ 274 | url: options.url, 275 | db_connection_string: options.db_connection_string, 276 | }) 277 | console.table({ headers: options.headers }) 278 | console.table({ config: options.config }, [ 279 | 'label', 280 | 'max_connections', 281 | 'connections_per_second', 282 | ]) 283 | console.table({ variables: options.config.variables }) 284 | } 285 | 286 | /** 287 | * ===================== 288 | * MAIN PROGRAM CODE 289 | * ===================== 290 | */ 291 | 292 | export async function main(opts: SubscriptionBenchConfig) { 293 | if (!opts) { 294 | throw new Error("Subscription options invalid"); 295 | } 296 | if (!opts.db_connection_string) { 297 | throw new Error("DB Connection String not found"); 298 | } 299 | 300 | // Open the DB connection using connection string from config file 301 | let dbConfig = { 302 | client: 'pg', 303 | connection: opts.db_connection_string, 304 | migrations: { 305 | directory: './src/migrations', 306 | }, 307 | } as Knex.Config; 308 | knexConnection = Knex(dbConfig); 309 | Model.knex(knexConnection); 310 | 311 | const options: SubscriptionBenchConfig = opts; 312 | /** 313 | * Any time values change in these stats, run the below function; 314 | * currently it just updates the terminal output text with new data 315 | * 316 | * NOTE: This only works because updateEventStatsStdout() references 317 | * variables from the observable function, so the Proxy knows to fire 318 | */ 319 | 320 | observe(() => { 321 | updateEventStatsStdout() 322 | }) 323 | 324 | /** 325 | * Logging 326 | */ 327 | 328 | const database = 329 | process.env.PG_CONNECTION_STRING || options.db_connection_string 330 | console.log('Asserting database connectivity, trying to connect to:') 331 | console.log(COLORS.FG_CYAN, database, COLORS.RESET) 332 | 333 | await assertDatabaseConnection() 334 | prettyPrintConfig(options) 335 | 336 | console.log( 337 | 'Connected, starting subscriptions benchmark for a total of', 338 | options.config.max_connections, 339 | 'sockets at a connection rate of', 340 | options.config.connections_per_second, 341 | 'sockets per second' 342 | ) 343 | 344 | /** 345 | * Execution 346 | */ 347 | 348 | const socketManagerParams = yamlConfigToSocketManagerParams(options) 349 | const socketManager = new SocketManager(socketManagerParams) 350 | 351 | const MAX_CONNECTIONS = options.config.max_connections 352 | const SPAWN_RATE = 1000 / options.config.connections_per_second 353 | 354 | let socketSpawned = 0 355 | const spawnFn = () => { 356 | socketSpawned++ 357 | return socketManager.spawnConnection().then((socket) => { 358 | if (socketSpawned >= MAX_CONNECTIONS) clearInterval(spawnInterval) 359 | }) 360 | } 361 | 362 | const spawnInterval = setInterval(spawnFn, SPAWN_RATE) 363 | process.on('SIGINT', () => exit(socketManager)) 364 | } 365 | 366 | /** 367 | * ===================== 368 | * EXIT TEARDOWN PROCESS 369 | * ===================== 370 | */ 371 | 372 | async function exit(socketManager: SocketManager) { 373 | console.log('\nExecuting Teardown Process') 374 | try { 375 | console.log('Starting to close socket connections') 376 | await socketManager.closeSockets() 377 | } catch (error) { 378 | console.log('Error while closing socket connections:', error) 379 | } 380 | 381 | try { 382 | console.log('Sockets closed, attempting to insert event data') 383 | const events = await socketManager.insertEvents() 384 | console.log( 385 | `Inserted a
total of ${events.length} events for label ${socketManager.config.label}` 386 | ) 387 | } catch (error) { 388 | console.log('Error while inserting events:', error) 389 | } 390 | 391 | try { 392 | console.log('Trying to close DB connection pool') 393 | await knexConnection.destroy() 394 | console.log('Database connection destroyed') 395 | } catch (error) { 396 | console.log('Error while destroying database connection:', error) 397 | } 398 | 399 | console.log('Now exiting the process') 400 | process.exit(0) 401 | } 402 | -------------------------------------------------------------------------------- /app/subscriptions/src/migrations/events.ts: -------------------------------------------------------------------------------- 1 | import * as Knex from 'knex' 2 | 3 | export async function up(knex: Knex): Promise { 4 | return knex.schema.createTable('events', (table: Knex.TableBuilder) => { 5 | table.string('label').notNullable() 6 | table.integer('connection_id').notNullable() 7 | table.integer('operation_id').notNullable() 8 | table.integer('event_number').notNullable() 9 | table.jsonb('event_data').notNullable() 10 | table.timestamp('event_time', { useTz: true }).notNullable() 11 | table.boolean('is_error').notNullable() 12 | table.integer('latency') 13 | table.unique(['label', 'connection_id', 'operation_id', 'event_number']) 14 | }) 15 | } 16 | 17 | export async function down(knex: Knex): Promise { 18 | return knex.schema.dropTable('events') 19 | } 20 | -------------------------------------------------------------------------------- /app/subscriptions/src/run.ts: -------------------------------------------------------------------------------- 1 | import { main } from './main' 2 | 3 | main() 4 | -------------------------------------------------------------------------------- /app/subscriptions/src/schema.ts: -------------------------------------------------------------------------------- 1 | import { Model } from 'objection' 2 | 3 | export class Events extends Model { 4 | label: string 5 | connection_id: number 6 | operation_id: number 7 | event_number: number 8 | event_data: any 9 | event_time: string 10 | is_error: boolean 11 | latency?: number 12 | 13 | static tableName = 'events' 14 | 15 | static get idColumn() { 16 | return ['label', 'connection_id', 'operation_id', 'event_number'] 17 | } 18 | 19 | static get jsonSchema() { 20 | return { 21 | type: 'object', 22 | required: [ 23 | 'label', 24 | 'connection_id', 25 | 'operation_id', 26 | 'event_number', 27 | 'event_data', 28 | 'event_time', 29 | 'is_error', 30 | ], 31 | properties: { 32 | label: { type: 'string' }, 33 | connection_id: { type: 'integer' }, 34 | operation_id: { type: 'integer' }, 35 | event_number: { type: 'integer' }, 36 | event_data: { type: 'json' }, 37 | event_time: { type: 'string' }, 38 | is_error: { type: 'boolean' }, 39 | latency: { type: ['integer', 'null'] }, 40 | }, 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /app/subscriptions/src/tests.ts: -------------------------------------------------------------------------------- 1 | import { SubscriptionBenchConfig } from './utils' 2 | import { main as runSubscriptionBenchmark } from './main' 3 | 4 | const testConfig: SubscriptionBenchConfig = { 5 | url: 'http://localhost:8085/v1/graphql', 6 | db_connection_string: 7 | 'postgres://postgres:postgrespassword@localhost:5430/postgres', 8 | headers: { 9 | 'X-Hasura-Admin-Secret': 'my-secret', 10 | }, 11 | config: { 12 | label: 'SearchAlbumsWithArtist', 13 | 
max_connections: 20, 14 | connections_per_second: 10, 15 | insert_payload_data: true, 16 | query: ` 17 | subscription AlbumByIDSubscription($artistIds: [Int!]!) { 18 | albums(where: {artist_id: { _in: $artistIds}}) { 19 | id 20 | title 21 | updated_at 22 | } 23 | } 24 | `, 25 | variables: { 26 | artistIds: [1, 2, 3, 4], 27 | }, 28 | }, 29 | } 30 | 31 | async function main() { 32 | await runSubscriptionBenchmark(testConfig) 33 | } 34 | 35 | main().catch((err) => { 36 | console.log('Error running subscription benchmark test:', err) 37 | }) 38 | -------------------------------------------------------------------------------- /app/subscriptions/src/utils.ts: -------------------------------------------------------------------------------- 1 | import { SockerManagerConfig } from './main' 2 | import yaml from 'js-yaml' 3 | import fs from 'fs' 4 | import path from 'path' 5 | 6 | export const GQL = { 7 | START: 'start', 8 | STOP: 'stop', 9 | DATA: 'data', 10 | ERROR: 'error', 11 | COMPLETE: 'complete', 12 | CONNECTION_INIT: 'connection_init', 13 | CONNECTION_ACK: 'connection_ack', 14 | CONNECTION_ERROR: 'connection_error', 15 | CONNECTION_KEEP_ALIVE: 'ka', 16 | CONNECTION_TERMINATE: 'connection_terminate', 17 | } 18 | 19 | function* makeRangeIterator(start, end) { 20 | let originalStart = start 21 | while (true) { 22 | yield start++ 23 | if (start > end) start = originalStart 24 | } 25 | } 26 | 27 | const isRangeVariable = (obj) => obj.start != null && obj.end != null 28 | export function* argumentGenerator(args) { 29 | // Clone holds the original args, and the iterator values 30 | let internal = Object.assign({}, args) 31 | // genKeys holds the names of generator keys in the object 32 | let genKeys = [] 33 | // Iterate the keys and find "range" variables with "start"/"end" set 34 | for (let key in args) { 35 | const val = args[key] 36 | if (isRangeVariable(val)) { 37 | internal[key] = makeRangeIterator(val.start, val.end) 38 | genKeys.push(key) 39 | } 40 | } 41 | // Each iteration, for each "range" key, set the value to 42 | // the internal clone's next tick, then yield the whole object 43 | while (true) { 44 | genKeys.forEach((key) => (args[key] = internal[key].next().value)) 45 | yield args 46 | } 47 | } 48 | 49 | export interface SubscriptionBenchConfig { 50 | url: string 51 | headers: Record<string, string> 52 | config: QueryConfig 53 | db_connection_string: string 54 | } 55 | 56 | export interface QueryConfig { 57 | label: string 58 | // TODO: Make the benchmark stop after optional duration? 59 | duration?: string 60 | insert_payload_data: boolean 61 | max_connections: number 62 | connections_per_second: number 63 | query: string 64 | variables: Record<string, any> 65 | } 66 | 67 | export interface Range { 68 | start: number 69 | end: number 70 | } 71 | 72 | export const yamlConfigToSocketManagerParams = ( 73 | options: SubscriptionBenchConfig 74 | ): SockerManagerConfig => ({ 75 | label: options.config.label, 76 | endpoint: options.url, 77 | variables: options.config.variables, 78 | headers: options.headers, 79 | maxConnections: options.config.max_connections, 80 | connectionsPerSecond: options.config.connections_per_second, 81 | pgConnectionString: options.db_connection_string, 82 | subscriptionString: options.config.query, 83 | insertPayloadData: options.config.insert_payload_data ??
 85 | 86 | export const COLORS = { 87 | FG_GREEN: '\x1b[32m', 88 | FG_YELLOW: '\x1b[33m', 89 | FG_CYAN: '\x1b[36m', 90 | FG_RED: '\x1b[31m', 91 | RESET: '\x1b[0m', 92 | BLINK: '\x1b[5m', 93 | } 94 | -------------------------------------------------------------------------------- /app/subscriptions/test.sh: -------------------------------------------------------------------------------- 1 | PG_CONNECTION_STRING=postgres://postgres:postgrespassword@localhost:5430/postgres ENDPOINT=https://hasura-pro-demo.hasura-app.io/v1/graphql LABEL=mylabel CONFIG_FILE_PATH=src/config_old.yaml ts-node src/index.ts 2 | -------------------------------------------------------------------------------- /app/subscriptions/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es5", 4 | "module": "commonjs", 5 | "moduleResolution": "node", 6 | "sourceMap": true, 7 | "noImplicitAny": false, 8 | "outDir": "./dist", 9 | "allowSyntheticDefaultImports": true, 10 | "removeComments": true, 11 | "noImplicitReturns": true, 12 | "noFallthroughCasesInSwitch": true, 13 | "pretty": true, 14 | "declaration": true, 15 | "skipLibCheck": true, 16 | "esModuleInterop": true, 17 | "experimentalDecorators": true, 18 | "lib": ["es6", "esnext.asynciterable", "ESNEXT"], 19 | "types": ["node"] 20 | }, 21 | "include": ["./src"], 22 | "exclude": ["node_modules"] 23 | } 24 | -------------------------------------------------------------------------------- /app/web-app/README.md: -------------------------------------------------------------------------------- 1 | The bench report visualizer tool here can be opened locally with a browser, and 2 | will ask you to select report JSON from your local filesystem. 3 | 4 | We also expose this as a GitHub Page (for convenient linking, etc) at: 5 | 6 | https://hasura.github.io/graphql-bench/app/web-app/ 7 | 8 | You can also display a specific report by including the URL to the JSON in the 9 | URL fragment (assuming CORS is configured properly), for example: 10 | 11 | https://hasura.github.io/graphql-bench/app/web-app/#https://hasura-benchmark-results.s3.us-east-2.amazonaws.com/mono-pr-1866/chinook.json 12 | 13 | ...or using the shorthand form for: 14 | 15 | https://hasura.github.io/graphql-bench/app/web-app/#mono-pr-1866/chinook 16 | 17 | Multiple reports can be specified (or chosen using the file-picker) to display 18 | a regression report that compares each individual benchmark across runs: 19 | 20 | https://hasura.github.io/graphql-bench/app/web-app/#mono-pr-1866/chinook,mono-pr-1849/chinook,mono-pr-1843/chinook 21 | 22 | Visualizations will assume the list of runs is in reverse chronological order, 23 | but this mostly doesn't matter. 24 | -------------------------------------------------------------------------------- /app/web-app/index.html: -------------------------------------------------------------------------------- (file content lost in this dump: the HTML markup was stripped during extraction, leaving only the page title "GraphQL Bench Report")
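A minimal sketch (not the repo's actual index.js, which was not recoverable from this dump) of the URL-fragment scheme the web-app README above describes; the S3 base URL is taken from the README's own examples:

// Sketch: expand "#mono-pr-1866/chinook,mono-pr-1849/chinook" style fragments
// into full report-JSON URLs, treating anything that is not already a URL as
// shorthand for a file in the benchmark-results S3 bucket.
const S3_BASE = 'https://hasura-benchmark-results.s3.us-east-2.amazonaws.com'
function reportUrlsFromFragment(hash: string): string[] {
  return hash
    .replace(/^#/, '')
    .split(',')
    .filter(Boolean)
    .map((spec) => (spec.startsWith('http') ? spec : `${S3_BASE}/${spec}.json`))
}
// reportUrlsFromFragment('#mono-pr-1866/chinook') // => one full S3 JSON URL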
-------------------------------------------------------------------------------- /containers/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.6' 2 | services: 3 | postgres: 4 | image: postgres:12 5 | restart: always 6 | ports: 7 | - 5430:5432 8 | environment: 9 | POSTGRES_PASSWORD: postgrespassword 10 | # DRIVER={ODBC Driver 17 for SQL Server};SERVER=msserver;Uid=SA;Pwd=testPassword123 11 | msserver: 12 | image: mcr.microsoft.com/mssql/server:2019-latest 13 | ports: 14 | - 1433:1433 15 | environment: 16 | SA_PASSWORD: "testPassword123" 17 | ACCEPT_EULA: "Y" 18 | graphql-engine: 19 | image: hasura/graphql-engine:v2.0.0-beta.1 20 | ports: 21 | - 8085:8080 22 | depends_on: 23 | - 'postgres' 24 | restart: always 25 | environment: 26 | HASURA_GRAPHQL_DATABASE_URL: postgres://postgres:postgrespassword@postgres:5432/postgres 27 | HASURA_GRAPHQL_ENABLE_CONSOLE: 'true' # set to "false" to disable console 28 | -------------------------------------------------------------------------------- /containers/graphql-wait.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | echo -n "Waiting for graphql-engine" 3 | until curl -s "http://localhost:8085/v1/query" &>/dev/null; do 4 | echo -n '.' && sleep 0.2 5 | done 6 | 7 | echo "" 8 | echo " Ok" 9 | -------------------------------------------------------------------------------- /containers/mssql-seed-chinook.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MSSQLUSER=sa 4 | MSSQLPASS=testPassword123 5 | MSSQLADDRESS=localhost,1433 6 | # SCRIPT_DIR points to the absolute path of this file 7 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 8 | SEEDFILE=$SCRIPT_DIR/Chinook_SqlServer.sql 9 | METADATA_URL=http://localhost:8085/v1/metadata 10 | MSSQL_DB_URL="DRIVER={ODBC Driver 17 for SQL Server};SERVER=msserver;Uid=SA;Pwd=testPassword123" 11 | 12 | function mssql_wait { 13 | echo -n "Waiting for mssql to come up" 14 | until ( sqlcmd -S $MSSQLADDRESS -U $MSSQLUSER -P $MSSQLPASS -Q 'SELECT 1' ) &>/dev/null; do 15 | echo -n '.' 
&& sleep 0.2 16 | done 17 | echo " Ok" 18 | } 19 | 20 | mssql_wait 21 | 22 | echo "" 23 | echo "Adding SQL Server source" 24 | curl "$METADATA_URL" \ 25 | --data-raw '{"type":"mssql_add_source","args":{"name":"mssql","configuration":{"connection_info":{"connection_string":"'"$MSSQL_DB_URL"'"}}}}' 26 | 27 | echo "" 28 | echo "Sources added:" 29 | curl $METADATA_URL --data-raw '{"type":"export_metadata","args":{}}' 30 | 31 | echo "" 32 | echo "Seeding DB" 33 | sqlcmd -S $MSSQLADDRESS -U $MSSQLUSER -P $MSSQLPASS -i "$SEEDFILE" 34 | 35 | echo "" 36 | echo "Tracking tables" 37 | curl "$METADATA_URL" --data-binary "@$SCRIPT_DIR/mssql_track_chinook_tables.json" 38 | 39 | echo "" 40 | echo "Tracking foreign-key relationships" 41 | curl "$METADATA_URL" --data-binary "@$SCRIPT_DIR/mssql_track_chinook_relationships.json" 42 | -------------------------------------------------------------------------------- /containers/mssql-update-rows.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MSSQLUSER=sa 4 | MSSQLPASS=testPassword123 5 | MSSQLADDRESS=localhost,1433 6 | 7 | update_rows() { 8 | sqlcmd -S $MSSQLADDRESS -U $MSSQLUSER -P $MSSQLPASS \ 9 | -Q "UPDATE [dbo].[Album] SET Title = CONCAT(NewID(), GETUTCDATE()) WHERE 1=1;" 10 | } 11 | -------------------------------------------------------------------------------- /containers/mssql_track_chinook_relationships.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "bulk", 3 | "source": "mssql", 4 | "resource_version": 3, 5 | "args": [ 6 | { 7 | "type": "mssql_create_object_relationship", 8 | "args": { 9 | "name": "Artist", 10 | "table": { "name": "Album", "schema": "dbo" }, 11 | "using": { "foreign_key_constraint_on": "ArtistId" }, 12 | "source": "mssql" 13 | } 14 | }, 15 | { 16 | "type": "mssql_create_array_relationship", 17 | "args": { 18 | "name": "Tracks", 19 | "table": { "name": "Album", "schema": "dbo" }, 20 | "using": { 21 | "foreign_key_constraint_on": { 22 | "table": { "name": "Track", "schema": "dbo" }, 23 | "column": "AlbumId" 24 | } 25 | }, 26 | "source": "mssql" 27 | } 28 | }, 29 | { 30 | "type": "mssql_create_array_relationship", 31 | "args": { 32 | "name": "Albums", 33 | "table": { "name": "Artist", "schema": "dbo" }, 34 | "using": { 35 | "foreign_key_constraint_on": { 36 | "table": { "name": "Album", "schema": "dbo" }, 37 | "column": "ArtistId" 38 | } 39 | }, 40 | "source": "mssql" 41 | } 42 | }, 43 | { 44 | "type": "mssql_create_object_relationship", 45 | "args": { 46 | "name": "Employee", 47 | "table": { "name": "Customer", "schema": "dbo" }, 48 | "using": { "foreign_key_constraint_on": "SupportRepId" }, 49 | "source": "mssql" 50 | } 51 | }, 52 | { 53 | "type": "mssql_create_array_relationship", 54 | "args": { 55 | "name": "Invoices", 56 | "table": { "name": "Customer", "schema": "dbo" }, 57 | "using": { 58 | "foreign_key_constraint_on": { 59 | "table": { "name": "Invoice", "schema": "dbo" }, 60 | "column": "CustomerId" 61 | } 62 | }, 63 | "source": "mssql" 64 | } 65 | }, 66 | { 67 | "type": "mssql_create_object_relationship", 68 | "args": { 69 | "name": "Employee", 70 | "table": { "name": "Employee", "schema": "dbo" }, 71 | "using": { "foreign_key_constraint_on": "ReportsTo" }, 72 | "source": "mssql" 73 | } 74 | }, 75 | { 76 | "type": "mssql_create_array_relationship", 77 | "args": { 78 | "name": "Customers", 79 | "table": { "name": "Employee", "schema": "dbo" }, 80 | "using": { 81 | "foreign_key_constraint_on": { 82 | "table": { 
"name": "Customer", "schema": "dbo" }, 83 | "column": "SupportRepId" 84 | } 85 | }, 86 | "source": "mssql" 87 | } 88 | }, 89 | { 90 | "type": "mssql_create_array_relationship", 91 | "args": { 92 | "name": "Employees", 93 | "table": { "name": "Employee", "schema": "dbo" }, 94 | "using": { 95 | "foreign_key_constraint_on": { 96 | "table": { "name": "Employee", "schema": "dbo" }, 97 | "column": "ReportsTo" 98 | } 99 | }, 100 | "source": "mssql" 101 | } 102 | }, 103 | { 104 | "type": "mssql_create_array_relationship", 105 | "args": { 106 | "name": "Tracks", 107 | "table": { "name": "Genre", "schema": "dbo" }, 108 | "using": { 109 | "foreign_key_constraint_on": { 110 | "table": { "name": "Track", "schema": "dbo" }, 111 | "column": "GenreId" 112 | } 113 | }, 114 | "source": "mssql" 115 | } 116 | }, 117 | { 118 | "type": "mssql_create_object_relationship", 119 | "args": { 120 | "name": "Customer", 121 | "table": { "name": "Invoice", "schema": "dbo" }, 122 | "using": { "foreign_key_constraint_on": "CustomerId" }, 123 | "source": "mssql" 124 | } 125 | }, 126 | { 127 | "type": "mssql_create_array_relationship", 128 | "args": { 129 | "name": "InvoiceLines", 130 | "table": { "name": "Invoice", "schema": "dbo" }, 131 | "using": { 132 | "foreign_key_constraint_on": { 133 | "table": { "name": "InvoiceLine", "schema": "dbo" }, 134 | "column": "InvoiceId" 135 | } 136 | }, 137 | "source": "mssql" 138 | } 139 | }, 140 | { 141 | "type": "mssql_create_object_relationship", 142 | "args": { 143 | "name": "Invoice", 144 | "table": { "name": "InvoiceLine", "schema": "dbo" }, 145 | "using": { "foreign_key_constraint_on": "InvoiceId" }, 146 | "source": "mssql" 147 | } 148 | }, 149 | { 150 | "type": "mssql_create_object_relationship", 151 | "args": { 152 | "name": "Track", 153 | "table": { "name": "InvoiceLine", "schema": "dbo" }, 154 | "using": { "foreign_key_constraint_on": "TrackId" }, 155 | "source": "mssql" 156 | } 157 | }, 158 | { 159 | "type": "mssql_create_array_relationship", 160 | "args": { 161 | "name": "Tracks", 162 | "table": { "name": "MediaType", "schema": "dbo" }, 163 | "using": { 164 | "foreign_key_constraint_on": { 165 | "table": { "name": "Track", "schema": "dbo" }, 166 | "column": "MediaTypeId" 167 | } 168 | }, 169 | "source": "mssql" 170 | } 171 | }, 172 | { 173 | "type": "mssql_create_array_relationship", 174 | "args": { 175 | "name": "PlaylistTracks", 176 | "table": { "name": "Playlist", "schema": "dbo" }, 177 | "using": { 178 | "foreign_key_constraint_on": { 179 | "table": { "name": "PlaylistTrack", "schema": "dbo" }, 180 | "column": "PlaylistId" 181 | } 182 | }, 183 | "source": "mssql" 184 | } 185 | }, 186 | { 187 | "type": "mssql_create_object_relationship", 188 | "args": { 189 | "name": "Playlist", 190 | "table": { "name": "PlaylistTrack", "schema": "dbo" }, 191 | "using": { "foreign_key_constraint_on": "PlaylistId" }, 192 | "source": "mssql" 193 | } 194 | }, 195 | { 196 | "type": "mssql_create_object_relationship", 197 | "args": { 198 | "name": "Track", 199 | "table": { "name": "PlaylistTrack", "schema": "dbo" }, 200 | "using": { "foreign_key_constraint_on": "TrackId" }, 201 | "source": "mssql" 202 | } 203 | }, 204 | { 205 | "type": "mssql_create_object_relationship", 206 | "args": { 207 | "name": "Album", 208 | "table": { "name": "Track", "schema": "dbo" }, 209 | "using": { "foreign_key_constraint_on": "AlbumId" }, 210 | "source": "mssql" 211 | } 212 | }, 213 | { 214 | "type": "mssql_create_object_relationship", 215 | "args": { 216 | "name": "Genre", 217 | "table": { "name": "Track", 
"schema": "dbo" }, 218 | "using": { "foreign_key_constraint_on": "GenreId" }, 219 | "source": "mssql" 220 | } 221 | }, 222 | { 223 | "type": "mssql_create_object_relationship", 224 | "args": { 225 | "name": "MediaType", 226 | "table": { "name": "Track", "schema": "dbo" }, 227 | "using": { "foreign_key_constraint_on": "MediaTypeId" }, 228 | "source": "mssql" 229 | } 230 | }, 231 | { 232 | "type": "mssql_create_array_relationship", 233 | "args": { 234 | "name": "InvoiceLines", 235 | "table": { "name": "Track", "schema": "dbo" }, 236 | "using": { 237 | "foreign_key_constraint_on": { 238 | "table": { "name": "InvoiceLine", "schema": "dbo" }, 239 | "column": "TrackId" 240 | } 241 | }, 242 | "source": "mssql" 243 | } 244 | }, 245 | { 246 | "type": "mssql_create_array_relationship", 247 | "args": { 248 | "name": "PlaylistTracks", 249 | "table": { "name": "Track", "schema": "dbo" }, 250 | "using": { 251 | "foreign_key_constraint_on": { 252 | "table": { "name": "PlaylistTrack", "schema": "dbo" }, 253 | "column": "TrackId" 254 | } 255 | }, 256 | "source": "mssql" 257 | } 258 | } 259 | ] 260 | } 261 | -------------------------------------------------------------------------------- /containers/mssql_track_chinook_tables.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "bulk", 3 | "source": "mssql", 4 | "args": [ 5 | { 6 | "type": "mssql_track_table", 7 | "args": { 8 | "table": { 9 | "name": "Album", 10 | "schema": "dbo" 11 | }, 12 | "source": "mssql" 13 | } 14 | }, 15 | { 16 | "type": "mssql_track_table", 17 | "args": { 18 | "table": { 19 | "name": "Artist", 20 | "schema": "dbo" 21 | }, 22 | "source": "mssql" 23 | } 24 | }, 25 | { 26 | "type": "mssql_track_table", 27 | "args": { 28 | "table": { 29 | "name": "Customer", 30 | "schema": "dbo" 31 | }, 32 | "source": "mssql" 33 | } 34 | }, 35 | { 36 | "type": "mssql_track_table", 37 | "args": { 38 | "table": { 39 | "name": "Employee", 40 | "schema": "dbo" 41 | }, 42 | "source": "mssql" 43 | } 44 | }, 45 | { 46 | "type": "mssql_track_table", 47 | "args": { 48 | "table": { 49 | "name": "Genre", 50 | "schema": "dbo" 51 | }, 52 | "source": "mssql" 53 | } 54 | }, 55 | { 56 | "type": "mssql_track_table", 57 | "args": { 58 | "table": { 59 | "name": "Invoice", 60 | "schema": "dbo" 61 | }, 62 | "source": "mssql" 63 | } 64 | }, 65 | { 66 | "type": "mssql_track_table", 67 | "args": { 68 | "table": { 69 | "name": "InvoiceLine", 70 | "schema": "dbo" 71 | }, 72 | "source": "mssql" 73 | } 74 | }, 75 | { 76 | "type": "mssql_track_table", 77 | "args": { 78 | "table": { 79 | "name": "MSreplication_options", 80 | "schema": "dbo" 81 | }, 82 | "source": "mssql" 83 | } 84 | }, 85 | { 86 | "type": "mssql_track_table", 87 | "args": { 88 | "table": { 89 | "name": "MediaType", 90 | "schema": "dbo" 91 | }, 92 | "source": "mssql" 93 | } 94 | }, 95 | { 96 | "type": "mssql_track_table", 97 | "args": { 98 | "table": { 99 | "name": "Playlist", 100 | "schema": "dbo" 101 | }, 102 | "source": "mssql" 103 | } 104 | }, 105 | { 106 | "type": "mssql_track_table", 107 | "args": { 108 | "table": { 109 | "name": "PlaylistTrack", 110 | "schema": "dbo" 111 | }, 112 | "source": "mssql" 113 | } 114 | }, 115 | { 116 | "type": "mssql_track_table", 117 | "args": { 118 | "table": { 119 | "name": "Track", 120 | "schema": "dbo" 121 | }, 122 | "source": "mssql" 123 | } 124 | }, 125 | { 126 | "type": "mssql_track_table", 127 | "args": { 128 | "table": { 129 | "name": "spt_fallback_db", 130 | "schema": "dbo" 131 | }, 132 | "source": "mssql" 133 | } 134 | 
}, 135 | { 136 | "type": "mssql_track_table", 137 | "args": { 138 | "table": { 139 | "name": "spt_fallback_dev", 140 | "schema": "dbo" 141 | }, 142 | "source": "mssql" 143 | } 144 | }, 145 | { 146 | "type": "mssql_track_table", 147 | "args": { 148 | "table": { 149 | "name": "spt_fallback_usg", 150 | "schema": "dbo" 151 | }, 152 | "source": "mssql" 153 | } 154 | }, 155 | { 156 | "type": "mssql_track_table", 157 | "args": { 158 | "table": { 159 | "name": "spt_monitor", 160 | "schema": "dbo" 161 | }, 162 | "source": "mssql" 163 | } 164 | }, 165 | { 166 | "type": "mssql_track_table", 167 | "args": { 168 | "table": { 169 | "name": "spt_values", 170 | "schema": "dbo" 171 | }, 172 | "source": "mssql" 173 | } 174 | } 175 | ] 176 | } 177 | -------------------------------------------------------------------------------- /containers/psql-seed-chinook.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PGUSER=postgres 4 | PGPASS=postgrespassword 5 | PGADDRESS=localhost:5430 6 | # SCRIPT_DIR points to the absolute path of this file 7 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 8 | SEEDFILE=$SCRIPT_DIR/chinook_pg_serial_pk_proper_naming.sql 9 | METADATA_URL=http://localhost:8085/v1/metadata 10 | PG_URL=postgres://$PGUSER:$PGPASS@$PGADDRESS/postgres 11 | 12 | ./psql-wait.sh 13 | 14 | echo "" 15 | echo "Seeding DB" 16 | psql $PG_URL <$SEEDFILE 17 | 18 | echo "" 19 | echo "Tracking tables" 20 | curl "$METADATA_URL" --data-binary "@$SCRIPT_DIR/psql_track_chinook_tables.json" 21 | 22 | echo "" 23 | echo "Tracking foreign-key relationships" 24 | curl "$METADATA_URL" --data-binary "@$SCRIPT_DIR/psql_track_chinook_relationships.json" 25 | -------------------------------------------------------------------------------- /containers/psql-setup-events-table.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | PGUSER=postgres 3 | PGPASS=postgrespassword 4 | PGADDRESS=localhost:5430 5 | # SCRIPT_DIR points to the absolute path of this file 6 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 7 | EVENTS_TABLE=$SCRIPT_DIR/setup_events_table.sql 8 | 9 | ./psql-wait.sh 10 | psql postgres://$PGUSER:$PGPASS@$PGADDRESS/postgres <$EVENTS_TABLE 11 | -------------------------------------------------------------------------------- /containers/psql-update-rows.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PGUSER=postgres 4 | PGPASS=postgrespassword 5 | PGADDRESS=localhost:5430 6 | 7 | update_rows() { 8 | psql postgres://$PGUSER:$PGPASS@$PGADDRESS/postgres \ 9 | -c "UPDATE albums SET Title = CONCAT(gen_random_uuid(), now()) WHERE 1=1;" 10 | } 11 | -------------------------------------------------------------------------------- /containers/psql-wait.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | PGUSER=postgres 3 | PGPASS=postgrespassword 4 | PGADDRESS=localhost:5430 5 | 6 | echo -n "Waiting for postgres to come up" 7 | until ( psql postgres://$PGUSER:$PGPASS@$PGADDRESS/postgres -c '\l' ) &>/dev/null; do 8 | echo -n '.' 
&& sleep 0.2 9 | done 10 | echo " Ok" 11 | -------------------------------------------------------------------------------- /containers/psql_track_chinook_relationships.json: -------------------------------------------------------------------------------- 1 | {"type":"bulk","source":"default","resource_version":2,"args":[{"type":"pg_create_object_relationship","args":{"name":"artist","table":{"name":"albums","schema":"public"},"using":{"foreign_key_constraint_on":"artist_id"},"source":"default"}},{"type":"pg_create_array_relationship","args":{"name":"tracks","table":{"name":"albums","schema":"public"},"using":{"foreign_key_constraint_on":{"table":{"name":"tracks","schema":"public"},"column":"album_id"}},"source":"default"}},{"type":"pg_create_array_relationship","args":{"name":"albums","table":{"name":"artists","schema":"public"},"using":{"foreign_key_constraint_on":{"table":{"name":"albums","schema":"public"},"column":"artist_id"}},"source":"default"}},{"type":"pg_create_object_relationship","args":{"name":"employee","table":{"name":"customers","schema":"public"},"using":{"foreign_key_constraint_on":"support_rep_id"},"source":"default"}},{"type":"pg_create_array_relationship","args":{"name":"invoices","table":{"name":"customers","schema":"public"},"using":{"foreign_key_constraint_on":{"table":{"name":"invoices","schema":"public"},"column":"customer_id"}},"source":"default"}},{"type":"pg_create_object_relationship","args":{"name":"employee","table":{"name":"employees","schema":"public"},"using":{"foreign_key_constraint_on":"reports_to"},"source":"default"}},{"type":"pg_create_array_relationship","args":{"name":"customers","table":{"name":"employees","schema":"public"},"using":{"foreign_key_constraint_on":{"table":{"name":"customers","schema":"public"},"column":"support_rep_id"}},"source":"default"}},{"type":"pg_create_array_relationship","args":{"name":"employees","table":{"name":"employees","schema":"public"},"using":{"foreign_key_constraint_on":{"table":{"name":"employees","schema":"public"},"column":"reports_to"}},"source":"default"}},{"type":"pg_create_array_relationship","args":{"name":"tracks","table":{"name":"genres","schema":"public"},"using":{"foreign_key_constraint_on":{"table":{"name":"tracks","schema":"public"},"column":"genre_id"}},"source":"default"}},{"type":"pg_create_object_relationship","args":{"name":"customer","table":{"name":"invoices","schema":"public"},"using":{"foreign_key_constraint_on":"customer_id"},"source":"default"}},{"type":"pg_create_array_relationship","args":{"name":"invoice_lines","table":{"name":"invoices","schema":"public"},"using":{"foreign_key_constraint_on":{"table":{"name":"invoice_lines","schema":"public"},"column":"invoice_id"}},"source":"default"}},{"type":"pg_create_object_relationship","args":{"name":"invoice","table":{"name":"invoice_lines","schema":"public"},"using":{"foreign_key_constraint_on":"invoice_id"},"source":"default"}},{"type":"pg_create_object_relationship","args":{"name":"track","table":{"name":"invoice_lines","schema":"public"},"using":{"foreign_key_constraint_on":"track_id"},"source":"default"}},{"type":"pg_create_array_relationship","args":{"name":"tracks","table":{"name":"media_types","schema":"public"},"using":{"foreign_key_constraint_on":{"table":{"name":"tracks","schema":"public"},"column":"media_type_id"}},"source":"default"}},{"type":"pg_create_array_relationship","args":{"name":"playlist_tracks","table":{"name":"playlists","schema":"public"},"using":{"foreign_key_constraint_on":{"table":{"name":"playlist_track","schema":"
public"},"column":"playlist_id"}},"source":"default"}},{"type":"pg_create_object_relationship","args":{"name":"playlist","table":{"name":"playlist_track","schema":"public"},"using":{"foreign_key_constraint_on":"playlist_id"},"source":"default"}},{"type":"pg_create_object_relationship","args":{"name":"track","table":{"name":"playlist_track","schema":"public"},"using":{"foreign_key_constraint_on":"track_id"},"source":"default"}},{"type":"pg_create_object_relationship","args":{"name":"album","table":{"name":"tracks","schema":"public"},"using":{"foreign_key_constraint_on":"album_id"},"source":"default"}},{"type":"pg_create_object_relationship","args":{"name":"genre","table":{"name":"tracks","schema":"public"},"using":{"foreign_key_constraint_on":"genre_id"},"source":"default"}},{"type":"pg_create_object_relationship","args":{"name":"media_type","table":{"name":"tracks","schema":"public"},"using":{"foreign_key_constraint_on":"media_type_id"},"source":"default"}},{"type":"pg_create_array_relationship","args":{"name":"invoice_lines","table":{"name":"tracks","schema":"public"},"using":{"foreign_key_constraint_on":{"table":{"name":"invoice_lines","schema":"public"},"column":"track_id"}},"source":"default"}},{"type":"pg_create_array_relationship","args":{"name":"playlist_tracks","table":{"name":"tracks","schema":"public"},"using":{"foreign_key_constraint_on":{"table":{"name":"playlist_track","schema":"public"},"column":"track_id"}},"source":"default"}}]} 2 | -------------------------------------------------------------------------------- /containers/psql_track_chinook_tables.json: -------------------------------------------------------------------------------- 1 | {"type":"bulk","source":"default","resource_version":1,"args":[{"type":"pg_track_table","args":{"table":{"name":"actors","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"albums","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"artists","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"categories","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"customers","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"employees","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"events","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"film_actor","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"film_category","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"films","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"genres","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"invoice_lines","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"invoices","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"media_types","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"playlist_track","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"playlists","schema":"public"},"source":"default"}},{"type":"pg_track_table","args":{"table":{"name":"tracks","schema":"public"},"source":"default"}}]} 2 | -------------------------------------------------------------------------------- 
/containers/setup_events_table.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE public.events ( 2 | -- unique label to identify benchmark 3 | label text NOT NULL, 4 | -- connection_id represents the n'th connection 5 | connection_id int NOT NULL, 6 | operation_id int NOT NULL, 7 | -- event_number represents the nth event that was received by the client 8 | event_number int NOT NULL, 9 | -- event_data stores the payload that was received 10 | event_data jsonb NOT NULL, 11 | -- event_time stores the time at which the event was received by the client 12 | event_time timestamptz NOT NULL, 13 | -- is_error represents whether the event was an error or not 14 | is_error boolean NOT NULL, 15 | -- latency is not populated by the benchmark tool, but it can be populated afterwards by calculating event_time - (the time at which the triggering row update was made) 16 | latency int 17 | ); 18 | -------------------------------------------------------------------------------- /containers/update-rows.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # SCRIPT_DIR points to the absolute path of this file 4 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 5 | 6 | # backend can be psql or mssql 7 | backend="${1:-psql}" 8 | update_rows_file=$SCRIPT_DIR/${backend}-update-rows.sh 9 | 10 | sleep_interval_ms=1000 11 | test_duration_seconds=30 12 | float_sleep_interval=$(awk -v interval=$sleep_interval_ms 'BEGIN { print interval/1000 }') 13 | total_iters=$(( test_duration_seconds * 1000 / sleep_interval_ms )) 14 | echo "Starting at: $(date)" 15 | 16 | # load the backend-specific `update_rows` function (called in the loop below) 17 | source "$update_rows_file" 18 | 19 | for i in $(seq $total_iters) 20 | do 21 | update_rows 22 | echo "Triggered $i/$total_iters times" 23 | sleep "$float_sleep_interval" 24 | done 25 | echo "Finished at: $(date)" 26 | -------------------------------------------------------------------------------- /docker-run-test/config.mssql.subscription.yaml: -------------------------------------------------------------------------------- 1 | url: http://localhost:8085/v1/graphql 2 | db_connection_string: postgres://postgres:postgrespassword@localhost:5430/postgres 3 | headers: {} 4 | config: 5 | label: 'SearchAlbumsWithArtistMSSQL' 6 | max_connections: 20 7 | connections_per_second: 1 8 | insert_payload_data: false 9 | query: | 10 | subscription AlbumByIDSubscription($artistIds: [Int!]!) 
{ 11 | Album(where: {ArtistId: { _in: $artistIds}}) { 12 | AlbumId 13 | ArtistId 14 | Title 15 | } 16 | } 17 | variables: 18 | artistIds: [1, 2, 3, 4] 19 | -------------------------------------------------------------------------------- /docker-run-test/config.query.yaml: -------------------------------------------------------------------------------- 1 | url: http://localhost:8085/v1/graphql 2 | headers: 3 | X-Hasura-Admin-Secret: my-secret 4 | queries: 5 | - name: SearchAlbumsWithArtist 6 | tools: [k6, wrk2, autocannon] 7 | execution_strategy: REQUESTS_PER_SECOND 8 | rps: 500 9 | duration: 5s 10 | query: | 11 | query SearchAlbumsWithArtist { 12 | albums(where: {title: {_like: "%Rock%"}}) { 13 | id 14 | title 15 | artist { 16 | name 17 | id 18 | } 19 | } 20 | } 21 | # - name: AlbumByPK 22 | # tools: [autocannon, k6] 23 | # execution_strategy: FIXED_REQUEST_NUMBER 24 | # requests: 10000 25 | # query: | 26 | # query AlbumByPK { 27 | # albums_by_pk(id: 1) { 28 | # id 29 | # title 30 | # } 31 | # } 32 | # - name: AlbumByPKMultiStage 33 | # tools: [k6] 34 | # execution_strategy: MULTI_STAGE 35 | # initial_rps: 0 36 | # stages: 37 | # - duration: 5s 38 | # target: 100 39 | # - duration: 5s 40 | # target: 1000 41 | # query: | 42 | # query AlbumByPK { 43 | # albums_by_pk(id: 1) { 44 | # id 45 | # title 46 | # } 47 | # } 48 | -------------------------------------------------------------------------------- /docker-run-test/config.subscription.yaml: -------------------------------------------------------------------------------- 1 | url: http://localhost:8085/v1/graphql 2 | db_connection_string: postgres://postgres:postgrespassword@localhost:5430/postgres 3 | headers: 4 | X-Hasura-Admin-Secret: my-secret 5 | config: 6 | label: SearchAlbumsWithArtistUpdated 7 | max_connections: 20 8 | connections_per_second: 10 9 | insert_payload_data: true 10 | query: | 11 | subscription AlbumByIDSubscription($artistIds: [Int!]!) { 12 | albums(where: {artist_id: { _in: $artistIds}}) { 13 | id 14 | artist_id 15 | title 16 | } 17 | } 18 | variables: 19 | # some_value: a_string 20 | # some_range: { start: 1, end: 10 } 21 | # another_range: { start: 50, end: 100 } 22 | # some_number: 10 23 | artistIds: [1, 2, 3, 4] 24 | # some_object: 25 | # a_key: a_value 26 | -------------------------------------------------------------------------------- /docker-run-test/override-entrypoint-run-shell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # A quick sample script demonstrating how to override a Docker container's ENTRYPOINT to get an interactive shell 4 | docker run -it --entrypoint "/bin/sh" graphql-bench-test:latest 5 | -------------------------------------------------------------------------------- /docker-run-test/run-query-bench-docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Run the CLI in a container, with the "localhost" network shared and the current directory mounted at "/app/tmp" 4 | # The CLI looks up the config file relative to process.cwd(), and the image's WORKDIR is set to /app, so CWD is always "/app" 5 | # You therefore need a path relative to /app that points at wherever you mounted your config files. 6 | # I.e., if your config YAML files on the host are in the current directory (AKA "$PWD"), mount "$PWD" at a directory under /app (this script uses /app/tmp), 7 | # and then use "./tmp/..." as the base for config and report output paths.
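# (Illustrative note, not in the original script: with the settings below, the
# host directory $SCRIPT_DIR is mounted at /app/tmp inside the container, so a
# host file "$SCRIPT_DIR/config.query.yaml" is addressed from the container's
# /app WORKDIR as "./tmp/config.query.yaml".)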
8 | 9 | # SCRIPT_DIR points to the absolute path of this file 10 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 11 | 12 | docker run --net=host -v "$SCRIPT_DIR":/app/tmp -it \ 13 | graphql-bench-local:latest query \ 14 | --config="./tmp/config.query.yaml" \ 15 | --outfile="./tmp/report.json" 16 | -------------------------------------------------------------------------------- /docker-run-test/run-subscription-bench-docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Run the CLI in a container, with the "localhost" network shared and the current directory mounted at "/app/tmp" 4 | # The CLI looks up the config file relative to process.cwd(), and the image's WORKDIR is set to /app, so CWD is always "/app" 5 | # You therefore need a path relative to /app that points at wherever you mounted your config files. 6 | # I.e., if your config YAML files on the host are in the current directory (AKA "$PWD"), mount "$PWD" at a directory under /app (this script uses /app/tmp), 7 | # and then use "./tmp/..." as the base for config and report output paths. 8 | 9 | # SCRIPT_DIR points to the absolute path of this file 10 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 11 | subscription_config=${1:-config.subscription.yaml} 12 | 13 | docker run --name containers_graphql_bench_subs_1 --net=host -v "$SCRIPT_DIR":/app/tmp -it \ 14 | graphql-bench-local:latest subscription \ 15 | --config="./tmp/$subscription_config" 16 | -------------------------------------------------------------------------------- /makefile: -------------------------------------------------------------------------------- 1 | .PHONY: run_docker_query_bench run_docker_subscription_bench seed_chinook_database help 2 | 3 | 00000000: ## -------------------------------------------------- 4 | 00README: ## RECOMMENDED PROCESS: setup_containers -> seed_chinook_database -> build_local_docker_image -> run_docker_query_bench 5 | 00TIPS: ## You can alter the query & subscription benchmark config in ./docker-run-test/config.(query|subscription).yaml 6 | 01TIPS: ## By default it works with the Hasura & Chinook setup provided here. 
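02TIPS: ## (added note) One-shot flows also exist: "make benchmark_psql" chains setup_psql -> run_benchmark_psql -> cleanup, and "make benchmark_mssql" does the same for SQL Server (see the targets near the end of this makefile)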
7 | 88888888: ## -------------------------------------------------- 8 | 9 | build_local_docker_image: ## Builds and tags a local docker image of graphql-bench 10 | docker build -t graphql-bench-local:latest ./app 11 | 12 | run_docker_query_bench: ## Runs local docker container query benchmark, using config.query.yaml in ./docker-run-test 13 | ./docker-run-test/run-query-bench-docker.sh 14 | 15 | run_docker_subscription_bench: ## Runs local docker container subscription benchmark, using the default config.subscription.yaml in ./docker-run-test 16 | ./docker-run-test/run-subscription-bench-docker.sh "config.subscription.yaml" 17 | 18 | run_docker_subscription_bench_mssql: ## Runs local docker container subscription benchmark, using config.mssql.subscription.yaml in ./docker-run-test 19 | ./docker-run-test/run-subscription-bench-docker.sh "config.mssql.subscription.yaml" 20 | 21 | setup_containers: ## Sets up Hasura, Postgres and SQL Server Docker containers 22 | cd containers && docker-compose up -d --force-recreate && ./graphql-wait.sh 23 | 24 | seed_chinook_database: ## Creates Chinook database schema & seed data in Hasura for testing (Postgres) 25 | ./containers/psql-seed-chinook.sh 26 | 27 | seed_chinook_database_mssql: ## Creates Chinook database schema & seed data in Hasura for testing (SQL Server) 28 | ./containers/mssql-seed-chinook.sh 29 | 30 | setup_events_table: ## Sets up events table for subscriptions 31 | ./containers/psql-setup-events-table.sh 32 | 33 | run_update_rows: ## Updates rows to trigger data events (Postgres) 34 | ./containers/update-rows.sh psql 35 | 36 | run_update_rows_mssql: ## Updates rows to trigger data events (SQL Server) 37 | ./containers/update-rows.sh mssql 38 | 39 | start_container_report: ## Starts cadvisor for Docker container stats reporting 40 | ./reports/start-cadvisor.sh 41 | 42 | cleanup: 43 | cd containers && docker-compose stop && docker-compose rm && docker rm -f containers_graphql_bench_subs_1 44 | 45 | # OS is assigned at parse time (make syntax, not shell), so it sits outside the recipes 46 | OS := $(shell uname) 47 | install_wrk2: ## Handles installing or cloning and compiling wrk2 from source on either Mac or Debian-based Linux (for local non-Docker development) 48 | ifeq ($(OS),Darwin) 49 | brew tap jabley/homebrew-wrk2 50 | brew install --HEAD wrk2 51 | else 
52 | # Installs the build tools, openssl dev libs (including headers), and git. Then uses git to download wrk2 and build it. 53 | sudo apt-get update 54 | sudo apt-get install -y build-essential libssl-dev git zlib1g-dev 55 | git clone https://github.com/giltene/wrk2.git 56 | # each recipe line runs in its own shell, so the cd and make must be chained on one line 57 | cd wrk2 && make 58 | # Move the executable to somewhere in your PATH 59 | sudo cp wrk2/wrk /usr/local/bin 60 | # Finally, delete the cloned repo folder (no need to cd back; each line starts a fresh shell) 61 | sudo rm -rf wrk2 62 | endif 63 | 64 | install_k6: ## Handles installing k6 on either Mac or Debian-based Linux (for local non-Docker development) 65 | ifeq ($(OS),Darwin) 66 | brew install k6 67 | else 68 | # NOTE: Bintray has been sunset; if this repository is unreachable, consult the current k6 install docs 69 | sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 379CE192D401AB61 70 | echo "deb https://dl.bintray.com/loadimpact/deb stable main" | sudo tee -a /etc/apt/sources.list 71 | sudo apt-get update 72 | sudo apt-get install k6 73 | endif 74 | 75 | setup_all: setup_containers setup_events_table ## Sets up containers and creates the subscriptions events table 76 | 77 | # Subscriptions / Postgres 78 | setup_psql: setup_containers setup_events_table seed_chinook_database build_local_docker_image 79 | run_benchmark_psql: run_docker_subscription_bench run_update_rows 80 | benchmark_psql: setup_psql run_benchmark_psql cleanup 81 | 82 | # Subscriptions / SQL Server 83 | setup_mssql: setup_containers setup_events_table seed_chinook_database_mssql build_local_docker_image 84 | run_benchmark_mssql: run_docker_subscription_bench_mssql run_update_rows_mssql 85 | benchmark_mssql: setup_mssql run_benchmark_mssql cleanup 86 | 87 | help: 88 | @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' 89 | 90 | .DEFAULT_GOAL := help 91 | -------------------------------------------------------------------------------- /readme_images/autocannon-output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hasura/graphql-bench/2163a70a5324766521b8d6ceb7a73b8f0d12688d/readme_images/autocannon-output.png -------------------------------------------------------------------------------- /readme_images/autocannon-report.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hasura/graphql-bench/2163a70a5324766521b8d6ceb7a73b8f0d12688d/readme_images/autocannon-report.png -------------------------------------------------------------------------------- /readme_images/k6-output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hasura/graphql-bench/2163a70a5324766521b8d6ceb7a73b8f0d12688d/readme_images/k6-output.png -------------------------------------------------------------------------------- /readme_images/k6s-report.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hasura/graphql-bench/2163a70a5324766521b8d6ceb7a73b8f0d12688d/readme_images/k6s-report.png -------------------------------------------------------------------------------- /readme_images/npx-serve-output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hasura/graphql-bench/2163a70a5324766521b8d6ceb7a73b8f0d12688d/readme_images/npx-serve-output.png -------------------------------------------------------------------------------- /readme_images/serve-index.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hasura/graphql-bench/2163a70a5324766521b8d6ceb7a73b8f0d12688d/readme_images/serve-index.png -------------------------------------------------------------------------------- /reports/start-cadvisor.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | VERSION=v0.36.0 # use the latest release version from https://github.com/google/cadvisor/releases 3 | 4 | echo "Starting cAdvisor..." 5 | echo "Visit localhost:8090 to view Docker container statistics" 6 | 7 | docker run \ 8 | --volume=/:/rootfs:ro \ 9 | --volume=/var/run:/var/run:ro \ 10 | --volume=/sys:/sys:ro \ 11 | --volume=/var/lib/docker/:/var/lib/docker:ro \ 12 | --volume=/dev/disk/:/dev/disk:ro \ 13 | --publish=8090:8080 \ 14 | --detach=true \ 15 | --name=cadvisor \ 16 | --privileged \ 17 | --device=/dev/kmsg \ 18 | gcr.io/cadvisor/cadvisor:$VERSION 19 | --------------------------------------------------------------------------------
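A quick way to verify the cAdvisor container came up (a sketch, not part of the original script; the port matches the --publish flag above, and the v1.3 REST path is an assumption based on cAdvisor's docs):

# print the first part of cAdvisor's machine-info JSON as a smoke test
curl -s http://localhost:8090/api/v1.3/machine | head -c 200 && echo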