├── .clabot ├── .deepsource.toml ├── .dockerignore ├── .github ├── dependabot.yaml └── workflows │ ├── ci-docker.yaml │ ├── ci-test.yaml │ ├── codesee-arch-diagram.yml │ ├── probe.yaml │ └── semgrep.yml ├── .gitignore ├── .gitmodules ├── .request ├── COMM-LICENSE ├── Dockerfile ├── Dockerfile.dev ├── ELASTICSEARCH.md ├── LICENSE.txt ├── Makefile ├── Procfile ├── README.md ├── ce.go ├── circle.yml ├── cmd └── gor │ └── gor.go ├── docs ├── CNAME ├── Capturing-and-replaying-traffic.md ├── Compilation.md ├── Development-Setup.md ├── Distributed-configuration.md ├── Exporting-to-ElasticSearch.md ├── FAQ.md ├── Middleware.md ├── Rate-limiting.md ├── Replaying-HTTP-traffic.md ├── Request-filtering.md ├── Request-rewriting.md ├── Running-as-non-root-user.md ├── Saving-and-Replaying-from-file.md ├── Troubleshooting.md ├── _Footer.md ├── _config.yml ├── commercial │ ├── collaboration.md │ ├── faq.md │ └── support.md ├── css │ ├── breadcrumbs.css │ ├── code.css │ ├── fabric.css │ ├── goreplay.css │ └── sidenav.css ├── getting-started │ ├── basics.md │ └── tutorial.md ├── index.md ├── js │ ├── base.js │ └── turbolinks.js └── pro │ ├── recording-and-replaying-keep-alive-tcp-sessions.md │ └── replaying-binary-protocols.md ├── elasticsearch.go ├── elasticsearch_test.go ├── emitter.go ├── emitter_test.go ├── examples └── middleware │ ├── echo.clj │ ├── echo.java │ ├── echo.js │ ├── echo.py │ ├── echo.rb │ ├── echo.sh │ └── token_modifier.go ├── go.mod ├── go.sum ├── gor_stat.go ├── homebrew └── gor.rb ├── http_modifier.go ├── http_modifier_settings.go ├── http_modifier_settings_test.go ├── http_modifier_test.go ├── http_prettifier.go ├── http_prettifier_test.go ├── input_dummy.go ├── input_file.go ├── input_file_test.go ├── input_http.go ├── input_http_test.go ├── input_kafka.go ├── input_kafka_test.go ├── input_raw.go ├── input_raw_test.go ├── input_tcp.go ├── input_tcp_test.go ├── internal ├── byteutils │ ├── byteutils.go │ └── byteutils_test.go ├── capture │ ├── af_packet.go │ ├── af_packet_linux.go │ ├── capture.go │ ├── capture_test.go │ ├── doc.go │ ├── dump.go │ ├── sock_linux.go │ ├── sock_others.go │ ├── socket.go │ └── vxlan.go ├── ring │ └── ring.go ├── simpletime │ └── time.go ├── size │ ├── size.go │ └── size_test.go └── tcp │ ├── doc.go │ ├── tcp_message.go │ ├── tcp_packet.go │ └── tcp_test.go ├── k8s ├── README.md ├── clusterrole.yaml ├── collect_goreplay_telemetry.sh ├── goreplay.yaml ├── nginx.yaml └── rolebinding.yaml ├── kafka.go ├── limiter.go ├── limiter_test.go ├── middleware.go ├── middleware ├── README.md ├── middleware.js └── package.json ├── middleware_test.go ├── mkdocs.yml ├── nfpm.yaml ├── output_binary.go ├── output_binary_pro.go ├── output_dummy.go ├── output_file.go ├── output_file_test.go ├── output_http.go ├── output_http_test.go ├── output_kafka.go ├── output_kafka_test.go ├── output_null.go ├── output_s3.go ├── output_s3_pro.go ├── output_tcp.go ├── output_tcp_test.go ├── output_ws.go ├── output_ws_test.go ├── plugins.go ├── plugins_test.go ├── pro.go ├── proto ├── fuzz.go ├── proto.go └── proto_test.go ├── protocol.go ├── s3 └── index.html ├── s3_reader.go ├── s3_test.go ├── settings.go ├── settings_test.go ├── sidenav.css ├── site ├── .gitignore ├── Gemfile ├── Gemfile.lock ├── _config.yml ├── _posts │ └── 2017-01-06-welcome-to-jekyll.markdown ├── about.md └── index.md ├── snapcraft.yaml ├── tcp_client.go ├── test_input.go ├── test_output.go └── version.go /.clabot: -------------------------------------------------------------------------------- 1 | { 2 | 
"contributors": ["buger"] 3 | } 4 | -------------------------------------------------------------------------------- /.deepsource.toml: -------------------------------------------------------------------------------- 1 | version = 1 2 | 3 | exclude_patterns = [ 4 | "vendor/**" 5 | ] 6 | 7 | [[analyzers]] 8 | name = "go" 9 | enabled = true 10 | 11 | [analyzers.meta] 12 | import_paths = ["github.com/ankitdobhal/goreplay"] 13 | 14 | [[analyzers]] 15 | name = "docker" 16 | enabled = true 17 | 18 | [[analyzers]] 19 | name = "ruby" 20 | enabled = true 21 | 22 | [[analyzers]] 23 | name = "javascript" 24 | enabled = true 25 | 26 | [analyzers.meta] 27 | environment = ["nodejs"] 28 | 29 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | *.tar.gz 2 | gor 3 | gor.test 4 | -------------------------------------------------------------------------------- /.github/dependabot.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # Maintain dependencies for GitHub Actions 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "weekly" 8 | pull-request-branch-name: 9 | # Separate sections of the branch name with a hyphen 10 | separator: "-" 11 | 12 | - package-ecosystem: "gomod" 13 | directory: "/" 14 | schedule: 15 | interval: "weekly" 16 | 17 | - package-ecosystem: "docker" 18 | directory: "/" 19 | schedule: 20 | interval: "weekly" 21 | 22 | -------------------------------------------------------------------------------- /.github/workflows/ci-docker.yaml: -------------------------------------------------------------------------------- 1 | name: ci 2 | on: 3 | release: 4 | types: [published] 5 | jobs: 6 | docker-build-and-push: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - 10 | name: "Checkout repository" 11 | uses: actions/checkout@v3 12 | - 13 | name: "Set up Docker Buildx" 14 | uses: docker/setup-buildx-action@v2 15 | - 16 | name: "Cache Docker layers" 17 | uses: actions/cache@v3.0.5 18 | with: 19 | path: /tmp/.buildx-cache 20 | key: ${{ runner.os }}-buildx-${{ github.sha }} 21 | restore-keys: | 22 | ${{ runner.os }}-buildx- 23 | - 24 | name: "Login to Container Registry" 25 | uses: docker/login-action@v1 26 | with: 27 | username: ${{ secrets.DOCKERHUB_USERNAME }} 28 | password: ${{ secrets.DOCKERHUB_TOKEN }} 29 | - 30 | name: "Build and push docker image" 31 | uses: docker/build-push-action@v3 32 | with: 33 | context: . 
34 | push: true 35 | tags: ${{ github.repository }}:${{ github.event.release.tag_name }} 36 | build-args: RELEASE_VERSION=${{ github.event.release.tag_name }} 37 | cache-from: type=local,src=/tmp/.buildx-cache 38 | cache-to: type=local,mode=max,dest=/tmp/.buildx-cache 39 | -------------------------------------------------------------------------------- /.github/workflows/ci-test.yaml: -------------------------------------------------------------------------------- 1 | name: test 2 | on: [push, pull_request] 3 | jobs: 4 | test: 5 | strategy: 6 | matrix: 7 | go-version: [1.18.x, 1.19.x] # two latest minor versions 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: update package index 11 | run: sudo apt-get update 12 | - name: install libpcap 13 | run: sudo apt-get install libpcap-dev -y 14 | - name: install Go 15 | uses: actions/setup-go@v2 16 | with: 17 | go-version: ${{ matrix.go-version }} 18 | - name: checkout code 19 | uses: actions/checkout@v3 20 | - uses: actions/cache@v3.0.5 21 | with: 22 | path: | 23 | ~/go/pkg/mod # Module download cache 24 | ~/.cache/go-build # Build cache (Linux) 25 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 26 | restore-keys: | 27 | ${{ runner.os }}-go- 28 | - name: test 29 | run: sudo go test ./... -v -timeout 120s 30 | -------------------------------------------------------------------------------- /.github/workflows/codesee-arch-diagram.yml: -------------------------------------------------------------------------------- 1 | # This workflow was added by CodeSee. Learn more at https://codesee.io/ 2 | # This is v2.0 of this workflow file 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request_target: 8 | types: [opened, synchronize, reopened] 9 | 10 | name: CodeSee 11 | 12 | permissions: read-all 13 | 14 | jobs: 15 | codesee: 16 | runs-on: ubuntu-latest 17 | continue-on-error: true 18 | name: Analyze the repo with CodeSee 19 | steps: 20 | - uses: Codesee-io/codesee-action@v2 21 | with: 22 | codesee-token: ${{ secrets.CODESEE_ARCH_DIAG_API_TOKEN }} 23 | codesee-url: https://app.codesee.io 24 | -------------------------------------------------------------------------------- /.github/workflows/probe.yaml: -------------------------------------------------------------------------------- 1 | name: AI Comment Handler 2 | 3 | 4 | on: 5 | pull_request: 6 | types: [opened] #[opened , labeled] 7 | issue_comment: 8 | types: [created] 9 | issues: 10 | types: [opened] #[opened, labeled] 11 | 12 | # Define permissions needed for the workflow 13 | permissions: 14 | issues: write 15 | pull-requests: write 16 | contents: read 17 | 18 | jobs: 19 | trigger_probe_chat: 20 | # Uncomment if you want to run on on specific lables, in this example `probe` 21 | # if: | 22 | # (github.event_name == 'pull_request' && github.event.action == 'opened') || 23 | # (github.event_name == 'issues' && github.event.action == 'opened') || 24 | # (github.event_name == 'issue_comment' && github.event.action == 'created') || 25 | # ((github.event_name == 'pull_request' || github.event_name == 'issues') && 26 | # github.event.action == 'labeled' && github.event.label.name == 'probe') 27 | # Use the reusable workflow from your repository (replace and ) 28 | uses: buger/probe/.github/workflows/probe.yml@main 29 | # Pass required inputs 30 | with: 31 | command_prefix: "/probe" # Or '/ai', '/ask', etc. 
32 | # Optionally override the default npx command if the secret isn't set 33 | # default_probe_chat_command: 'node path/to/custom/script.js' 34 | # Pass ALL secrets from this repository to the reusable workflow 35 | # This includes GITHUB_TOKEN, PROBE_CHAT_COMMAND (if set), ANTHROPIC_API_KEY, etc. 36 | secrets: 37 | ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} 38 | ANTHROPIC_API_URL: ${{ secrets.ANTHROPIC_API_URL }} 39 | -------------------------------------------------------------------------------- /.github/workflows/semgrep.yml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: {} 3 | push: 4 | branches: 5 | - main 6 | - master 7 | paths: 8 | - .github/workflows/semgrep.yml 9 | schedule: 10 | # random HH:MM to avoid a load spike on GitHub Actions at 00:00 11 | - cron: 53 23 * * * 12 | name: Semgrep 13 | jobs: 14 | semgrep: 15 | name: Scan 16 | runs-on: ubuntu-20.04 17 | env: 18 | SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }} 19 | container: 20 | image: returntocorp/semgrep 21 | steps: 22 | - uses: actions/checkout@v3 23 | - run: semgrep ci 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | vendor 2 | *.swp 3 | *.gor 4 | *.rpm 5 | *.dep 6 | *.deb 7 | *.pkg 8 | *.exe 9 | *.pprof 10 | *.out 11 | hey 12 | 13 | *.bin 14 | lib/ 15 | output/ 16 | *.gz 17 | *.zip 18 | .aider* 19 | 20 | *.class 21 | 22 | *.test 23 | .idea 24 | *.iml 25 | gor 26 | 27 | *.mprof 28 | 29 | *.pcap 30 | 31 | .DS_Store 32 | 33 | goreplay 34 | corpus 35 | crashers 36 | suppressions 37 | dist 38 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/buger/goreplay/251e45abd242886bb64ff2b2dc98789556b56330/.gitmodules -------------------------------------------------------------------------------- /.request: -------------------------------------------------------------------------------- 1 | POST /post HTTP/1.1 2 | Content-Length: 7 3 | Host: www.w3.org 4 | 5 | a=1&b=2 6 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.16 as builder 2 | 3 | ARG RELEASE_VERSION 4 | 5 | RUN apk add --no-cache ca-certificates openssl 6 | RUN wget https://github.com/buger/goreplay/releases/download/${RELEASE_VERSION}/gor_${RELEASE_VERSION}_x64.tar.gz -O gor.tar.gz 7 | RUN tar xzf gor.tar.gz 8 | 9 | FROM scratch 10 | COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ 11 | COPY --from=builder /gor . 12 | ENTRYPOINT ["./gor"] 13 | -------------------------------------------------------------------------------- /Dockerfile.dev: -------------------------------------------------------------------------------- 1 | ARG BASE_IMAGE 2 | FROM ${BASE_IMAGE} 3 | 4 | RUN apk add --no-cache \ 5 | gcc \ 6 | g++ \ 7 | make \ 8 | linux-headers \ 9 | bison \ 10 | flex \ 11 | git \ 12 | wget 13 | 14 | RUN wget http://www.tcpdump.org/release/libpcap-1.10.0.tar.gz && tar xzf libpcap-1.10.0.tar.gz && cd libpcap-1.10.0 && ./configure && make install 15 | 16 | WORKDIR /go/src/github.com/buger/goreplay/ 17 | ADD . 
/go/src/github.com/buger/goreplay/ 18 | 19 | RUN go get golang.org/x/lint/golint 20 | RUN go get 21 | -------------------------------------------------------------------------------- /ELASTICSEARCH.md: -------------------------------------------------------------------------------- 1 | gor & elasticsearch 2 | =================== 3 | 4 | Prerequisites 5 | ------------- 6 | 7 | - elasticsearch 8 | - kibana (Get it here: http://www.elasticsearch.org/overview/kibana/) 9 | - gor 10 | 11 | 12 | elasticsearch 13 | ------------- 14 | 15 | The default elasticsearch configuration is just fine for most workloads. You won't need clustering, sharding or something like that. 16 | 17 | In this example we're installing it on our gor replay server which gives us the elasticsearch listener on _http://localhost:9200_ 18 | 19 | 20 | kibana 21 | ------ 22 | 23 | Kibana (elasticsearch analytics web-ui) is just as simple. 24 | Download it, extract it and serve it via a simple webserver. 25 | (Could be nginx or apache) 26 | 27 | You could also use a shell, ```cd``` into the kibana directory and start a little quick and dirty python webserver with: 28 | 29 | ``` 30 | python -m SimpleHTTPServer 8000 31 | ``` 32 | 33 | In this example we're also choosing the gor replay server as our kibana host. If you choose a different server you'll have to point kibana to your elasticsearch host. 34 | 35 | 36 | gor 37 | --- 38 | 39 | Start your gor replay server with elasticsearch option: 40 | 41 | ``` 42 | ./gor --input-raw :8000 --output-http http://staging.com --output-http-elasticsearch localhost:9200/gor 43 | ``` 44 | 45 | 46 | (You don't have to create the index upfront. That will be done for you automatically) 47 | 48 | 49 | Now visit your kibana url, load the predefined dashboard from the gist https://gist.github.com/gottwald/b2c875037f24719a9616 and watch the data rush in. 50 | 51 | 52 | Troubleshooting 53 | --------------- 54 | 55 | The replay process may complain about __too many open files__. 56 | That's because your typical linux shell has a small open files soft limit at 1024. 57 | You can easily raise that when you do this before starting your _gor replay_ process: 58 | 59 | ``` 60 | ulimit -n 64000 61 | ``` 62 | 63 | Please be aware, this is not a permanent setting. It's just valid for the following jobs you start from that shell. 64 | 65 | We reached the 1024 limit in our tests with a ubuntu box replaying about 9000 requests per minute. (We had very slow responses there, should be way more with fast responses) 66 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2011-present Leonid Bugaev 2 | 3 | Portions of this software are licensed as follows: 4 | 5 | * All content residing under the "doc/" directory of this repository is licensed under "Creative Commons: CC BY-SA 4.0 license". 6 | * The file "pro.go" and all files ending with the "_pro.go" suffix are released under the commercial license specified in the "COMM-LICENSE" file. 7 | * Content outside of the above mentioned directories or restrictions above is available under the "LGPLv3" license as defined below. 8 | 9 | 10 | GoReplay is an Open Source project licensed under the terms of 11 | the LGPLv3 license. Please see 12 | for license text. 
13 | 14 | As a special exception to the GNU Lesser General Public License version 3 15 | ("LGPL3"), the copyright holders of this Library give you permission to 16 | convey to a third party a Combined Work that links statically or dynamically 17 | to this Library without providing any Minimal Corresponding Source or 18 | Minimal Application Code as set out in 4d or providing the installation 19 | information set out in section 4e, provided that you comply with the other 20 | provisions of LGPL3 and provided that you meet, for the Application the 21 | terms and conditions of the license(s) which apply to the Application. 22 | 23 | TLDR: You are free to use Gor subpackages like `byteutils` or `proto` in your commercial projects. 24 | 25 | 26 | GoReplay Pro has a commercial-friendly license allowing private forks 27 | and modifications of GoReplay. Please see https://goreplay.org/pro.html for 28 | more detail. You can find the commercial license terms in COMM-LICENSE. 29 | -------------------------------------------------------------------------------- /Procfile: -------------------------------------------------------------------------------- 1 | web: python -m SimpleHTTPServer 8000 2 | replayed_web: python -m SimpleHTTPServer 8001 3 | listener: sudo -E go run ./bin/gor.go --input-raw :8000 --output-tcp :8002 --verbose 4 | replay: go run ./bin/gor.go --input-tcp :8002 --output-http localhost:8001 --verbose 5 | -------------------------------------------------------------------------------- /ce.go: -------------------------------------------------------------------------------- 1 | //go:build !pro 2 | 3 | package goreplay 4 | 5 | import ( 6 | "fmt" 7 | ) 8 | 9 | // PRO this value indicates if goreplay is running in PRO mode. 10 | var PRO = false 11 | 12 | func SettingsHook(settings *AppSettings) { 13 | if settings.RecognizeTCPSessions { 14 | settings.RecognizeTCPSessions = false 15 | fmt.Println("[ERROR] TCP session recognition is not supported in the open-source version of GoReplay") 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /circle.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | pre: 3 | - sudo apt-get install libpcap-dev -y 4 | 5 | test: 6 | override: 7 | - sudo bash -l -c "export GOPATH='/home/ubuntu/.go_workspace:/usr/local/go_workspace:/home/ubuntu/.go_project' && GORACE='halt_on_error=1' /usr/local/go/bin/go test ./... -v -timeout 120s -race" -------------------------------------------------------------------------------- /cmd/gor/gor.go: -------------------------------------------------------------------------------- 1 | // Gor is simple http traffic replication tool written in Go. Its main goal to replay traffic from production servers to staging and dev environments. 2 | // Now you can test your code on real user sessions in an automated and repeatable fashion. 
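// A typical invocation (hostnames here are placeholders) looks like:
//
//	sudo gor --input-raw :80 --output-http http://staging.env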
3 | package main 4 | 5 | import ( 6 | "expvar" 7 | "flag" 8 | "fmt" 9 | "github.com/buger/goreplay" 10 | "log" 11 | "net/http" 12 | "net/http/httputil" 13 | httppptof "net/http/pprof" 14 | "os" 15 | "os/signal" 16 | "runtime" 17 | "runtime/pprof" 18 | "syscall" 19 | "time" 20 | ) 21 | 22 | var ( 23 | cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file") 24 | memprofile = flag.String("memprofile", "", "write memory profile to this file") 25 | ) 26 | 27 | func init() { 28 | var defaultServeMux http.ServeMux 29 | http.DefaultServeMux = &defaultServeMux 30 | 31 | http.HandleFunc("/debug/vars", func(w http.ResponseWriter, r *http.Request) { 32 | w.Header().Set("Content-Type", "application/json; charset=utf-8") 33 | fmt.Fprintf(w, "{\n") 34 | first := true 35 | expvar.Do(func(kv expvar.KeyValue) { 36 | if kv.Key == "memstats" || kv.Key == "cmdline" { 37 | return 38 | } 39 | 40 | if !first { 41 | fmt.Fprintf(w, ",\n") 42 | } 43 | first = false 44 | fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) 45 | }) 46 | fmt.Fprintf(w, "\n}\n") 47 | }) 48 | 49 | http.HandleFunc("/debug/pprof/", httppptof.Index) 50 | http.HandleFunc("/debug/pprof/cmdline", httppptof.Cmdline) 51 | http.HandleFunc("/debug/pprof/profile", httppptof.Profile) 52 | http.HandleFunc("/debug/pprof/symbol", httppptof.Symbol) 53 | http.HandleFunc("/debug/pprof/trace", httppptof.Trace) 54 | } 55 | 56 | func loggingMiddleware(addr string, next http.Handler) http.Handler { 57 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 58 | if r.URL.Path == "/loop" { 59 | _, err := http.Get("http://" + addr) 60 | log.Println(err) 61 | } 62 | 63 | rb, _ := httputil.DumpRequest(r, false) 64 | log.Println(string(rb)) 65 | next.ServeHTTP(w, r) 66 | }) 67 | } 68 | 69 | func main() { 70 | if os.Getenv("GOMAXPROCS") == "" { 71 | runtime.GOMAXPROCS(runtime.NumCPU() * 2) 72 | } 73 | 74 | args := os.Args[1:] 75 | var plugins *goreplay.InOutPlugins 76 | if len(args) > 0 && args[0] == "file-server" { 77 | if len(args) != 2 { 78 | log.Fatal("You should specify port and IP (optional) for the file server. 
Example: `gor file-server :80`") 79 | } 80 | dir, _ := os.Getwd() 81 | 82 | goreplay.Debug(0, "Started example file server for current directory on address ", args[1]) 83 | 84 | log.Fatal(http.ListenAndServe(args[1], loggingMiddleware(args[1], http.FileServer(http.Dir(dir))))) 85 | } else { 86 | flag.Parse() 87 | goreplay.CheckSettings() 88 | plugins = goreplay.NewPlugins() 89 | } 90 | 91 | log.Printf("[PPID %d and PID %d] Version:%s\n", os.Getppid(), os.Getpid(), goreplay.VERSION) 92 | 93 | if len(plugins.Inputs) == 0 || len(plugins.Outputs) == 0 { 94 | log.Fatal("Required at least 1 input and 1 output") 95 | } 96 | 97 | if *memprofile != "" { 98 | profileMEM(*memprofile) 99 | } 100 | 101 | if *cpuprofile != "" { 102 | profileCPU(*cpuprofile) 103 | } 104 | 105 | if goreplay.Settings.Pprof != "" { 106 | go func() { 107 | log.Println(http.ListenAndServe(goreplay.Settings.Pprof, nil)) 108 | }() 109 | } 110 | 111 | closeCh := make(chan int) 112 | emitter := goreplay.NewEmitter() 113 | go emitter.Start(plugins, goreplay.Settings.Middleware) 114 | if goreplay.Settings.ExitAfter > 0 { 115 | log.Printf("Running gor for a duration of %s\n", goreplay.Settings.ExitAfter) 116 | 117 | time.AfterFunc(goreplay.Settings.ExitAfter, func() { 118 | log.Printf("gor run timeout %s\n", goreplay.Settings.ExitAfter) 119 | close(closeCh) 120 | }) 121 | } 122 | c := make(chan os.Signal, 1) 123 | signal.Notify(c, os.Interrupt, syscall.SIGTERM) 124 | exit := 0 125 | select { 126 | case <-c: 127 | exit = 1 128 | case <-closeCh: 129 | exit = 0 130 | } 131 | emitter.Close() 132 | os.Exit(exit) 133 | } 134 | 135 | func profileCPU(cpuprofile string) { 136 | if cpuprofile != "" { 137 | f, err := os.Create(cpuprofile) 138 | if err != nil { 139 | log.Fatal(err) 140 | } 141 | pprof.StartCPUProfile(f) 142 | 143 | time.AfterFunc(30*time.Second, func() { 144 | pprof.StopCPUProfile() 145 | f.Close() 146 | }) 147 | } 148 | } 149 | 150 | func profileMEM(memprofile string) { 151 | if memprofile != "" { 152 | f, err := os.Create(memprofile) 153 | if err != nil { 154 | log.Fatal(err) 155 | } 156 | time.AfterFunc(30*time.Second, func() { 157 | pprof.WriteHeapProfile(f) 158 | f.Close() 159 | }) 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /docs/CNAME: -------------------------------------------------------------------------------- 1 | docs.goreplay.org -------------------------------------------------------------------------------- /docs/Capturing-and-replaying-traffic.md: -------------------------------------------------------------------------------- 1 | Think about Gor more like a network analyzer or tcpdump on steroids, it is not a proxy and does not affect your app anyhow. You specify application port, and it will capture and replay incoming data. 2 | 3 | Simplest setup will be: 4 | ```bash 5 | # Run on servers where you want to catch traffic. You can run it on every `web` machine. 6 | sudo gor --input-raw :80 --output-http http://staging.com 7 | ``` 8 | It will record and replay traffic from the same machine. However, it is possible to use [[Aggregator-forwarder setup]], when Gor on your web machines forward traffic to Gor aggregator instance running on the separate server. 9 | 10 | > You may notice that it require `sudo`: to analyze network Gor need permissions which available only to root users. However, it is possible to configure Gor [beign run for non-root users](Running as a non-root user). 
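A minimal sketch of the aggregator-forwarder setup mentioned above (`replay.local` is a placeholder for your aggregator host):

```bash
# On each web machine: capture traffic and forward it to the aggregator
sudo gor --input-raw :80 --output-tcp replay.local:28020

# On the aggregator: receive the forwarded traffic and replay it against staging
gor --input-tcp :28020 --output-http http://staging.com
```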
11 | 12 | 13 | ### Forwarding to multiple addresses 14 | 15 | You can forward traffic to multiple endpoints. 16 | ``` 17 | gor --input-tcp :28020 --output-http "http://staging.com" --output-http "http://dev.com" 18 | ``` 19 | 20 | ### Splitting traffic 21 | By default, it will send same traffic to all outputs, but you have options to equally split it (round-robin) using `--split-output` option. 22 | 23 | ``` 24 | gor --input-raw :80 --output-http "http://staging.com" --output-http "http://dev.com" --split-output true 25 | ``` 26 | 27 | ### Tracking responses 28 | By default `input-raw` does not intercept responses, only requests. You can turn response tracking using `--input-raw-track-response` option. When enable you will be able to access response information in middleware and `output-file`. 29 | 30 | 31 | ### Traffic interception engine 32 | By default, Gor will use `libpcap` for intercepting traffic, it should work in most cases. If you have any troubles with it, you may try alternative engine: `raw_socket`. 33 | 34 | ``` 35 | sudo gor --input-raw :80 --input-raw-engine "raw_socket" --output-http "http://staging.com" 36 | ``` 37 | 38 | You can read more about [[Replaying HTTP traffic]]. 39 | 40 | You can use VXLAN or traffic mirroring from AWS to capture the traffic. The 4789 UDP port will be opened and that works as you are launched GoReplay on the source machine. 41 | 42 | ``` 43 | gor --input-raw :80 --input-raw-engine vxlan -output-stdout 44 | ``` 45 | 46 | ### Tracking original IP addresses 47 | You can use `--input-raw-realip-header` option to specify header name: If not blank, injects header with given name and real IP value to the request payload. Usually, this header should be named: `X-Real-IP`, but you can specify any name. 48 | 49 | `gor --input-raw :80 --input-raw-realip-header "X-Real-IP" ...` 50 | 51 | 52 | *** 53 | 54 | Also you may want to know about [[Rate limiting]], [[Request rewriting]] and [[Request filtering]] -------------------------------------------------------------------------------- /docs/Compilation.md: -------------------------------------------------------------------------------- 1 | We provide pre-compiled binaries for Mac and Linux, but you are free to compile Gor by yourself. 2 | 3 | Gor is written using Go, so first you need to download it from here https://golang.org/, use the latest stable version. 4 | 5 | The only Gor dependency is [libpcap](https://github.com/the-tcpdump-group/libpcap), which is the interface to various kernel packet capture mechanisms, and https://github.com/google/gopacket, which is a Go wrapper around libpcap. Latest libpcap version can be obtained at http://www.tcpdump.org/release/. Libpcap itself depend on `flex` and `bison` packages, many operating systems already have them installed. 6 | 7 | ```bash 8 | # Fetch libpcap dependencies. Depending on your OS, instead of `apt` you will use `yum` or `rpm`, or `brew` on Mac. 
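# For example (assuming standard package names): `sudo yum install flex bison -y` on RHEL/CentOS,
# or `brew install flex bison` on macOS with Homebrew.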
9 | sudo apt-get install flex bison -y 10 | 11 | # Download latest stable release, compile and install it 12 | wget http://www.tcpdump.org/release/libpcap-1.7.4.tar.gz && tar xzf libpcap-1.7.4.tar.gz 13 | cd libpcap-1.7.4 14 | ./configure && make install 15 | 16 | 17 | # Lets fetch Gor source code 18 | mkdir $HOME/gocode 19 | # See more information about GOPATH https://github.com/golang/go/wiki/GOPATH 20 | export GOPATH=$HOME/gocode 21 | # Fetch code from the Github 22 | go get github.com/buger/gor 23 | 24 | # Compile from source 25 | cd $HOME/gocode/src/github.com/buger/gor 26 | go build LDFLAGS = -ldflags "-extldflags \"-static\"" 27 | ``` 28 | 29 | After you finished, you should see `gor` binary in current directory. 30 | 31 | -------------------------------------------------------------------------------- /docs/Development-Setup.md: -------------------------------------------------------------------------------- 1 | ## STEP 1: Install Docker 2 | For local development we recommend to use Docker. 3 | 4 | If you don’t have it you can read how to install it here: 5 | https://docs.docker.com/engine/getstarted/step_one/#step-3-verify-your-installation 6 | 7 | ## STEP 2: Download repository 8 | 9 | `git clone git@github.com:buger/goreplay.git` 10 | 11 | 12 | ## STEP 3: Setup container 13 | 14 | ``` 15 | cd ./goreplay 16 | make build 17 | 18 | ``` 19 | 20 | ## Testing 21 | To run tests execute next command: 22 | 23 | ``` 24 | make test 25 | ``` 26 | 27 | You can copy the command that is produced and modify it. For example, if you need to run one test copy the command and add `-run TestName`, e.g.: 28 | 29 | ``` 30 | docker run -v `pwd`:/go/src/github.com/buger/gor/ -p 0.0.0.0:8000:8000 -t -i gor:go go test ./. -run TestEmitterFiltered -timeout 60s -ldflags "-X main.VERSION=DEV-1482398347 -extldflags \"-static\"" -v 31 | ``` 32 | 33 | 34 | ## Building 35 | To get a binary file run 36 | 37 | ``` 38 | make release-bin 39 | ``` 40 | -------------------------------------------------------------------------------- /docs/Distributed-configuration.md: -------------------------------------------------------------------------------- 1 | Sometimes it makes sense to use separate Gor instance for replaying traffic and performing things like load testing, so your production machines do not spend precious resources. It is possible to configure Gor on your web machines forward traffic to Gor aggregator instance running on the separate server. 2 | 3 | ```bash 4 | # Run on servers where you want to catch traffic. You can run it on each `web` machine. 5 | sudo gor --input-raw :80 --output-tcp replay.local:28020 6 | 7 | # Replay server (replay.local). 8 | gor --input-tcp replay.local:28020 --output-http http://staging.com 9 | ``` 10 | 11 | If you have multiple replay machines you can split traffic among them using `--split-output` option: it will equally split all incoming traffic to all outputs using round robin algorithm. 12 | ``` 13 | gor --input-raw :80 --split-output --output-tcp replay1.local:28020 --output-tcp replay2.local:28020 14 | ``` 15 | 16 | [GoReplay PRO](https://goreplay.org/pro.html) support accurate recording and replaying of tcp sessions, and when `--recognize-tcp-sessions` option is passed, instead of round-robin it will use a smarter algorithm which ensures that same sessions will be sent to the same replay instance. 17 | 18 | 19 | In case if you are planning a large load testing, you may consider use separate master instance which will control Gor slaves which actually replay traffic. 
For example: 20 | ``` 21 | # This command will read multiple log files, replay them on 10x speed and loop them if needed for 30 seconds, and will distributed traffic (tcp session aware) among multiple workers 22 | gor --input-file logs_from_multiple_machines.*|1000% --input-file-loop --exit-after 30s --recognize-tcp-sessions --split-output --output-tcp worker1.local --output-tcp worker2.local:27017 --output-tcp worker3.local:27017 ... --output-tcp workerN.local:27017 23 | 24 | # worker 25 | gor --input-tcp :27017 --ouput-http load_test.target 26 | ``` 27 | -------------------------------------------------------------------------------- /docs/Exporting-to-ElasticSearch.md: -------------------------------------------------------------------------------- 1 | Gor can export requests and replayed response data to ElasticSearch: 2 | 3 | ``` 4 | ./gor --input-raw :8000 --output-http http://staging.com --output-http-elasticsearch localhost:9200/gor 5 | ``` 6 | 7 | You don't have to create the index upfront. That will be done for you automatically. 8 | 9 | ### Format 10 | 11 | Following structure represents ES format: 12 | 13 | ``` 14 | type ESRequestResponse struct { 15 | ReqURL string `json:"Req_URL"` 16 | ReqMethod string `json:"Req_Method"` 17 | ReqUserAgent string `json:"Req_User-Agent"` 18 | ReqAcceptLanguage string `json:"Req_Accept-Language,omitempty"` 19 | ReqAccept string `json:"Req_Accept,omitempty"` 20 | ReqAcceptEncoding string `json:"Req_Accept-Encoding,omitempty"` 21 | ReqIfModifiedSince string `json:"Req_If-Modified-Since,omitempty"` 22 | ReqConnection string `json:"Req_Connection,omitempty"` 23 | ReqCookies string `json:"Req_Cookies,omitempty"` 24 | RespStatus string `json:"Resp_Status"` 25 | RespStatusCode string `json:"Resp_Status-Code"` 26 | RespProto string `json:"Resp_Proto,omitempty"` 27 | RespContentLength string `json:"Resp_Content-Length,omitempty"` 28 | RespContentType string `json:"Resp_Content-Type,omitempty"` 29 | RespTransferEncoding string `json:"Resp_Transfer-Encoding,omitempty"` 30 | RespContentEncoding string `json:"Resp_Content-Encoding,omitempty"` 31 | RespExpires string `json:"Resp_Expires,omitempty"` 32 | RespCacheControl string `json:"Resp_Cache-Control,omitempty"` 33 | RespVary string `json:"Resp_Vary,omitempty"` 34 | RespSetCookie string `json:"Resp_Set-Cookie,omitempty"` 35 | Rtt int64 `json:"RTT"` 36 | Timestamp time.Time 37 | } 38 | ``` -------------------------------------------------------------------------------- /docs/FAQ.md: -------------------------------------------------------------------------------- 1 | ### What OS are supported? 2 | Gor will run everywhere where [libpcap](http://www.tcpdump.org/) works, and it works on most of the platforms. However, currently, we test it on Linux and Mac. See more about [[Compilation]]. 3 | 4 | ### Why does the `--input-raw` requires sudo or root access? 5 | Listener works by sniffing traffic from a given port. It's accessible 6 | only by using sudo or root access. But it is possible to [[Running as non root user]]. 7 | 8 | ### How do you deal with user session to replay the traffic correctly? 9 | You can rewrite session related headers/params to match your staging environment. If you require custom logic (e.g random token based auth) follow this discussion: https://github.com/buger/gor/issues/154 10 | 11 | ### Can I use Gor to intercept SSL traffic? 12 | Basic idea is that SSL was made to protect itself from traffic interception. There 2 options: 13 | 1. Move SSL handling to proxy like Nginx or Amazon ELB. 
And allow Gor to listen on upstreams. 14 | 2. Use `--input-http` so you can duplicate request payload directly from your app to Gor, but it will require your app modifications. 15 | 16 | More can be find here: https://github.com/buger/gor/issues/85 17 | 18 | ### Is there a limit for size of HTTP request when using output-http? 19 | Due to the fact that Gor can't guarantee interception of all packets, for large payloads > 200kb there is chance of missing some packets and corrupting body. Treat it as a feature and chance to test broken bodies handling :) 20 | The only way to guarantee delivery is using `--input-http`, but you will miss some features. 21 | 22 | ### I'm getting 'too many open files' error 23 | Typical Linux shell has a small open files soft limit at 1024. You can easily raise that when you do this before starting your gor replay process: 24 | 25 | ulimit -n 64000 26 | 27 | More about ulimit: http://www.thecodingmachine.com/solving-the-too-many-open-files-exception-in-red5-or-any-other-application/ 28 | 29 | ### The CPU average across my load-balanced targets is higher than the source 30 | If you are replaying traffic from multiple listeners to a load-balanced target and you use sticky sessions, you may observe that the target servers have a higher CPU load than the listener servers. This may be because the sticky session cookie of the original load balancer is not honored by the target load balancer thus resulting in requests that would normally hit the same target server hitting different servers on the backend thus reducing some caching benefits gained via the load balancing. Try running just one listener against one replay target and see if the CPU utilization comparison is more accurate. 31 | 32 | Also see [[Troubleshooting]]. -------------------------------------------------------------------------------- /docs/Middleware.md: -------------------------------------------------------------------------------- 1 | #### Overview 2 | Middleware is a program that accepts request and response payload at STDIN and emits modified requests at STDOUT. You can implement any custom logic like stripping private data, advanced rewriting, support for oAuth and etc. Check examples [included into our repo](https://github.com/buger/gor/tree/master/examples/middleware). 3 | 4 | 5 | ``` 6 | Original request +--------------+ 7 | +-------------+----------STDIN---------->+ | 8 | | Gor input | | Middleware | 9 | +-------------+----------STDIN---------->+ | 10 | Original response (1) +------+---+---+ 11 | | ^ 12 | +-------------+ Modified request v | 13 | | Gor output +<---------STDOUT-----------------+ | 14 | +-----+-------+ | 15 | | | 16 | | Replayed response | 17 | +------------------STDIN----------------->----+ 18 | ``` 19 | 20 | (1): Original responses will only be sent to the middleware if the `--input-raw-track-response` option is specified. 21 | 22 | Middleware can be written in any language, see `examples/middleware` folder for examples. 23 | Middleware program should accept the fact that all communication with Gor is asynchronous, there is no guarantee that original request and response messages will come one after each other. Your app should take care of the state if logic depends on original or replayed response, see `examples/middleware/token_modifier.go` as example. 
24 | 25 | A simple bash echo middleware (which returns the same request) looks like this: 26 | ```bash 27 | while read line; do 28 | echo $line 29 | done 30 | ``` 31 | 32 | Middleware can be enabled using the `--middleware` option, by specifying the path to an executable file: 33 | ``` 34 | gor --input-raw :80 --middleware "/opt/middleware_executable" --output-http "http://staging.server" 35 | ``` 36 | 37 | #### Communication protocol 38 | All messages should be hex encoded, and the newline character marks the end of a message, i.e. one message per line. 39 | 40 | The decoded payload consists of 2 parts: a header and the HTTP payload, separated by a newline character. 41 | 42 | Example request payload: 43 | 44 | ``` 45 | 1 932079936fa4306fc308d67588178d17d823647c 1439818823587396305 46 | GET /a HTTP/1.1 47 | Host: 127.0.0.1 48 | 49 | ``` 50 | 51 | Example response payload (note: you will only receive this if you specify `--input-raw-track-response`): 52 | 53 | ``` 54 | 2 8e091765ae902fef8a2b7d9dd960e9d52222bd8c 1439818823587996305 2782013 55 | HTTP/1.1 200 OK 56 | Date: Mon, 17 Aug 2015 13:40:23 GMT 57 | Content-Length: 0 58 | Content-Type: text/plain; charset=utf-8 59 | 60 | ``` 61 | 62 | The header contains request meta information separated by spaces. The first value is the payload type; possible values: `1` - request, `2` - original response, `3` - replayed response. 63 | Next goes the request id: it is unique among all requests (sha1 of time and Ack) but remains the same for the original and replayed response, so you can create associations between requests and responses. The third argument is the time when the request/response was initiated/received. The fourth argument is populated only for responses and holds the latency. 64 | 65 | The HTTP payload is the unmodified HTTP request/response intercepted from the network. You can read more about the request format [here](http://www.jmarshall.com/easy/http/), [here](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol) and [here](http://www.w3.org/Protocols/rfc2616/rfc2616.html). You can operate on the payload as you want: add headers, change the path, etc. Basically you are just editing a string; just ensure that it stays RFC compliant. 66 | 67 | At the end, the modified (or untouched) request should be emitted back to STDOUT, keeping the original header, and hex-encoded. If you want to filter a request, simply do not emit it. Emitting responses back is required, even if you did not touch them. (A minimal skeleton implementing this protocol is sketched at the end of this page.) 68 | 69 | #### Advanced example 70 | Imagine that you have an auth system that randomly generates access tokens, which are later used for accessing secure content. Since there is no pre-defined token value, a naive approach without middleware (or a middleware that uses only request payloads) will fail, because the replayed server has its own tokens, not synced with the origin. To fix this, our middleware should take into account the responses of both the replayed and origin servers, store `originalToken -> replayedToken` aliases and rewrite all requests that use such a token to use the replayed alias. See [examples/middleware/token_modifier.go](https://github.com/buger/gor/tree/master/examples/middleware/token_modifier.go) and [middleware_test.go#TestTokenMiddleware](https://github.com/buger/gor/tree/master/middleware_test.go) as an example of the described scheme. 71 | 72 | *** 73 | 74 | You may also read about [[Request filtering]], [[Rate limiting]] and [[Request rewriting]].
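For reference, here is a minimal Go sketch of a middleware implementing the protocol described above: it hex-decodes each line from STDIN, inspects the payload type, and emits every message back hex-encoded. The request-modification step is left as a stub; see `examples/middleware/token_modifier.go` in the repository for a complete program.

```go
package main

import (
	"bufio"
	"encoding/hex"
	"fmt"
	"os"
)

func main() {
	scanner := bufio.NewScanner(os.Stdin)
	// Payloads can be large; raise the default scanner buffer limit.
	scanner.Buffer(make([]byte, 64*1024), 16*1024*1024)

	for scanner.Scan() {
		encoded := scanner.Bytes()
		payload := make([]byte, len(encoded)/2)
		if _, err := hex.Decode(payload, encoded); err != nil {
			continue // skip malformed lines
		}

		// The first header field is the payload type:
		// '1' - request, '2' - original response, '3' - replayed response.
		if len(payload) > 0 && payload[0] == '1' {
			// Modify the request payload here (the HTTP part starts after the first newline).
		}

		// Emit every message back, hex-encoded, one message per line.
		fmt.Println(hex.EncodeToString(payload))
	}
}
```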
75 | -------------------------------------------------------------------------------- /docs/Rate-limiting.md: -------------------------------------------------------------------------------- 1 | Rate limiting can be useful if you only want to forward parts of incoming traffic, for example, to not overload your test environment. There are two strategies: dropping random requests or dropping fractions of requests based on Header or URL param value. 2 | 3 | ### Dropping random requests 4 | Every input and output support random rate limiting. 5 | There are two limiting algorithms: absolute or percentage based. 6 | 7 | **Absolute**: If for current second it reached specified requests limit - disregard the rest, on next second counter reset. 8 | 9 | **Percentage**: For input-file it will slowdown or speedup request execution, for the rest it will use the random generator to decide if request pass or not based on the chance you specified. 10 | 11 | You can specify your desired limit using the "|" operator after the server address, see examples below. 12 | 13 | #### Limiting replay using absolute number 14 | ``` 15 | # staging.server will not get more than ten requests per second 16 | gor --input-tcp :28020 --output-http "http://staging.com|10" 17 | ``` 18 | 19 | #### Limiting listener using percentage based limiter 20 | ``` 21 | # replay server will not get more than 10% of requests 22 | # useful for high-load environments 23 | gor --input-raw :80 --output-tcp "replay.local:28020|10%" 24 | ``` 25 | 26 | ### Consistent limiting based on Header or URL param value 27 | If you have unique user id (like API key) stored in header or URL you can consistently forward specified percent of traffic only for the fraction of this users. 28 | Basic formula looks like this: `FNV32-1A_hashing(value) % 100 >= chance`. Examples: 29 | ``` 30 | # Limit based on header value 31 | gor --input-raw :80 --output-tcp "replay.local:28020|10%" --http-header-limiter "X-API-KEY: 10%" 32 | 33 | # Limit based on header value 34 | gor --input-raw :80 --output-tcp "replay.local:28020|10%" --http-param-limiter "api_key: 10%" 35 | ``` 36 | 37 | When limiting based on header or param only percentage based limiting supported. -------------------------------------------------------------------------------- /docs/Replaying-HTTP-traffic.md: -------------------------------------------------------------------------------- 1 | Gor can replay HTTP traffic using `--output-http` option: 2 | 3 | ```bash 4 | sudo ./gor --input-raw :8000 --output-http="http://staging.env" 5 | ``` 6 | 7 | You can [filter](Request filtering), [rate limit](Rate limiting) and [rewrite](Request rewriting) requests on the fly. 8 | 9 | ### HTTP output workers 10 | By default Gor creates a dynamic pool of workers: it starts with 10 and creates more HTTP output workers when the HTTP output queue length is greater than 10. The number of workers created (N) is equal to the queue length at the time which it is checked and found to have a length greater than 10. The queue length is checked every time a message is written to the HTTP output queue. No more workers will be spawned until that request to spawn N workers is satisfied. If a dynamic worker cannot process a message at that time, it will sleep for 100 milliseconds. If a dynamic worker cannot process a message for 2 seconds it dies. 11 | You may specify fixed number of workers using `--output-http-workers=20` option. 
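For example (the staging URL is a placeholder):
```
# Dynamic worker pool (the default behaviour described above)
gor --input-raw :80 --output-http http://staging.com

# Fixed pool of 20 workers
gor --input-raw :80 --output-http http://staging.com --output-http-workers=20
```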
12 | 13 | ### Following redirects 14 | By default Gor will ignore all redirects since they are handled by clients using your app, but in scenarios where your replayed environment introduces new redirects, you can enable them like this: 15 | ``` 16 | gor --input-tcp replay.local:28020 --output-http http://staging.com --output-http-redirects 2 17 | ``` 18 | The given example will follow up to 2 redirects per request. 19 | 20 | ### HTTP timeouts 21 | By default http timeout for both request and response is 5 seconds. You can override it like this: 22 | ``` 23 | gor --input-tcp replay.local:28020 --output-http http://staging.com --output-http-timeout 30s 24 | ``` 25 | 26 | ### Response buffer 27 | By default, to reduce memory consumption, internal HTTP client will fetch max 200kb of the response body (used if you use middleware), by you can increase limit using `--output-http-response-buffer` option (accepts number of bytes). 28 | 29 | ### Basic Auth 30 | 31 | If your development or staging environment is protected by Basic Authentication then those credentials can be injected in during the replay: 32 | 33 | ``` 34 | gor --input-raw :80 --output-http "http://user:pass@staging.com" 35 | ``` 36 | 37 | Note: This will overwrite any Authorization headers in the original request. 38 | 39 | 40 | ### Multiple domains support 41 | 42 | If you app accepts traffic from multiple domains, and you want to keep original headers, there is specific `--http-original-host` with tells Gor do not touch Host header at all. 43 | 44 | 45 | *** 46 | You may also read about [[Saving and Replaying from file]] -------------------------------------------------------------------------------- /docs/Request-filtering.md: -------------------------------------------------------------------------------- 1 | Filtering is useful when you need to capture only specific part of traffic, like API requests. It is possible to filter by URL, HTTP header or HTTP method. 2 | 3 | #### Allow url regexp 4 | ``` 5 | # only forward requests being sent to the /api endpoint 6 | gor --input-raw :8080 --output-http staging.com --http-allow-url /api 7 | ``` 8 | 9 | #### Disallow url regexp 10 | ``` 11 | # only forward requests NOT being sent to the /api... endpoint 12 | gor --input-raw :8080 --output-http staging.com --http-disallow-url /api 13 | ``` 14 | #### Filter based on regexp of header 15 | 16 | ``` 17 | # only forward requests with an api version of 1.0x 18 | gor --input-raw :8080 --output-http staging.com --http-allow-header api-version:^1\.0\d 19 | 20 | # only forward requests NOT containing User-Agent header value "Replayed by Gor" 21 | gor --input-raw :8080 --output-http staging.com --http-disallow-header "User-Agent: Replayed by Gor" 22 | ``` 23 | 24 | #### Filter based on HTTP method 25 | Requests not matching a specified whitelist can be filtered out. For example to strip non-nullipotent requests: 26 | 27 | ``` 28 | gor --input-raw :80 --output-http "http://staging.server" \ 29 | --http-allow-method GET \ 30 | --http-allow-method OPTIONS 31 | ``` 32 | 33 | 34 | ----- 35 | You may also read about [[Request rewriting]], [[Rate limiting]] and [[Middleware]] -------------------------------------------------------------------------------- /docs/Request-rewriting.md: -------------------------------------------------------------------------------- 1 | Gor supports rewriting of URLs, URL params and headers, see below. 
2 | 3 | Rewriting may be useful if your test environment does not have the same data as your production, and you want to perform all actions in the context of a `test` user: for example, rewriting all API tokens to some test value. Other possible use cases are toggling features on/off using custom headers or rewriting URLs if they have changed in the new environment. 4 | 5 | For more complex logic you can use [Middleware](middleware.md). 6 | 7 | #### Rewrite URL based on a mapping 8 | `--http-rewrite-url` expects a value in `pattern:replacement` format, where ":" is the delimiter. In the replacement section you may use captured regexp group values. This works similarly to the `replace` method in Javascript or `gsub` in Ruby. 9 | 10 | ``` 11 | # Rewrites all `/v1/user/<id>/ping` requests to `/v2/user/<id>/ping` 12 | gor --input-raw :8080 --output-http staging.com --http-rewrite-url /v1/user/([^\\/]+)/ping:/v2/user/$1/ping 13 | ``` 14 | 15 | #### Set URL param 16 | Sets a request URL param; if the param already exists, it will be overwritten. 17 | ``` 18 | gor --input-raw :8080 --output-http staging.com --http-set-param api_key=1 19 | ``` 20 | 21 | #### Set Header 22 | Sets a request header; if the header already exists, it will be overwritten. This may be useful if you need to identify requests generated by Gor or to enable feature-flagged functionality in an application: 23 | 24 | ``` 25 | gor --input-raw :80 --output-http "http://staging.server" \ 26 | --http-set-header "User-Agent: Replayed by Gor" \ 27 | --http-set-header "Enable-Feature-X: true" 28 | ``` 29 | 30 | #### Host header 31 | The Host header gets special treatment. By default, Host is set to the value specified in `--output-http`. If you manually set `--http-set-header "Host: another.com"`, Gor will not override the Host value. 32 | 33 | If your app accepts traffic from multiple domains and you want to keep the original headers, there is a specific `--http-original-host` option which tells Gor not to touch the Host header at all. 34 | 35 | 36 | *** 37 | 38 | You may also read about [[Request filtering]], [[Rate limiting]] and [[Middleware]] 39 | -------------------------------------------------------------------------------- /docs/Running-as-non-root-user.md: -------------------------------------------------------------------------------- 1 | You can securely enable Gor for non-root users by using the following commands: 2 | 3 | ``` 4 | # The following commands assume that you put the `gor` binary in /usr/local/bin 5 | add gor 6 | addgroup gor 7 | chgrp gor /usr/local/bin/gor 8 | chmod 0750 /usr/local/bin/gor 9 | setcap "cap_net_raw,cap_net_admin+eip" /usr/local/bin/gor 10 | ``` 11 | 12 | As a brief explanation of the above: 13 | * We create a group called gor. 14 | * We then add the user you want to the new group so they will be able to use gor without sudo. 15 | * We then change the group of the gor binary to the new group. 16 | * We then make sure the permissions are set on the gor binary so that members of the group can execute it but other normal users cannot. 17 | * We then use `setcap` to give the CAP_NET_RAW and CAP_NET_ADMIN privileges to the executable when it runs. This is so that Gor can open its raw socket, which is not normally permitted unless you are root. -------------------------------------------------------------------------------- /docs/Troubleshooting.md: -------------------------------------------------------------------------------- 1 | Gor can report stats on the `output-tcp` and `output-http` request queues.
Stats are reported to the console every 5 seconds in the form `latest,mean,max,count,count/second` by using the `--output-http-stats` and `--output-tcp-stats` options. 2 | 3 | Examples: 4 | 5 | ``` 6 | 2014/04/23 21:17:50 output_tcp:latest,mean,max,count,count/second 7 | 2014/04/23 21:17:50 output_tcp:0,0,0,0,0 8 | 2014/04/23 21:17:55 output_tcp:1,1,2,68,13 9 | 2014/04/23 21:18:00 output_tcp:1,1,2,92,18 10 | 2014/04/23 21:18:05 output_tcp:1,1,2,119,23 11 | ``` 12 | 13 | ``` 14 | 2014/04/23 21:19:46 output_http:latest,mean,max,count,count/second 15 | 2014/04/23 21:19:46 output_http:0,0,0,0,0 16 | 2014/04/23 21:19:51 output_http:0,0,0,0,0 17 | 2014/04/23 21:19:56 output_http:0,0,0,0,0 18 | 2014/04/23 21:20:01 output_http:1,0,1,50,10 19 | 2014/04/23 21:20:06 output_http:1,1,4,72,14 20 | 2014/04/23 21:20:11 output_http:1,0,1,179,35 21 | 2014/04/23 21:20:16 output_http:1,0,1,148,29 22 | 2014/04/23 21:20:21 output_http:1,1,2,91,18 23 | 2014/04/23 21:20:26 output_http:1,1,2,150,30 24 | 2014/04/23 21:18:15 output_http:100,99,100,70,14 25 | 2014/04/23 21:18:21 output_http:100,99,100,55,11 26 | ``` 27 | 28 | ### How can I tell if I have bottlenecks? 29 | Key areas that sometimes experience bottlenecks are the output-tcp and output-http functions which have internal queues for requests. Each queue has an upper limit of 100. Enable stats reporting to see if any queues are experiencing bottleneck behavior. 30 | 31 | #### Output HTTP bottlenecks 32 | When running a Gor replay the output-http feature may bottleneck if: 33 | 34 | * the replay has inadequate bandwidth. If the replay is receiving or sending more messages than its network adapter can handle the output-http-stats may report that the output-http queue is filling up. See if there is a way to upgrade the replay's bandwidth. 35 | * with `--output-http-workers` set to anything other than `-1` the `-output-http` target is unable to respond to messages in a timely manner. The http output workers which take messages off the output-http queue, process the request, and ensure that the request did not result in an error may not be able to keep up with the number of incoming requests. If the replay is not using dynamic worker scaling (`--output-http-workers=-1`) The optimal number of output-http-workers can be determined with the formula `output-workers = (Average number of requests per second)/(Average target response time per second)`. 36 | 37 | #### Output TCP bottlenecks 38 | When using the Gor listener the output-tcp feature may bottleneck if: 39 | 40 | * the replay is unable to accept and process more requests than the listener is able generate. Prior to troubleshooting the output-tcp bottleneck, ensure that the replay target is not experiencing any bottlenecks. 41 | * the replay target has inadequate bandwidth to handle all its incoming requests. If a replay target's incoming bandwidth is maxed out the output-tcp-stats may report that the output-tcp queue is filling up. See if there is a way to upgrade the replay's bandwidth. 
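To gather these numbers, enable the queue stats described at the top of this page, for example:

```
# Report HTTP output queue stats to the console every 5 seconds
gor --input-tcp :28020 --output-http http://staging.com --output-http-stats

# Report TCP output queue stats to the console every 5 seconds
gor --input-raw :80 --output-tcp replay.local:28020 --output-tcp-stats
```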
42 | 43 | 44 | #### Tuning 45 | 46 | To achieve the top most performance you should tune the source server system limits: 47 | 48 | net.ipv4.tcp_max_tw_buckets = 65536 49 | net.ipv4.tcp_tw_recycle = 1 50 | net.ipv4.tcp_tw_reuse = 0 51 | net.ipv4.tcp_max_syn_backlog = 131072 52 | net.ipv4.tcp_syn_retries = 3 53 | net.ipv4.tcp_synack_retries = 3 54 | net.ipv4.tcp_retries1 = 3 55 | net.ipv4.tcp_retries2 = 8 56 | net.ipv4.tcp_rmem = 16384 174760 349520 57 | net.ipv4.tcp_wmem = 16384 131072 262144 58 | net.ipv4.tcp_mem = 262144 524288 1048576 59 | net.ipv4.tcp_max_orphans = 65536 60 | net.ipv4.tcp_fin_timeout = 10 61 | net.ipv4.tcp_low_latency = 1 62 | net.ipv4.tcp_syncookies = 0 63 | *** 64 | 65 | ### Gor is crashing with following stacktrace 66 | ``` 67 | fatal error: unexpected signal during runtime execution 68 | [signal 0xb code=0x1 addr=0x63 pc=0x7ffcdfdf8b2c] 69 | 70 | runtime stack: 71 | runtime.throw(0xad8380, 0x2a) 72 | /usr/local/go/src/runtime/panic.go:547 +0x90 73 | runtime.sigpanic() 74 | /usr/local/go/src/runtime/sigpanic_unix.go:12 +0x5a 75 | 76 | goroutine 103 [syscall, locked to thread]: 77 | runtime.cgocall(0x7b35a0, 0xc82121f1e8, 0x0) 78 | /usr/local/go/src/runtime/cgocall.go:123 +0x11b fp=0xc82121f188 sp=0xc82121f158 79 | net._C2func_getaddrinfo(0x7ffcec0008c0, 0x0, 0xc821b221e0, 0xc8217b2b18, 0x0, 0x0, 0x0) 80 | ??:0 +0x55 fp=0xc82121f1e8 sp=0xc82121f188 81 | net.cgoLookupIPCNAME(0x7fffb17208ab, 0x12, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb17200) 82 | ``` 83 | 84 | There is a chance that you hit Go bug. The crash comes from the CGO version of DNS resolver. 85 | By default Go based version used, but ins some cases [it switches to CGO based](https://golang.org/pkg/net/#hdr-Name_Resolution). It is possible to force Go based DNS resolver using GODEBUG environment variable: 86 | `sudo GODEBUG="netdns=go" ./gor --input-raw :80 --output-http staging.env` 87 | 88 | 89 | 90 | Also, see [[FAQ]] -------------------------------------------------------------------------------- /docs/_Footer.md: -------------------------------------------------------------------------------- 1 | [Website](https://goreplay.org) | [PRO version](https://goreplay.org/pro.html) | [[Getting started]] | [[FAQ]] | [Join newsletter](https://www.getdrip.com/forms/89690474/submissions/new) -------------------------------------------------------------------------------- /docs/_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-cayman -------------------------------------------------------------------------------- /docs/commercial/collaboration.md: -------------------------------------------------------------------------------- 1 | Collaboration is difficult with commercial closed source but I do want to keep as much of the OSS ethos as possible available to customers who want to fix it themselves. 2 | 3 | ## Legal 4 | 5 | In order to unambiguously own and sell Gor commercial products, I must have the copyright associated with the entire codebase. Any code you create which is merged must be owned by me. That's not me trying to be a jerk, that's just the way it works. 6 | 7 | ## Application 8 | 9 | If you wish access to the product repository so you can send a PR, just open a new Gor issue and include the following info: 10 | 11 | 1. the email address that bought the license, a max of one collaborator per license 12 | 1. 
the following statement "I assign all rights, including copyright, to any future Gor work by myself to Leonid Bugaev" 13 | 14 | You should be granted access to the private repo soon after. 15 | 16 | ## Notes 17 | 18 | 1. You should **never** work on the master branch. Only I may merge changes. 19 | 1. I may revoke access for any reason at any time. Access is not guaranteed with purchase. -------------------------------------------------------------------------------- /docs/commercial/faq.md: -------------------------------------------------------------------------------- 1 | ### What are GoReplay PRO and GoReplay Enterprise? 2 | 3 | [GoReplay PRO](https://goreplay.org/pro.html) and GoReplay Enterprise are extensions to GoReplay which add more functionality and provide additional support options for customers. 4 | 5 | ### Is there a trial version? 6 | 7 | There's no free trial but we do offer a 14 day period with full refund if it does not work for you. 8 | 9 | ### What is the license? 10 | 11 | See [COMM-LICENSE](https://github.com/buger/gor/blob/master/COMM-LICENSE) in the root of the GoReplay repo. 12 | 13 | ### How does PRO licensing work? 14 | 15 | Every organization running GoReplay Pro on its own servers must have a license. There's no limit to the amount of servers or environments used by that organization. 16 | 17 | ### How does Enterprise licensing work? 18 | 19 | Every organization running Gor Enterprise on its own servers must have a license. There's **no limit** to the amount of servers or environments used by that organization. 20 | 21 | ### What happens if my subscription lapses? 22 | 23 | You must have an active subscription to run GoReplay Pro or Enterprise. After a one week grace period, you'll lose access to binaries and priority support. You won't get any more updates or bug fixes. 24 | 25 | ### How do I buy GoReplay Enterprise? 26 | 27 | Send email to [support@gortool.com](mailto:support@gortool.com) with your info. A PDF quote will be emailed to you with the price. Reply to that email with your purchase order or just "Sounds good" and we will send an invoice which can be paid with a credit card, ACH bank transfer or a paper check. 28 | 29 | ### Can I upgrade from GoReplay Pro? 30 | 31 | Yes! Current subscribers can upgrade by [requesting a quote](mailto:support@gortool.com). **Please note that you are an existing Pro subscriber.** We will add a one-time discount on your first invoice to reflect any remaining Pro subscription credit. If you purchased GoReplay Pro 6 months ago for $950, you'll get a $475 discount. 32 | 33 | ### Can I distribute GoReplay PRO or Enterprise to my customers? 34 | 35 | This is a common requirement for "on-site installs" or "appliances" sold to large corporations. 36 | 37 | The standard license is appropriate for SaaS usage as it does not allow distribution. GoReplay PRO and Enterprise have an Appliance license option which **does** allow you to distribute them. The Appliance license is $9,500/yr for Pro and $19,500/yr for Enterprise. It allows you to distribute the Pro or Enterprise binaries as part of your application and each of your customers to run GoReplay Pro or Enterprise. Email [support@gortool.com](mailto:support@gortool.com) to purchase. 38 | 39 | ### Can you transfer a license? 40 | 41 | Licenses are **not** transferable to another company. We will transfer the license from a user-specific email to a group email address (e.g. john_smith@example.com -> tech@example.com) but only for **the same domain**. 
It is strongly recommended that you buy the license using a group email address so the license is not attached to any one employee's email address. 42 | 43 | ### What does the license require me to do? 44 | 45 | Your purchase gets you a unique access URL for downloading the Pro and/or Enterprise binaries. The license agreement requires you to keep this access URL private. If we find your access URL is ever publicized: 46 | 47 | 1. We'll send you a warning email with details. You need to remove the content and send a new email address so we can generate a new access URL for you. The old access URL will stop working immediately so you'll need to update your apps. 48 | 2. If your access URL is publicized a second time, we reserve the right to permanently remove access. 49 | 50 | ### Can I get a refund? 51 | 52 | Yes, up to two weeks after purchase. Let us know the reason and maybe we can help but either way it's not a problem. Email [support@gortool.com](mailto:support@gortool.com). 53 | 54 | ### How do I update my credit card info? 55 | 56 | If you purchased GoReplay Enterprise, there's nothing to do. Each annual invoice is paid separately. 57 | 58 | If you purchased GoReplay PRO, log into [Gumroad](https://gumroad.com) with your email address, click the Billing tab and enter your new card. I can't provide support for the Gumroad website and don't have the ability to edit customer info - if you can't log in or change your credit card, you can always let your current subscription expire and purchase a new subscription. -------------------------------------------------------------------------------- /docs/commercial/support.md: -------------------------------------------------------------------------------- 1 | Gor offers only community support. Gor Pro and Enterprise offer priority support via email. 2 | 3 | ## Priority Support 4 | 5 | Covers 1 incident per quarter, with a max response time of 2 working days. Scope is limited to Gor and Gor Pro and Enterprise features and APIs, not the application or infrastructure. For support, email **support** AT **gortool.com**. Please email using the same domain as the original license email or explain your connection to the licensed company. 6 | 7 | More aggressive support contracts (phone, quicker response time) are available separately, email with your needs. 8 | 9 | ## Onboarding 10 | 11 | Enterprise customers may request a one hour video chat session with @buger to discuss their application(s), requirements and how best to leverage the various Gor features. Contact support to set up your session. 
-------------------------------------------------------------------------------- /docs/css/breadcrumbs.css: -------------------------------------------------------------------------------- 1 | .wy-breadcrumbs li { 2 | display: inline-block; 3 | } 4 | 5 | .wy-breadcrumbs li.wy-breadcrumbs-aside { 6 | float: right; 7 | } 8 | 9 | .wy-breadcrumbs li a { 10 | display: inline-block; 11 | padding: 5px; 12 | } 13 | 14 | .wy-breadcrumbs li a:first-child { 15 | padding-left: 0; 16 | } 17 | 18 | .wy-breadcrumbs-extra { 19 | margin-bottom: 0; 20 | color: #b3b3b3; 21 | font-size: 80%; 22 | display: inline-block; 23 | } 24 | 25 | @media screen and (max-width: 480px) { 26 | .wy-breadcrumbs-extra { 27 | display: none; 28 | } 29 | 30 | .wy-breadcrumbs li.wy-breadcrumbs-aside { 31 | display: none; 32 | } 33 | } 34 | 35 | @media print { 36 | .wy-breadcrumbs li.wy-breadcrumbs-aside { 37 | display: none; 38 | } 39 | } -------------------------------------------------------------------------------- /docs/css/goreplay.css: -------------------------------------------------------------------------------- 1 | code { 2 | font-family: Consolas, "Liberation Mono", Menlo, Courier, monospace !important; 3 | border-bottom-left-radius: 3px; 4 | border-bottom-right-radius: 3px; 5 | border-top-left-radius: 3px; 6 | border-top-right-radius: 3px; 7 | background-color: #f7f7f7 !important; 8 | color: #333 !important; 9 | font-size: 14px !important; 10 | border: none !important; 11 | padding: 5px !important; 12 | } 13 | 14 | pre > code { 15 | line-height: 20px; 16 | padding: 16px !important; 17 | } 18 | 19 | blockquote { 20 | color: #777 !important; 21 | border-left: 4px solid rgb(221, 221, 221); 22 | padding-left: 16px; 23 | padding-right: 16px; 24 | margin-left: 0px !important; 25 | } 26 | 27 | /* Visited links purple is not a good idea */ 28 | a:visited { 29 | color: #008BF3; 30 | } 31 | 32 | a { 33 | color: #008BF3; 34 | } 35 | 36 | h3, h4, h5, h6 { 37 | color: #333 !important; 38 | } 39 | 40 | .rst-versions { 41 | display: none !important; 42 | } 43 | 44 | /* The next and previous button we don't need */ 45 | .rst-footer-buttons { 46 | display: none; 47 | } 48 | 49 | /* Foldable parts */ 50 | details > * { 51 | margin-left: 20px; 52 | } 53 | details > summary { 54 | margin-left: 0px; /* To overwrite the margin from above */ 55 | 56 | font-size: 120%; 57 | cursor: pointer 58 | } 59 | 60 | /* Fix the design not having the correct spacing */ 61 | .wy-menu-vertical .subnav li.current > a { 62 | padding: 0.4045em 2.427em; 63 | } 64 | 65 | .toctree-l3 { 66 | padding-left: 1.0em; 67 | } 68 | 69 | footer .fastlane { 70 | margin: 20px 0; 71 | } 72 | 73 | footer .fastlane iframe { 74 | vertical-align: middle; 75 | } 76 | 77 | /* Custom Syntax highlighting to look more like GitHub.com */ 78 | .hljs-symbol { 79 | color: #0086b3; 80 | } 81 | 82 | .hljs-keyword { 83 | font-weight: normal; 84 | color: #a71d5d; 85 | } 86 | 87 | .hljs-string { 88 | color: #183691; 89 | } 90 | 91 | /* Header Anchors */ 92 | /* From https://github.com/facelessuser/pymdown-extensions/blob/bf18d0635e9d91b0f98eacdcaa10f26e0ace0f20/doc_theme/css/theme_custom.css#L322-L395 */ 93 | .rst-content .headeranchor-link { 94 | position: absolute; 95 | top: 0; 96 | bottom: 0; 97 | left: 0; 98 | display: block; 99 | padding-right: 6px; 100 | padding-left: 30px; 101 | margin-left: -30px; 102 | } 103 | 104 | .rst-content .headeranchor-link:focus { 105 | outline: none; 106 | } 107 | 108 | .rst-content h1, 109 | .rst-content h2, 110 | .rst-content h3, 111 | .rst-content h4, 112 
| .rst-content h5, 113 | .rst-content h6 { 114 | position: relative; 115 | } 116 | 117 | .rst-content h1 .headeranchor, 118 | .rst-content h2 .headeranchor, 119 | .rst-content h3 .headeranchor, 120 | .rst-content h4 .headeranchor, 121 | .rst-content h5 .headeranchor, 122 | .rst-content h6 .headeranchor { 123 | display: none; 124 | color: #000; 125 | vertical-align: middle; 126 | } 127 | 128 | .rst-content h1:hover .headeranchor-link, 129 | .rst-content h2:hover .headeranchor-link, 130 | .rst-content h3:hover .headeranchor-link, 131 | .rst-content h4:hover .headeranchor-link, 132 | .rst-content h5:hover .headeranchor-link, 133 | .rst-content h6:hover .headeranchor-link { 134 | height: 1em; 135 | padding-left: 8px; 136 | margin-left: -30px; 137 | text-decoration: none; 138 | } 139 | 140 | .rst-content h1:hover .headeranchor-link .headeranchor, 141 | .rst-content h2:hover .headeranchor-link .headeranchor, 142 | .rst-content h3:hover .headeranchor-link .headeranchor, 143 | .rst-content h4:hover .headeranchor-link .headeranchor, 144 | .rst-content h5:hover .headeranchor-link .headeranchor, 145 | .rst-content h6:hover .headeranchor-link .headeranchor { 146 | display: inline-block; 147 | } 148 | 149 | .rst-content .headeranchor { 150 | font: normal normal 16px FontAwesome; 151 | line-height: 1; 152 | display: inline-block; 153 | text-decoration: none; 154 | -webkit-font-smoothing: antialiased; 155 | -moz-osx-font-smoothing: grayscale; 156 | -webkit-user-select: none; 157 | -moz-user-select: none; 158 | -ms-user-select: none; 159 | user-select: none; 160 | } 161 | 162 | .rst-content .headeranchor:before { 163 | content: '\f0c1'; 164 | } 165 | 166 | /* index.md badges */ 167 | @media screen and (max-width: 768px) { 168 | .badge img { 169 | width: auto; 170 | } 171 | } -------------------------------------------------------------------------------- /docs/getting-started/basics.md: -------------------------------------------------------------------------------- 1 | ### Overview 2 | Gor architecture tries to follow UNIX philosophy: everything made of pipes, various inputs multiplexing data to outputs. 3 | 4 | You can [rate limit](/rate-limiting.md), [filter](request-filtering.md), [rewrite](request-rewriting.md) requests or even use your own [middleware](middleware.md) to implement custom logic. Also, it is possible to replay requests at the higher rate for [load testing](saving-and-replaying-from-file.md). 5 | 6 | ### Available inputs 7 | 8 | * `--input-raw` - used to capture HTTP traffic, you should specify IP address or interface and application port. More about [[Capturing and replaying traffic]]. 9 | * `--input-file` - accepts file which previously was recorded using ` --output-file`. More about [[Saving and Replaying from file]] 10 | * `--input-tcp` - used by Gor aggregation instance if you decided forward traffic from multiple forwarder Gor instances to it. Read about using [[Aggregator-forwarder setup]]. 11 | 12 | ### Available outputs 13 | 14 | * `--output-http` - replay HTTP traffic to given endpoint, accepts base url. Read [more about it](Replaying HTTP traffic) 15 | * `--output-file` - records incoming traffic to the file. More about [[Saving and Replaying from file]] 16 | * `--output-tcp` - forward incoming data to another Gor instance, used in conjunction with `--input-tcp`. Read more about [[Aggregator-forwarder setup]]. 17 | * `--output-stdout` - used for debugging, outputs all data to stdout. 
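As a minimal sketch of how these pieces combine into a pipeline (the staging hostname and file name below are placeholders), you can multiplex one input into several outputs at once:

```
# Capture traffic on port 80, replay it to a staging host,
# and keep a copy on disk at the same time
sudo gor --input-raw :80 \
         --output-http "http://staging.example.com" \
         --output-file requests.gor

# Debug what is being captured by printing everything to stdout
sudo gor --input-raw :80 --output-stdout
```

Because inputs and outputs are just multiplexed pipes, adding another `--output-*` flag simply sends each captured request to one more destination.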
-------------------------------------------------------------------------------- /docs/getting-started/tutorial.md: -------------------------------------------------------------------------------- 1 | ### Dependencies 2 | To start working with Gor, you need a web server running on your machine and a terminal to run commands. If you are just poking around, you can quickly start a server by calling `gor file-server :8000`; this starts a simple file server for the current directory on port `8000`. 3 | 4 | ### Installing Gor 5 | Download the latest Gor binary from https://github.com/buger/gor/releases (we provide precompiled binaries for Windows, Linux x64 and Mac OS), or you can compile it yourself: see [[Compilation]]. 6 | 7 | Once the archive is downloaded and uncompressed, you can run Gor from the current directory, or you may want to copy the binary to your PATH (for Linux and Mac OS it can be `/usr/local/bin`). 8 | 9 | ### Capturing web traffic 10 | Now run this command in a terminal: `sudo ./gor --input-raw :8000 --output-stdout` 11 | 12 | This command tells Gor to listen for all network activity happening on port 8000 and log it to stdout. 13 | If you are familiar with `tcpdump`, this provides similar functionality. 14 | 15 | > You may notice that it uses `sudo` and asks for a password: to analyze network traffic, Gor needs permissions that are available only to super users. 16 | > However, it is possible to configure Gor to [run as a non-root user](Running-as-non-root-user). 17 | 18 | 19 | Make a few requests by opening `http://localhost:8000` in your browser, or just by calling curl in a terminal: `curl http://localhost:8000`. You should see that `gor` outputs all the HTTP requests and responses right in the terminal window where it is running. 20 | 21 | 22 | **Gor is not a proxy:** you do not need to put a third-party tool in your critical path. Instead, Gor silently analyzes your application's traffic and does not affect it in any way. 23 | 24 | ### Replaying 25 | 26 | Now it's time to replay your original traffic to another environment. Let's start the same file web server, but on a different port: `gor file-server :8001`. 27 | 28 | Instead of `--output-stdout` we will use `--output-http` and provide the URL of the second server: `sudo ./gor --input-raw :8000 --output-http="http://localhost:8001"` 29 | 30 | Make a few requests to the first server. You should see them replicated to the second one, voila! 31 | 32 | ### Saving requests to file and replaying them later 33 | Sometimes it's not possible to replay requests in real time; Gor allows you to save requests to a file and replay them later. 34 | 35 | First use `--output-file` to save them: `sudo ./gor --input-raw :8000 --output-file=requests.gor`. This will create a new file and continuously write all captured requests to it. 36 | 37 | Let's re-run Gor, but this time to replay requests from the file: `./gor --input-file requests.gor --output-http="http://localhost:8001"`. You should see all the recorded requests coming to the second server, replayed in the same order and with exactly the same timing as they were recorded.
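If you later want to use such a recording for load testing rather than a faithful replay, the file input also accepts a speed multiplier and can be looped. The following is a sketch based on the load-testing options shown elsewhere in these docs (`|200%`, `--input-file-loop` and `--exit-after`); the numbers are arbitrary:

```
# Replay the recorded requests at double speed, in a loop, for 60 seconds
./gor --input-file "requests.gor|200%" --input-file-loop \
      --output-http="http://localhost:8001" --exit-after 60s
```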
38 | 39 | Next: [[The Basics]] 40 | 41 | ### Watch an overview: 42 | 43 | 44 | ![YOUTUBE](https://www.youtube.com/watch?v=CxuKZcMKaW4) -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | Gor is an open-source tool for capturing and replaying live HTTP traffic into a test environment in order to continuously test your system with real data. It can be used to increase confidence in code deployments, configuration changes and infrastructure changes. 2 | 3 | Read the following for more info: 4 | 5 | * [[Getting Started]] 6 | * [[The Basics]] 7 | * [[Capturing and replaying traffic]] 8 | * [[Replaying HTTP traffic]] 9 | * [[[PRO] Replaying Binary protocols]] 10 | * [[[PRO] Recording and replaying keep alive TCP sessions]] 11 | * [[Saving and Replaying from file]] 12 | * [Performance testing](https://github.com/buger/gor/wiki/Saving-and-Replaying-from-file#performance-testing) 13 | * [[Rate limiting]] 14 | * [[Request filtering]] 15 | * [[Request rewriting]] 16 | * [[Middleware]] 17 | * [[Distributed configuration]] 18 | * [[Exporting to ElasticSearch]] 19 | * [[FAQ]] 20 | * [[Troubleshooting]] 21 | 22 | ## Commercial Aspects 23 | 24 | * [[Commercial Support]] 25 | * [[Commercial FAQ]] 26 | * [[Commercial collaboration]] 27 | 28 | 29 | Next: [Getting Started](Getting-Started) 30 | -------------------------------------------------------------------------------- /docs/js/base.js: -------------------------------------------------------------------------------- 1 | $(document).ready(function () { 2 | $('img[alt="YOUTUBE"]').each(function () { 3 | var id = $(this).attr('src').split('/')[$(this).attr('src').split('/').length - 1].split("=")[1]; 4 | var video = ''; 5 | $(this).replaceWith(video); 6 | }); 7 | }); -------------------------------------------------------------------------------- /docs/pro/recording-and-replaying-keep-alive-tcp-sessions.md: -------------------------------------------------------------------------------- 1 | > **This feature is available only in the PRO version. See https://goreplay.org/pro.html for details.** 2 | 3 | By default, GoReplay does not guarantee that when you record a keep-alive TCP session, it will be replayed over a single TCP connection as well. This is fine for most cases, but it does not give an accurate number of TCP sessions while replaying, and it may cause issues if your application state depends on the TCP session (not to be confused with the HTTP session). 4 | 5 | The [GoReplay PRO](https://goreplay.org/pro.html) extension adds support for accurate recording and replaying of keep-alive TCP sessions. A separate connection to your server is created per original session, which makes benchmarks and tests far more accurate. To enable session recognition, just pass the `--recognize-tcp-sessions` option. 6 | 7 | ``` 8 | gor --input-raw :80 --recognize-tcp-sessions --output-http http://test.target 9 | ``` 10 | 11 | Note that enabling this option also changes the algorithm for distributing traffic when using `--split-output`; see [Distributed configuration]. -------------------------------------------------------------------------------- /docs/pro/replaying-binary-protocols.md: -------------------------------------------------------------------------------- 1 | > **This feature is available only in the PRO version. See https://goreplay.org/pro.html for details.** 2 | 3 | Gor includes basic support for working with binary formats like `thrift` or `protocol-buffers`. 
To start set `--input-raw-protocol` to 'binary' (by default 'http'). For replaying, you should use `--output-binary`, example: 4 | 5 | ``` 6 | gor --input-raw :80 --input-raw-protocol binary --output-binary staging:8081 7 | ``` 8 | 9 | While working with `--input-raw` you may notice a 2-second delay before messages are emitted to the outputs. This behaviour is expected and happening because for general binary protocol it is impossible to know when TCP message ends, so Gor has to set inactivity timeout. Each protocol has own rules (for example write message length as first bytes), and require individual handling to know message length. We consider improving detailed protocol support for `thrift`, `protocol-buffer` and etc. 10 | 11 | Note that you can use all load testing features for binary protocols. For example, the following command will loop and replay recorded payload on 10x speed for 30 seconds: 12 | ``` 13 | gor --input-file './binary*.gor|1000%' --output-binary staging:9091 --input-file-loop --exit-after 30s 14 | ``` -------------------------------------------------------------------------------- /elasticsearch_test.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | const expectedIndex = "gor" 8 | 9 | func assertExpectedGorIndex(index string, t *testing.T) { 10 | if expectedIndex != index { 11 | t.Fatalf("Expected index %s but got %s", expectedIndex, index) 12 | } 13 | } 14 | 15 | func assertExpectedIndex(expectedIndex string, index string, t *testing.T) { 16 | if expectedIndex != index { 17 | t.Fatalf("Expected index %s but got %s", expectedIndex, index) 18 | } 19 | } 20 | 21 | func assertExpectedError(returnedError error, t *testing.T) { 22 | expectedError := new(ESUriErorr) 23 | 24 | if expectedError != returnedError { 25 | t.Errorf("Expected err %s but got %s", expectedError, returnedError) 26 | } 27 | } 28 | 29 | func assertNoError(returnedError error, t *testing.T) { 30 | if nil != returnedError { 31 | t.Errorf("Expected no err but got %s", returnedError) 32 | } 33 | } 34 | 35 | // Argument host:port/index_name 36 | // i.e : localhost:9200/gor 37 | // Fail because scheme is mandatory 38 | func TestElasticConnectionBuildFailWithoutScheme(t *testing.T) { 39 | uri := "localhost:9200/" + expectedIndex 40 | 41 | err, _ := parseURI(uri) 42 | assertExpectedError(err, t) 43 | } 44 | 45 | // Argument scheme://Host:port 46 | // i.e : http://localhost:9200 47 | // Fail : explicit index is required 48 | func TestElasticConnectionBuildFailWithoutIndex(t *testing.T) { 49 | uri := "http://localhost:9200" 50 | 51 | err, index := parseURI(uri) 52 | 53 | assertExpectedIndex("", index, t) 54 | 55 | assertExpectedError(err, t) 56 | } 57 | 58 | // Argument scheme://Host/index_name 59 | // i.e : http://localhost/gor 60 | func TestElasticConnectionBuildFailWithoutPort(t *testing.T) { 61 | uri := "http://localhost/" + expectedIndex 62 | 63 | err, index := parseURI(uri) 64 | 65 | assertNoError(err, t) 66 | 67 | assertExpectedGorIndex(index, t) 68 | } 69 | 70 | // Argument scheme://Host:port/index_name 71 | // i.e : http://localhost:9200/gor 72 | func TestElasticLocalConnectionBuild(t *testing.T) { 73 | uri := "http://localhost:9200/" + expectedIndex 74 | 75 | err, index := parseURI(uri) 76 | 77 | assertNoError(err, t) 78 | 79 | assertExpectedGorIndex(index, t) 80 | } 81 | 82 | // Argument scheme://Host:port/index_name 83 | // i.e : http://localhost.local:9200/gor or https://localhost.local:9200/gor 84 | 
func TestElasticSimpleLocalWithSchemeConnectionBuild(t *testing.T) { 85 | uri := "http://localhost.local:9200/" + expectedIndex 86 | 87 | err, index := parseURI(uri) 88 | 89 | assertNoError(err, t) 90 | 91 | assertExpectedGorIndex(index, t) 92 | } 93 | 94 | // Argument scheme://Host:port/index_name 95 | // i.e : http://localhost.local:9200/gor or https://localhost.local:9200/gor 96 | func TestElasticSimpleLocalWithHTTPSConnectionBuild(t *testing.T) { 97 | uri := "https://localhost.local:9200/" + expectedIndex 98 | 99 | err, index := parseURI(uri) 100 | 101 | assertNoError(err, t) 102 | 103 | assertExpectedGorIndex(index, t) 104 | } 105 | 106 | // Argument scheme://Host:port/index_name 107 | // i.e : localhost.local:9200/pathtoElastic/gor 108 | func TestElasticLongPathConnectionBuild(t *testing.T) { 109 | uri := "http://localhost.local:9200/pathtoElastic/" + expectedIndex 110 | 111 | err, index := parseURI(uri) 112 | 113 | assertNoError(err, t) 114 | 115 | assertExpectedGorIndex(index, t) 116 | } 117 | 118 | // Argument scheme://Host:userinfo@port/index_name 119 | // i.e : http://user:password@localhost.local:9200/gor 120 | func TestElasticBasicAuthConnectionBuild(t *testing.T) { 121 | uri := "http://user:password@localhost.local:9200/" + expectedIndex 122 | 123 | err, index := parseURI(uri) 124 | 125 | assertNoError(err, t) 126 | 127 | assertExpectedGorIndex(index, t) 128 | } 129 | 130 | // Argument scheme://Host:port/path/index_name 131 | // i.e : http://localhost.local:9200/path/gor or https://localhost.local:9200/path/gor 132 | func TestElasticComplexPathConnectionBuild(t *testing.T) { 133 | uri := "http://localhost.local:9200/path/" + expectedIndex 134 | 135 | err, index := parseURI(uri) 136 | 137 | assertNoError(err, t) 138 | 139 | assertExpectedGorIndex(index, t) 140 | } 141 | -------------------------------------------------------------------------------- /emitter.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "fmt" 5 | "github.com/buger/goreplay/internal/byteutils" 6 | "hash/fnv" 7 | "io" 8 | "log" 9 | "sync" 10 | 11 | "github.com/coocood/freecache" 12 | ) 13 | 14 | // Emitter represents an abject to manage plugins communication 15 | type Emitter struct { 16 | sync.WaitGroup 17 | plugins *InOutPlugins 18 | } 19 | 20 | // NewEmitter creates and initializes new Emitter object. 
21 | func NewEmitter() *Emitter { 22 | return &Emitter{} 23 | } 24 | 25 | // Start initialize loop for sending data from inputs to outputs 26 | func (e *Emitter) Start(plugins *InOutPlugins, middlewareCmd string) { 27 | if Settings.CopyBufferSize < 1 { 28 | Settings.CopyBufferSize = 5 << 20 29 | } 30 | e.plugins = plugins 31 | 32 | if middlewareCmd != "" { 33 | middleware := NewMiddleware(middlewareCmd) 34 | 35 | for _, in := range plugins.Inputs { 36 | middleware.ReadFrom(in) 37 | } 38 | 39 | e.plugins.Inputs = append(e.plugins.Inputs, middleware) 40 | e.plugins.All = append(e.plugins.All, middleware) 41 | e.Add(1) 42 | go func() { 43 | defer e.Done() 44 | if err := CopyMulty(middleware, plugins.Outputs...); err != nil { 45 | Debug(2, fmt.Sprintf("[EMITTER] error during copy: %q", err)) 46 | } 47 | }() 48 | } else { 49 | for _, in := range plugins.Inputs { 50 | e.Add(1) 51 | go func(in PluginReader) { 52 | defer e.Done() 53 | if err := CopyMulty(in, plugins.Outputs...); err != nil { 54 | Debug(2, fmt.Sprintf("[EMITTER] error during copy: %q", err)) 55 | } 56 | }(in) 57 | } 58 | } 59 | } 60 | 61 | // Close closes all the goroutine and waits for it to finish. 62 | func (e *Emitter) Close() { 63 | for _, p := range e.plugins.All { 64 | if cp, ok := p.(io.Closer); ok { 65 | cp.Close() 66 | } 67 | } 68 | if len(e.plugins.All) > 0 { 69 | // wait for everything to stop 70 | e.Wait() 71 | } 72 | e.plugins.All = nil // avoid Close to make changes again 73 | } 74 | 75 | // CopyMulty copies from 1 reader to multiple writers 76 | func CopyMulty(src PluginReader, writers ...PluginWriter) error { 77 | wIndex := 0 78 | modifier := NewHTTPModifier(&Settings.ModifierConfig) 79 | filteredRequests := freecache.NewCache(200 * 1024 * 1024) // 200M 80 | 81 | for { 82 | msg, err := src.PluginRead() 83 | if err != nil { 84 | if err == ErrorStopped || err == io.EOF { 85 | return nil 86 | } 87 | return err 88 | } 89 | if msg != nil && len(msg.Data) > 0 { 90 | if len(msg.Data) > int(Settings.CopyBufferSize) { 91 | msg.Data = msg.Data[:Settings.CopyBufferSize] 92 | } 93 | meta := payloadMeta(msg.Meta) 94 | if len(meta) < 3 { 95 | Debug(2, fmt.Sprintf("[EMITTER] Found malformed record %q from %q", msg.Meta, src)) 96 | continue 97 | } 98 | requestID := meta[1] 99 | // start a subroutine only when necessary 100 | if Settings.Verbose >= 3 { 101 | Debug(3, "[EMITTER] input: ", byteutils.SliceToString(msg.Meta[:len(msg.Meta)-1]), " from: ", src) 102 | } 103 | if modifier != nil { 104 | Debug(3, "[EMITTER] modifier:", requestID, "from:", src) 105 | if isRequestPayload(msg.Meta) { 106 | msg.Data = modifier.Rewrite(msg.Data) 107 | // If modifier tells to skip request 108 | if len(msg.Data) == 0 { 109 | filteredRequests.Set(requestID, []byte{}, 60) // 110 | continue 111 | } 112 | Debug(3, "[EMITTER] Rewritten input:", requestID, "from:", src) 113 | 114 | } else { 115 | _, err := filteredRequests.Get(requestID) 116 | if err == nil { 117 | filteredRequests.Del(requestID) 118 | continue 119 | } 120 | } 121 | } 122 | 123 | if Settings.PrettifyHTTP { 124 | msg.Data = prettifyHTTP(msg.Data) 125 | if len(msg.Data) == 0 { 126 | continue 127 | } 128 | } 129 | 130 | if Settings.SplitOutput { 131 | if Settings.RecognizeTCPSessions { 132 | if !PRO { 133 | log.Fatal("Detailed TCP sessions work only with PRO license") 134 | } 135 | hasher := fnv.New32a() 136 | hasher.Write(meta[1]) 137 | 138 | wIndex = int(hasher.Sum32()) % len(writers) 139 | if _, err := writers[wIndex].PluginWrite(msg); err != nil { 140 | return err 141 | } 142 | } else 
{ 143 | // Simple round robin 144 | if _, err := writers[wIndex].PluginWrite(msg); err != nil { 145 | return err 146 | } 147 | 148 | wIndex = (wIndex + 1) % len(writers) 149 | } 150 | } else { 151 | for _, dst := range writers { 152 | if _, err := dst.PluginWrite(msg); err != nil && err != io.ErrClosedPipe { 153 | return err 154 | } 155 | } 156 | } 157 | } 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /examples/middleware/echo.clj: -------------------------------------------------------------------------------- 1 | (ns echo.core 2 | (:gen-class) 3 | (:require [clojure.string :as cs] 4 | [clojure.java.io :as io]) 5 | (:import org.apache.commons.codec.binary.Hex 6 | java.io.BufferedReader 7 | java.io.IOException 8 | java.io.InputStreamReader)) 9 | 10 | 11 | (defn transform-http-msg 12 | "Function that transforms/filters the incoming HTTP messages." 13 | [headers body] 14 | ;; do actual transformations here 15 | [headers body]) 16 | 17 | 18 | (defn decode-hex-string 19 | "Decode an Hex-encoded string." 20 | [s] 21 | (String. (Hex/decodeHex (.toCharArray s)))) 22 | 23 | 24 | (defn encode-hex-string 25 | "Encode a string to a hex-encoded string." 26 | [^String s] 27 | (String. (Hex/encodeHex (.getBytes s)))) 28 | 29 | 30 | (defn -main 31 | [& args] 32 | (let [br (BufferedReader. (InputStreamReader. System/in))] 33 | (try 34 | (loop [hex-line (.readLine br)] 35 | (let [decoded-req (decode-hex-string hex-line) 36 | 37 | ;; empty line separates headers from body 38 | http-request (partition-by empty? (cs/split-lines decoded-req)) 39 | headers (first http-request) 40 | 41 | ;; HTTP messages can contain no body: 42 | body (when (= 3 (count http-request)) (last http-request)) 43 | [new-headers new-body] (transform-http-msg headers body)] 44 | 45 | (println (encode-hex-string (str (cs/join "\n" headers) 46 | (when body 47 | (str "\n\n" 48 | (cs/join "\n" body))))))) 49 | (when-let [line (.readLine br)] 50 | (recur line))) 51 | (catch IOException e nil)))) 52 | 53 | 54 | -------------------------------------------------------------------------------- /examples/middleware/echo.java: -------------------------------------------------------------------------------- 1 | import java.io.BufferedReader; 2 | import java.io.IOException; 3 | import java.io.InputStreamReader; 4 | 5 | import org.apache.commons.codec.DecoderException; 6 | import org.apache.commons.codec.binary.Hex; 7 | 8 | 9 | class Echo { 10 | public static String decodeHexString(String s) throws DecoderException { 11 | return new String(Hex.decodeHex(s.toCharArray())); 12 | } 13 | 14 | public static String encodeHexString(String s) { 15 | return new String(Hex.encodeHex(s.getBytes())); 16 | } 17 | 18 | public static String transformHTTPMessage(String req) { 19 | // do actual transformations here 20 | return req; 21 | } 22 | 23 | public static void main(String[] args) throws DecoderException { 24 | if(args != null){ 25 | for(String arg : args){ 26 | System.out.println(arg); 27 | } 28 | 29 | } 30 | 31 | BufferedReader stdin = new BufferedReader(new InputStreamReader( 32 | System.in)); 33 | String line = null; 34 | 35 | try { 36 | while ((line = stdin.readLine()) != null) { 37 | String decodedLine = decodeHexString(line); 38 | 39 | String transformedLine = transformHTTPMessage(decodedLine); 40 | 41 | String encodedLine = encodeHexString(transformedLine); 42 | System.out.println(encodedLine); 43 | 44 | } 45 | } catch (IOException e) { 46 | } 47 | } 48 | } 49 | 
-------------------------------------------------------------------------------- /examples/middleware/echo.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | const readline = require("readline"); 3 | const StringDecoder = require("string_decoder").StringDecoder 4 | 5 | const rl = readline.createInterface({ 6 | input: process.stdin 7 | }); 8 | 9 | var ignoreIds = new Set(); 10 | var ignoreAddresses = "/api"; 11 | const decoder = new StringDecoder("utf8"); 12 | 13 | function convertHexString(hex) { 14 | var bytes = []; 15 | for (var i = 0; i < hex.length - 1; i += 2) { 16 | bytes.push(parseInt(hex.substr(i, 2), 16)); 17 | } 18 | return decoder.write(Buffer.from(bytes)); 19 | } 20 | 21 | function log(output) { 22 | console.error("==================="); 23 | console.error(output); 24 | } 25 | 26 | function shouldOutputLine(request) { 27 | const components = request.split("\n"); 28 | const header = components[0].split(" "); 29 | const type = parseInt(header[0]); 30 | const tag = header[1]; 31 | 32 | if (type === 3) { 33 | return true; 34 | } 35 | if (type === 1) { 36 | // Check if it's oauth 37 | const endpoint = components[1].split(" ")[1]; 38 | if (!endpoint.startsWith(ignoreAddresses)) { 39 | ignoreIds.add(tag); 40 | return false; 41 | } 42 | } else if (type === 2) { 43 | if (ignoreIds.has(tag)) { 44 | ignoreIds.delete(tag); 45 | return false; 46 | } 47 | } 48 | return true; 49 | } 50 | 51 | rl.on("line", (input) => { 52 | const str = convertHexString(input); 53 | console.log(input); 54 | if (shouldOutputLine(str)) { 55 | log(str); 56 | } 57 | }); 58 | -------------------------------------------------------------------------------- /examples/middleware/echo.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | import sys 5 | import fileinput 6 | import binascii 7 | 8 | # Used to find end of the Headers section 9 | EMPTY_LINE = b'\r\n\r\n' 10 | 11 | 12 | def log(msg): 13 | """ 14 | Logging to STDERR as STDOUT and STDIN used for data transfer 15 | @type msg: str or byte string 16 | @param msg: Message to log to STDERR 17 | """ 18 | try: 19 | msg = str(msg) + '\n' 20 | except: 21 | pass 22 | sys.stderr.write(msg) 23 | sys.stderr.flush() 24 | 25 | 26 | def find_end_of_headers(byte_data): 27 | """ 28 | Finds where the header portion ends and the content portion begins. 
29 | @type byte_data: str or byte string 30 | @param byte_data: Hex decoded req or resp string 31 | """ 32 | return byte_data.index(EMPTY_LINE) + 4 33 | 34 | 35 | def process_stdin(): 36 | """ 37 | Process STDIN and output to STDOUT 38 | """ 39 | for raw_line in fileinput.input(): 40 | 41 | line = raw_line.rstrip() 42 | 43 | # Decode base64 encoded line 44 | decoded = bytes.fromhex(line) 45 | 46 | # Split into metadata and payload, the payload is headers + body 47 | (raw_metadata, payload) = decoded.split(b'\n', 1) 48 | 49 | # Split into headers and payload 50 | headers_pos = find_end_of_headers(payload) 51 | raw_headers = payload[:headers_pos] 52 | raw_content = payload[headers_pos:] 53 | 54 | log('===================================') 55 | request_type_id = int(raw_metadata.split(b' ')[0]) 56 | log('Request type: {}'.format({ 57 | 1: 'Request', 58 | 2: 'Original Response', 59 | 3: 'Replayed Response' 60 | }[request_type_id])) 61 | log('===================================') 62 | 63 | log('Original data:') 64 | log(line) 65 | 66 | log('Decoded request:') 67 | log(decoded) 68 | 69 | encoded = binascii.hexlify(raw_metadata + b'\n' + raw_headers + raw_content).decode('ascii') 70 | log('Encoded data:') 71 | log(encoded) 72 | 73 | sys.stdout.write(encoded + '\n') 74 | 75 | if __name__ == '__main__': 76 | process_stdin() 77 | -------------------------------------------------------------------------------- /examples/middleware/echo.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # encoding: utf-8 3 | while data = STDIN.gets # continuously read line from STDIN 4 | next unless data 5 | data = data.chomp # remove end of line symbol 6 | 7 | decoded = [data].pack("H*") # decode base64 encoded request 8 | 9 | # decoded value is raw HTTP payload, example: 10 | # 11 | # POST /post HTTP/1.1 12 | # Content-Length: 7 13 | # Host: www.w3.org 14 | # 15 | # a=1&b=2" 16 | 17 | encoded = decoded.unpack("H*").first # encoding back to base64 18 | 19 | # Emit request back 20 | # You can skip this if want to filter out request 21 | STDOUT.puts encoded 22 | end 23 | -------------------------------------------------------------------------------- /examples/middleware/echo.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # `xxd` utility included into vim-common package 4 | # It allow hex decoding/encoding 5 | # 6 | # This example may broke if you request contains `null` string, you may consider using pipes instead. 
7 | # See: https://github.com/buger/gor/issues/309 8 | # 9 | 10 | function log { 11 | if [[ -n "$GOR_TEST" ]]; then # if we are not testing 12 | # Logging to stderr, because stdout/stdin used for data transfer 13 | >&2 echo "[DEBUG][ECHO] $1" 14 | fi 15 | } 16 | 17 | while read line; do 18 | decoded=$(echo -e "$line" | xxd -r -p) 19 | 20 | header=$(echo -e "$decoded" | head -n +1) 21 | payload=$(echo -e "$decoded" | tail -n +2) 22 | 23 | encoded=$(echo -e "$header\n$payload" | xxd -p | tr -d "\\n") 24 | 25 | log "" 26 | log "===================================" 27 | 28 | case ${header:0:1} in 29 | "1") 30 | log "Request type: Request" 31 | ;; 32 | "2") 33 | log "Request type: Original Response" 34 | ;; 35 | "3") 36 | log "Request type: Replayed Response" 37 | ;; 38 | *) 39 | log "Unknown request type $header" 40 | esac 41 | echo "$encoded" 42 | 43 | log "===================================" 44 | 45 | log "Original data: $line" 46 | log "Decoded request: $decoded" 47 | log "Encoded data: $encoded" 48 | done; 49 | -------------------------------------------------------------------------------- /examples/middleware/token_modifier.go: -------------------------------------------------------------------------------- 1 | /* 2 | This middleware made for auth system that randomly generate access tokens, which used later for accessing secure content. Since there is no pre-defined token value, naive approach without middleware (or if middleware use only request payloads) will fail, because replayed server have own tokens, not synced with origin. To fix this, our middleware should take in account responses of replayed and origin server, store `originalToken -> replayedToken` aliases and rewrite all requests using this token to use replayed alias. See `middleware_test.go#TestTokenMiddleware` test for examples of using this middleware. 
3 | 4 | How middleware works: 5 | 6 | Original request +--------------+ 7 | +-------------+----------STDIN---------->+ | 8 | | Gor input | | Middleware | 9 | +-------------+----------STDIN---------->+ | 10 | Original response +------+---+---+ 11 | | ^ 12 | +-------------+ Modified request v | 13 | | Gor output +<---------STDOUT-----------------+ | 14 | +-----+-------+ | 15 | | | 16 | | Replayed response | 17 | +------------------STDIN----------------->----+ 18 | */ 19 | 20 | package main 21 | 22 | import ( 23 | "bufio" 24 | "bytes" 25 | "encoding/hex" 26 | "fmt" 27 | "github.com/buger/goreplay/proto" 28 | "os" 29 | ) 30 | 31 | // requestID -> originalToken 32 | var originalTokens map[string][]byte 33 | 34 | // originalToken -> replayedToken 35 | var tokenAliases map[string][]byte 36 | 37 | func main() { 38 | originalTokens = make(map[string][]byte) 39 | tokenAliases = make(map[string][]byte) 40 | 41 | scanner := bufio.NewScanner(os.Stdin) 42 | 43 | for scanner.Scan() { 44 | encoded := scanner.Bytes() 45 | buf := make([]byte, len(encoded)/2) 46 | hex.Decode(buf, encoded) 47 | 48 | process(buf) 49 | } 50 | } 51 | 52 | func process(buf []byte) { 53 | // First byte indicate payload type, possible values: 54 | // 1 - Request 55 | // 2 - Response 56 | // 3 - ReplayedResponse 57 | payloadType := buf[0] 58 | headerSize := bytes.IndexByte(buf, '\n') + 1 59 | header := buf[:headerSize-1] 60 | 61 | // Header contains space separated values of: request type, request id, and request start time (or round-trip time for responses) 62 | meta := bytes.Split(header, []byte(" ")) 63 | // For each request you should receive 3 payloads (request, response, replayed response) with same request id 64 | reqID := string(meta[1]) 65 | payload := buf[headerSize:] 66 | 67 | Debug("Received payload:", string(buf)) 68 | 69 | switch payloadType { 70 | case '1': // Request 71 | if bytes.Equal(proto.Path(payload), []byte("/token")) { 72 | originalTokens[reqID] = []byte{} 73 | Debug("Found token request:", reqID) 74 | } else { 75 | token, vs, _ := proto.PathParam(payload, []byte("token")) 76 | 77 | if vs != -1 { // If there is GET token param 78 | if alias, ok := tokenAliases[string(token)]; ok { 79 | // Rewrite original token to alias 80 | payload = proto.SetPathParam(payload, []byte("token"), alias) 81 | 82 | // Copy modified payload to our buffer 83 | buf = append(buf[:headerSize], payload...) 84 | } 85 | } 86 | } 87 | 88 | // Emitting data back 89 | os.Stdout.Write(encode(buf)) 90 | case '2': // Original response 91 | if _, ok := originalTokens[reqID]; ok { 92 | // Token is inside response body 93 | secureToken := proto.Body(payload) 94 | originalTokens[reqID] = secureToken 95 | Debug("Remember origial token:", string(secureToken)) 96 | } 97 | case '3': // Replayed response 98 | if originalToken, ok := originalTokens[reqID]; ok { 99 | delete(originalTokens, reqID) 100 | secureToken := proto.Body(payload) 101 | tokenAliases[string(originalToken)] = secureToken 102 | 103 | Debug("Create alias for new token token, was:", string(originalToken), "now:", string(secureToken)) 104 | } 105 | } 106 | } 107 | 108 | func encode(buf []byte) []byte { 109 | dst := make([]byte, len(buf)*2+1) 110 | hex.Encode(dst, buf) 111 | dst[len(dst)-1] = '\n' 112 | 113 | return dst 114 | } 115 | 116 | func Debug(args ...interface{}) { 117 | if os.Getenv("GOR_TEST") == "" { // if we are not testing 118 | fmt.Fprint(os.Stderr, "[DEBUG][TOKEN-MOD] ") 119 | fmt.Fprintln(os.Stderr, args...) 
120 | } 121 | } 122 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/buger/goreplay 2 | 3 | go 1.21 4 | 5 | require ( 6 | github.com/Shopify/sarama v1.38.1 7 | github.com/aws/aws-sdk-go v1.44.262 8 | github.com/coocood/freecache v1.2.3 9 | github.com/google/gopacket v1.1.20-0.20210429153827-3eaba0894325 10 | github.com/gorilla/websocket v1.5.0 11 | github.com/klauspost/compress v1.16.5 // indirect 12 | github.com/mattbaird/elastigo v0.0.0-20170123220020-2fe47fd29e4b 13 | github.com/stretchr/testify v1.8.2 14 | github.com/xdg-go/scram v1.1.2 15 | golang.org/x/net v0.34.0 16 | golang.org/x/sys v0.29.0 17 | k8s.io/apimachinery v0.27.1 18 | k8s.io/client-go v0.27.1 19 | ) 20 | 21 | require ( 22 | github.com/araddon/gou v0.0.0-20211019181548-e7d08105776c // indirect 23 | github.com/bitly/go-hostpool v0.1.0 // indirect 24 | github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect 25 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 26 | github.com/davecgh/go-spew v1.1.1 // indirect 27 | github.com/eapache/go-resiliency v1.3.0 // indirect 28 | github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect 29 | github.com/eapache/queue v1.1.0 // indirect 30 | github.com/emicklei/go-restful/v3 v3.10.2 // indirect 31 | github.com/go-logr/logr v1.2.4 // indirect 32 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 33 | github.com/go-openapi/jsonreference v0.20.2 // indirect 34 | github.com/go-openapi/swag v0.22.3 // indirect 35 | github.com/gogo/protobuf v1.3.2 // indirect 36 | github.com/golang/protobuf v1.5.3 // indirect 37 | github.com/golang/snappy v0.0.4 // indirect 38 | github.com/google/gnostic v0.6.9 // indirect 39 | github.com/google/go-cmp v0.5.9 // indirect 40 | github.com/google/gofuzz v1.2.0 // indirect 41 | github.com/google/uuid v1.3.0 // indirect 42 | github.com/hashicorp/errwrap v1.1.0 // indirect 43 | github.com/hashicorp/go-multierror v1.1.1 // indirect 44 | github.com/hashicorp/go-uuid v1.0.3 // indirect 45 | github.com/jcmturner/aescts/v2 v2.0.0 // indirect 46 | github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect 47 | github.com/jcmturner/gofork v1.7.6 // indirect 48 | github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect 49 | github.com/jcmturner/rpc/v2 v2.0.3 // indirect 50 | github.com/jmespath/go-jmespath v0.4.0 // indirect 51 | github.com/josharian/intern v1.0.0 // indirect 52 | github.com/json-iterator/go v1.1.12 // indirect 53 | github.com/mailru/easyjson v0.7.7 // indirect 54 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 55 | github.com/modern-go/reflect2 v1.0.2 // indirect 56 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 57 | github.com/pierrec/lz4/v4 v4.1.17 // indirect 58 | github.com/pmezard/go-difflib v1.0.0 // indirect 59 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect 60 | github.com/smartystreets/goconvey v1.7.2 // indirect 61 | github.com/xdg-go/pbkdf2 v1.0.0 // indirect 62 | github.com/xdg-go/stringprep v1.0.4 // indirect 63 | golang.org/x/crypto v0.32.0 // indirect 64 | golang.org/x/oauth2 v0.8.0 // indirect 65 | golang.org/x/term v0.28.0 // indirect 66 | golang.org/x/text v0.21.0 // indirect 67 | golang.org/x/time v0.3.0 // indirect 68 | google.golang.org/appengine v1.6.7 // indirect 69 | google.golang.org/protobuf v1.30.0 // indirect 70 | gopkg.in/inf.v0 v0.9.1 // indirect 71 | 
gopkg.in/yaml.v2 v2.4.0 // indirect 72 | gopkg.in/yaml.v3 v3.0.1 // indirect 73 | k8s.io/api v0.27.1 // indirect 74 | k8s.io/klog/v2 v2.100.1 // indirect 75 | k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect 76 | k8s.io/utils v0.0.0-20230505201702-9f6742963106 // indirect 77 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 78 | sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect 79 | sigs.k8s.io/yaml v1.3.0 // indirect 80 | ) 81 | -------------------------------------------------------------------------------- /gor_stat.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "runtime" 5 | "strconv" 6 | "time" 7 | ) 8 | 9 | type GorStat struct { 10 | statName string 11 | rateMs int 12 | latest int 13 | mean int 14 | max int 15 | count int 16 | } 17 | 18 | func NewGorStat(statName string, rateMs int) (s *GorStat) { 19 | s = new(GorStat) 20 | s.statName = statName 21 | s.rateMs = rateMs 22 | s.latest = 0 23 | s.mean = 0 24 | s.max = 0 25 | s.count = 0 26 | 27 | if Settings.Stats { 28 | go s.reportStats() 29 | } 30 | return 31 | } 32 | 33 | func (s *GorStat) Write(latest int) { 34 | if Settings.Stats { 35 | if latest > s.max { 36 | s.max = latest 37 | } 38 | if latest != 0 { 39 | s.mean = ((s.mean * s.count) + latest) / (s.count + 1) 40 | } 41 | s.latest = latest 42 | s.count = s.count + 1 43 | } 44 | } 45 | 46 | func (s *GorStat) Reset() { 47 | s.latest = 0 48 | s.max = 0 49 | s.mean = 0 50 | s.count = 0 51 | } 52 | 53 | func (s *GorStat) String() string { 54 | return s.statName + ":" + strconv.Itoa(s.latest) + "," + strconv.Itoa(s.mean) + "," + strconv.Itoa(s.max) + "," + strconv.Itoa(s.count) + "," + strconv.Itoa(s.count/(s.rateMs/1000.0)) + "," + strconv.Itoa(runtime.NumGoroutine()) 55 | } 56 | 57 | func (s *GorStat) reportStats() { 58 | Debug(0, "\n", s.statName+":latest,mean,max,count,count/second,gcount") 59 | for { 60 | Debug(0, "\n", s) 61 | s.Reset() 62 | time.Sleep(time.Duration(s.rateMs) * time.Millisecond) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /homebrew/gor.rb: -------------------------------------------------------------------------------- 1 | require "language/go" 2 | 3 | class Gor < Formula 4 | desc "Real-time HTTP traffic replay tool written in Go" 5 | homepage "https://gortool.com" 6 | url "https://github.com/buger/gor/archive/v0.14.0.tar.gz" 7 | sha256 "62260a6f5cabde571b91d5762fba9c47691643df0a58565cbe808854cd064dc8" 8 | head "https://github.com/buger/gor.git" 9 | 10 | bottle do 11 | cellar :any_skip_relocation 12 | sha256 "c382403de70a41b7445920a02051f5e82030704aaaae70cfcd4e8f401cc87f6a" => :el_capitan 13 | sha256 "4b76b3785584897800e87967f1af9510208faefe46f57d7bd6f8b40a7133c19b" => :yosemite 14 | sha256 "d186cb1566d33ab8f78215e69934f49dd96becb1c236905b4502d94399ae1974" => :mavericks 15 | end 16 | 17 | depends_on "go" => :build 18 | 19 | go_resource "github.com/bitly/go-hostpool" do 20 | url "https://github.com/bitly/go-hostpool.git", 21 | :revision => "d0e59c22a56e8dadfed24f74f452cea5a52722d2" 22 | end 23 | 24 | go_resource "github.com/buger/elastigo" do 25 | url "https://github.com/buger/elastigo.git", 26 | :revision => "23fcfd9db0d8be2189a98fdab77a4c90fcc3a1e9" 27 | end 28 | 29 | go_resource "github.com/google/gopacket" do 30 | url "https://github.com/google/gopacket.git", 31 | :revision => "aa09ced736460d76535444c825932a0742975f7d" 32 | end 33 | 34 | def install 35 | ENV["GOPATH"] = buildpath 36 | 
mkdir_p buildpath/"src/github.com/buger/" 37 | ln_sf buildpath, buildpath/"src/github.com/buger/gor" 38 | Language::Go.stage_deps resources, buildpath/"src" 39 | 40 | system "go", "build", "-o", "#{bin}/gor", "-ldflags", "-X main.VERSION \"#{version}\"" 41 | end 42 | 43 | test do 44 | assert_match version.to_s, shell_output("#{bin}/gor", 1) 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /http_modifier.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "bytes" 5 | "encoding/base64" 6 | "github.com/buger/goreplay/proto" 7 | "hash/fnv" 8 | "strings" 9 | ) 10 | 11 | type HTTPModifier struct { 12 | config *HTTPModifierConfig 13 | } 14 | 15 | func NewHTTPModifier(config *HTTPModifierConfig) *HTTPModifier { 16 | // Optimization to skip modifier completely if we do not need it 17 | if len(config.URLRegexp) == 0 && 18 | len(config.URLNegativeRegexp) == 0 && 19 | len(config.URLRewrite) == 0 && 20 | len(config.HeaderRewrite) == 0 && 21 | len(config.HeaderFilters) == 0 && 22 | len(config.HeaderNegativeFilters) == 0 && 23 | len(config.HeaderBasicAuthFilters) == 0 && 24 | len(config.HeaderHashFilters) == 0 && 25 | len(config.ParamHashFilters) == 0 && 26 | len(config.Params) == 0 && 27 | len(config.Headers) == 0 && 28 | len(config.Methods) == 0 { 29 | return nil 30 | } 31 | 32 | return &HTTPModifier{config: config} 33 | } 34 | 35 | func (m *HTTPModifier) Rewrite(payload []byte) (response []byte) { 36 | if !proto.HasRequestTitle(payload) { 37 | return payload 38 | } 39 | 40 | if len(m.config.Methods) > 0 { 41 | method := proto.Method(payload) 42 | 43 | matched := false 44 | 45 | for _, m := range m.config.Methods { 46 | if bytes.Equal(method, m) { 47 | matched = true 48 | break 49 | } 50 | } 51 | 52 | if !matched { 53 | return 54 | } 55 | } 56 | 57 | if len(m.config.Headers) > 0 { 58 | for _, header := range m.config.Headers { 59 | payload = proto.SetHeader(payload, []byte(header.Name), []byte(header.Value)) 60 | } 61 | } 62 | 63 | if len(m.config.Params) > 0 { 64 | for _, param := range m.config.Params { 65 | payload = proto.SetPathParam(payload, param.Name, param.Value) 66 | } 67 | } 68 | 69 | if len(m.config.URLRegexp) > 0 { 70 | path := proto.Path(payload) 71 | 72 | matched := false 73 | 74 | for _, f := range m.config.URLRegexp { 75 | if f.regexp.Match(path) { 76 | matched = true 77 | break 78 | } 79 | } 80 | 81 | if !matched { 82 | return 83 | } 84 | } 85 | 86 | if len(m.config.URLNegativeRegexp) > 0 { 87 | path := proto.Path(payload) 88 | 89 | for _, f := range m.config.URLNegativeRegexp { 90 | if f.regexp.Match(path) { 91 | return 92 | } 93 | } 94 | } 95 | 96 | if len(m.config.HeaderFilters) > 0 { 97 | for _, f := range m.config.HeaderFilters { 98 | value := proto.Header(payload, f.name) 99 | 100 | if len(value) == 0 { 101 | return 102 | } 103 | 104 | if !f.regexp.Match(value) { 105 | return 106 | } 107 | } 108 | } 109 | 110 | if len(m.config.HeaderNegativeFilters) > 0 { 111 | for _, f := range m.config.HeaderNegativeFilters { 112 | value := proto.Header(payload, f.name) 113 | 114 | if len(value) > 0 && f.regexp.Match(value) { 115 | return 116 | } 117 | } 118 | } 119 | 120 | if len(m.config.HeaderBasicAuthFilters) > 0 { 121 | for _, f := range m.config.HeaderBasicAuthFilters { 122 | value := proto.Header(payload, []byte("Authorization")) 123 | 124 | if len(value) > 0 { 125 | valueString := string(value) 126 | trimmedBasicAuthEncoded := strings.TrimPrefix(valueString, 
"Basic ") 127 | if strings.Compare(valueString, trimmedBasicAuthEncoded) != 0 { 128 | decodedAuth, _ := base64.StdEncoding.DecodeString(trimmedBasicAuthEncoded) 129 | if !f.regexp.Match(decodedAuth) { 130 | return 131 | } 132 | } 133 | } 134 | } 135 | } 136 | 137 | if len(m.config.HeaderHashFilters) > 0 { 138 | for _, f := range m.config.HeaderHashFilters { 139 | value := proto.Header(payload, f.name) 140 | 141 | if len(value) > 0 { 142 | hasher := fnv.New32a() 143 | hasher.Write(value) 144 | 145 | if (hasher.Sum32() % 100) >= f.percent { 146 | return 147 | } 148 | } 149 | } 150 | } 151 | 152 | if len(m.config.ParamHashFilters) > 0 { 153 | for _, f := range m.config.ParamHashFilters { 154 | value, s, _ := proto.PathParam(payload, f.name) 155 | 156 | if s != -1 { 157 | hasher := fnv.New32a() 158 | hasher.Write(value) 159 | 160 | if (hasher.Sum32() % 100) >= f.percent { 161 | return 162 | } 163 | } 164 | } 165 | } 166 | 167 | if len(m.config.URLRewrite) > 0 { 168 | path := proto.Path(payload) 169 | 170 | for _, f := range m.config.URLRewrite { 171 | if f.src.Match(path) { 172 | path = f.src.ReplaceAll(path, f.target) 173 | payload = proto.SetPath(payload, path) 174 | 175 | break 176 | } 177 | } 178 | } 179 | 180 | if len(m.config.HeaderRewrite) > 0 { 181 | for _, f := range m.config.HeaderRewrite { 182 | value := proto.Header(payload, f.header) 183 | if len(value) == 0 { 184 | break 185 | } 186 | 187 | if f.src.Match(value) { 188 | newValue := f.src.ReplaceAll(value, f.target) 189 | payload = proto.SetHeader(payload, f.header, newValue) 190 | } 191 | } 192 | } 193 | 194 | return payload 195 | } 196 | -------------------------------------------------------------------------------- /http_modifier_settings_test.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestHTTPHeaderFilters(t *testing.T) { 8 | filters := HTTPHeaderFilters{} 9 | 10 | err := filters.Set("Header1:^$") 11 | if err != nil { 12 | t.Error("Should not error on Header1:^$") 13 | } 14 | 15 | err = filters.Set("Header2:^:$") 16 | if err != nil { 17 | t.Error("Should not error on Header2:^:$") 18 | } 19 | 20 | // Missing colon 21 | err = filters.Set("Header3-^$") 22 | if err == nil { 23 | t.Error("Should error on Header2:^:$") 24 | } 25 | } 26 | 27 | func TestHTTPHashFilters(t *testing.T) { 28 | filters := HTTPHashFilters{} 29 | 30 | err := filters.Set("Header1:1/2") 31 | if err != nil { 32 | t.Error("Should support old syntax") 33 | } 34 | 35 | if filters[0].percent != 50 { 36 | t.Error("Wrong percentage", filters[0].percent) 37 | } 38 | 39 | err = filters.Set("Header2:1") 40 | if err == nil { 41 | t.Error("Should error on Header2 because no % symbol") 42 | } 43 | 44 | err = filters.Set("Header2:10%") 45 | if err != nil { 46 | t.Error("Should pass") 47 | } 48 | 49 | if filters[1].percent != 10 { 50 | t.Error("Wrong percentage", filters[1].percent) 51 | } 52 | } 53 | 54 | func TestUrlRewriteMap(t *testing.T) { 55 | var err error 56 | rewrites := URLRewriteMap{} 57 | 58 | if err = rewrites.Set("/v1/user/([^\\/]+)/ping:/v2/user/$1/ping"); err != nil { 59 | t.Error("Should set mapping", err) 60 | } 61 | 62 | if err = rewrites.Set("/v1/user/([^\\/]+)/ping"); err == nil { 63 | t.Error("Should not set mapping without :") 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /http_prettifier.go: -------------------------------------------------------------------------------- 1 | package 
goreplay 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "fmt" 7 | "github.com/buger/goreplay/proto" 8 | "io/ioutil" 9 | "net/http/httputil" 10 | "strconv" 11 | ) 12 | 13 | func prettifyHTTP(p []byte) []byte { 14 | 15 | tEnc := bytes.Equal(proto.Header(p, []byte("Transfer-Encoding")), []byte("chunked")) 16 | cEnc := bytes.Equal(proto.Header(p, []byte("Content-Encoding")), []byte("gzip")) 17 | 18 | if !(tEnc || cEnc) { 19 | return p 20 | } 21 | 22 | headersPos := proto.MIMEHeadersEndPos(p) 23 | 24 | if headersPos < 5 || headersPos > len(p) { 25 | return p 26 | } 27 | 28 | headers := p[:headersPos] 29 | content := p[headersPos:] 30 | 31 | if tEnc { 32 | buf := bytes.NewReader(content) 33 | r := httputil.NewChunkedReader(buf) 34 | content, _ = ioutil.ReadAll(r) 35 | 36 | headers = proto.DeleteHeader(headers, []byte("Transfer-Encoding")) 37 | 38 | newLen := strconv.Itoa(len(content)) 39 | headers = proto.SetHeader(headers, []byte("Content-Length"), []byte(newLen)) 40 | } 41 | 42 | if cEnc { 43 | buf := bytes.NewReader(content) 44 | g, err := gzip.NewReader(buf) 45 | 46 | if err != nil { 47 | Debug(1, "[Prettifier] GZIP encoding error:", err) 48 | return []byte{} 49 | } 50 | 51 | content, err = ioutil.ReadAll(g) 52 | if err != nil { 53 | Debug(1, fmt.Sprintf("[HTTP-PRETTIFIER] %q", err)) 54 | return p 55 | } 56 | 57 | headers = proto.DeleteHeader(headers, []byte("Content-Encoding")) 58 | 59 | newLen := strconv.Itoa(len(content)) 60 | headers = proto.SetHeader(headers, []byte("Content-Length"), []byte(newLen)) 61 | } 62 | 63 | newPayload := append(headers, content...) 64 | 65 | return newPayload 66 | } 67 | -------------------------------------------------------------------------------- /http_prettifier_test.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "github.com/buger/goreplay/proto" 7 | "strconv" 8 | "testing" 9 | ) 10 | 11 | func TestHTTPPrettifierGzip(t *testing.T) { 12 | b := bytes.NewBufferString("") 13 | w := gzip.NewWriter(b) 14 | w.Write([]byte("test")) 15 | w.Close() 16 | 17 | size := strconv.Itoa(len(b.Bytes())) 18 | 19 | payload := []byte("HTTP/1.1 200 OK\r\nContent-Length: " + size + "\r\nContent-Encoding: gzip\r\n\r\n") 20 | payload = append(payload, b.Bytes()...) 21 | 22 | newPayload := prettifyHTTP(payload) 23 | 24 | if string(newPayload) != "HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\ntest" { 25 | t.Errorf("Payload not match %q", string(newPayload)) 26 | } 27 | } 28 | 29 | func TestHTTPPrettifierChunked(t *testing.T) { 30 | payload := []byte("POST / HTTP/1.1\r\nHost: www.w3.org\r\nTransfer-Encoding: chunked\r\n\r\n4\r\nWiki\r\n5\r\npedia\r\ne\r\n in\r\n\r\nchunks.\r\n0\r\n\r\n") 31 | 32 | payload = prettifyHTTP(payload) 33 | if string(proto.Header(payload, []byte("Content-Length"))) != "23" { 34 | t.Errorf("payload should have content length of 23") 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /input_dummy.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | // DummyInput used for debugging. It generate 1 "GET /"" request per second. 
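// (Based on emit below.) Every second the input pushes one request payload and
// one response payload that share the same UUID, so downstream plugins can
// exercise request/response pairing without capturing real traffic.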
8 | type DummyInput struct { 9 | data chan []byte 10 | quit chan struct{} 11 | } 12 | 13 | // NewDummyInput constructor for DummyInput 14 | func NewDummyInput(options string) (di *DummyInput) { 15 | di = new(DummyInput) 16 | di.data = make(chan []byte) 17 | di.quit = make(chan struct{}) 18 | 19 | go di.emit() 20 | 21 | return 22 | } 23 | 24 | // PluginRead reads message from this plugin 25 | func (i *DummyInput) PluginRead() (*Message, error) { 26 | var msg Message 27 | select { 28 | case <-i.quit: 29 | return nil, ErrorStopped 30 | case buf := <-i.data: 31 | msg.Meta, msg.Data = payloadMetaWithBody(buf) 32 | return &msg, nil 33 | } 34 | } 35 | 36 | func (i *DummyInput) emit() { 37 | ticker := time.NewTicker(time.Second) 38 | 39 | for range ticker.C { 40 | uuid := uuid() 41 | reqh := payloadHeader(RequestPayload, uuid, time.Now().UnixNano(), -1) 42 | i.data <- append(reqh, []byte("GET / HTTP/1.1\r\nHost: www.w3.org\r\nUser-Agent: Go 1.1 package http\r\nAccept-Encoding: gzip\r\n\r\n")...) 43 | 44 | resh := payloadHeader(ResponsePayload, uuid, time.Now().UnixNano()+1, 1) 45 | i.data <- append(resh, []byte("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")...) 46 | } 47 | } 48 | 49 | func (i *DummyInput) String() string { 50 | return "Dummy Input" 51 | } 52 | 53 | // Close closes this plugins 54 | func (i *DummyInput) Close() error { 55 | close(i.quit) 56 | return nil 57 | } 58 | -------------------------------------------------------------------------------- /input_http.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "log" 5 | "net" 6 | "net/http" 7 | "net/http/httputil" 8 | "time" 9 | ) 10 | 11 | // HTTPInput used for sending requests to Gor via http 12 | type HTTPInput struct { 13 | data chan []byte 14 | address string 15 | listener net.Listener 16 | stop chan bool // Channel used only to indicate goroutine should shutdown 17 | } 18 | 19 | // NewHTTPInput constructor for HTTPInput. Accepts address with port which it will listen on. 
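// A minimal wiring sketch, mirroring the pattern used in input_http_test.go
// below (NewEmitter, NewTestOutput and Settings come from the rest of this
// package; this is illustrative, not part of the public API docs):
//
//	input := NewHTTPInput("127.0.0.1:0") // port 0 picks a free port
//	output := NewTestOutput(func(msg *Message) { /* inspect msg.Meta / msg.Data */ })
//	plugins := &InOutPlugins{Inputs: []PluginReader{input}, Outputs: []PluginWriter{output}}
//	plugins.All = append(plugins.All, input, output)
//	emitter := NewEmitter()
//	go emitter.Start(plugins, Settings.Middleware)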
20 | func NewHTTPInput(address string) (i *HTTPInput) { 21 | i = new(HTTPInput) 22 | i.data = make(chan []byte, 1000) 23 | i.stop = make(chan bool) 24 | 25 | i.listen(address) 26 | 27 | return 28 | } 29 | 30 | // PluginRead reads message from this plugin 31 | func (i *HTTPInput) PluginRead() (*Message, error) { 32 | var msg Message 33 | select { 34 | case <-i.stop: 35 | return nil, ErrorStopped 36 | case buf := <-i.data: 37 | msg.Data = buf 38 | msg.Meta = payloadHeader(RequestPayload, uuid(), time.Now().UnixNano(), -1) 39 | return &msg, nil 40 | } 41 | } 42 | 43 | // Close closes this plugin 44 | func (i *HTTPInput) Close() error { 45 | close(i.stop) 46 | return nil 47 | } 48 | 49 | func (i *HTTPInput) handler(w http.ResponseWriter, r *http.Request) { 50 | r.URL.Scheme = "http" 51 | r.URL.Host = i.address 52 | 53 | buf, _ := httputil.DumpRequestOut(r, true) 54 | http.Error(w, http.StatusText(200), 200) 55 | i.data <- buf 56 | } 57 | 58 | func (i *HTTPInput) listen(address string) { 59 | var err error 60 | 61 | mux := http.NewServeMux() 62 | 63 | mux.HandleFunc("/", i.handler) 64 | 65 | i.listener, err = net.Listen("tcp", address) 66 | if err != nil { 67 | log.Fatal("HTTP input listener failure:", err) 68 | } 69 | i.address = i.listener.Addr().String() 70 | 71 | go func() { 72 | err = http.Serve(i.listener, mux) 73 | if err != nil && err != http.ErrServerClosed { 74 | log.Fatal("HTTP input serve failure ", err) 75 | } 76 | }() 77 | } 78 | 79 | func (i *HTTPInput) String() string { 80 | return "HTTP input: " + i.address 81 | } 82 | -------------------------------------------------------------------------------- /input_http_test.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "bytes" 5 | "net/http" 6 | "strings" 7 | "sync" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | func TestHTTPInput(t *testing.T) { 13 | wg := new(sync.WaitGroup) 14 | 15 | input := NewHTTPInput("127.0.0.1:0") 16 | time.Sleep(time.Millisecond) 17 | output := NewTestOutput(func(*Message) { 18 | wg.Done() 19 | }) 20 | 21 | plugins := &InOutPlugins{ 22 | Inputs: []PluginReader{input}, 23 | Outputs: []PluginWriter{output}, 24 | } 25 | plugins.All = append(plugins.All, input, output) 26 | 27 | emitter := NewEmitter() 28 | go emitter.Start(plugins, Settings.Middleware) 29 | 30 | address := strings.Replace(input.address, "[::]", "127.0.0.1", -1) 31 | 32 | for i := 0; i < 100; i++ { 33 | wg.Add(1) 34 | http.Get("http://" + address + "/") 35 | } 36 | 37 | wg.Wait() 38 | emitter.Close() 39 | } 40 | 41 | func TestInputHTTPLargePayload(t *testing.T) { 42 | wg := new(sync.WaitGroup) 43 | const n = 10 << 20 // 10MB 44 | var large [n]byte 45 | large[n-1] = '0' 46 | 47 | input := NewHTTPInput("127.0.0.1:0") 48 | output := NewTestOutput(func(msg *Message) { 49 | _len := len(msg.Data) 50 | if _len >= n { // considering http body CRLF 51 | t.Errorf("expected body to be >= %d", n) 52 | } 53 | wg.Done() 54 | }) 55 | plugins := &InOutPlugins{ 56 | Inputs: []PluginReader{input}, 57 | Outputs: []PluginWriter{output}, 58 | } 59 | plugins.All = append(plugins.All, input, output) 60 | 61 | emitter := NewEmitter() 62 | defer emitter.Close() 63 | go emitter.Start(plugins, Settings.Middleware) 64 | 65 | address := strings.Replace(input.address, "[::]", "127.0.0.1", -1) 66 | var req *http.Request 67 | var err error 68 | req, err = http.NewRequest("POST", "http://"+address, bytes.NewBuffer(large[:])) 69 | if err != nil { 70 | t.Error(err) 71 | return 72 | } 73 | wg.Add(1) 74 | _, 
err = http.DefaultClient.Do(req) 75 | if err != nil { 76 | t.Error(err) 77 | return 78 | } 79 | wg.Wait() 80 | } 81 | -------------------------------------------------------------------------------- /input_kafka_test.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/Shopify/sarama" 7 | "github.com/Shopify/sarama/mocks" 8 | ) 9 | 10 | func TestInputKafkaRAW(t *testing.T) { 11 | consumer := mocks.NewConsumer(t, nil) 12 | defer consumer.Close() 13 | 14 | consumer.ExpectConsumePartition("test", 0, mocks.AnyOffset).YieldMessage(&sarama.ConsumerMessage{Value: []byte("1 2 3\nGET / HTTP1.1\r\nHeader: 1\r\n\r\n")}) 15 | consumer.SetTopicMetadata( 16 | map[string][]int32{"test": {0}}, 17 | ) 18 | 19 | input := NewKafkaInput("-1", &InputKafkaConfig{ 20 | consumer: consumer, 21 | Topic: "test", 22 | UseJSON: false, 23 | }, nil) 24 | 25 | msg, err := input.PluginRead() 26 | 27 | if err != nil { 28 | t.Fatal(err) 29 | } 30 | 31 | if string(append(msg.Meta, msg.Data...)) != "1 2 3\nGET / HTTP1.1\r\nHeader: 1\r\n\r\n" { 32 | t.Error("Message not properly decoded") 33 | } 34 | } 35 | 36 | func TestInputKafkaJSON(t *testing.T) { 37 | consumer := mocks.NewConsumer(t, nil) 38 | defer consumer.Close() 39 | 40 | consumer.ExpectConsumePartition("test", 0, mocks.AnyOffset).YieldMessage(&sarama.ConsumerMessage{Value: []byte(`{"Req_URL":"/","Req_Type":"1","Req_ID":"2","Req_Ts":"3","Req_Method":"GET","Req_Headers":{"Header":"1"}}`)}) 41 | consumer.SetTopicMetadata( 42 | map[string][]int32{"test": {0}}, 43 | ) 44 | 45 | input := NewKafkaInput("-1", &InputKafkaConfig{ 46 | consumer: consumer, 47 | Topic: "test", 48 | UseJSON: true, 49 | }, nil) 50 | 51 | msg, err := input.PluginRead() 52 | 53 | if err != nil { 54 | t.Fatal(err) 55 | } 56 | 57 | if string(append(msg.Meta, msg.Data...)) != "1 2 3\nGET / HTTP/1.1\r\nHeader: 1\r\n\r\n" { 58 | t.Error("Message not properly decoded") 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /input_raw.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/buger/goreplay/internal/capture" 7 | "github.com/buger/goreplay/internal/tcp" 8 | "github.com/buger/goreplay/proto" 9 | "log" 10 | "net" 11 | "strconv" 12 | "strings" 13 | "sync" 14 | ) 15 | 16 | // RAWInputConfig represents configuration that can be applied on raw input 17 | type RAWInputConfig = capture.PcapOptions 18 | 19 | // RAWInput used for intercepting traffic for given address 20 | type RAWInput struct { 21 | sync.Mutex 22 | config RAWInputConfig 23 | messageStats []tcp.Stats 24 | listener *capture.Listener 25 | messageParser *tcp.MessageParser 26 | cancelListener context.CancelFunc 27 | closed bool 28 | 29 | quit chan bool // Channel used only to indicate goroutine should shutdown 30 | host string 31 | ports []uint16 32 | } 33 | 34 | // NewRAWInput constructor for RAWInput. Accepts raw input config as arguments. 
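// The address argument accepts several forms, per the parsing below and the
// k8s manifests in this repository: a host:port pair ("0.0.0.0:80"), a
// comma-separated port list (":80,8080"), a path ending in "pcap" to replay a
// capture file, or a Kubernetes selector such as "k8s://deployments/nginx:80".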
35 | func NewRAWInput(address string, config RAWInputConfig) (i *RAWInput) { 36 | i = new(RAWInput) 37 | i.config = config 38 | i.quit = make(chan bool) 39 | 40 | host, _ports, err := net.SplitHostPort(address) 41 | if err != nil { 42 | // If we are reading pcap file, no port needed 43 | if strings.HasSuffix(address, "pcap") { 44 | host = address 45 | _ports = "0" 46 | err = nil 47 | } else if strings.HasPrefix(address, "k8s://") { 48 | portIndex := strings.LastIndex(address, ":") 49 | host = address[:portIndex] 50 | _ports = address[portIndex+1:] 51 | } else { 52 | log.Fatalf("input-raw: error while parsing address: %s", err) 53 | } 54 | } 55 | 56 | if strings.HasSuffix(host, "pcap") { 57 | i.config.Engine = capture.EnginePcapFile 58 | } 59 | 60 | var ports []uint16 61 | if _ports != "" { 62 | portsStr := strings.Split(_ports, ",") 63 | 64 | for _, portStr := range portsStr { 65 | port, err := strconv.Atoi(strings.TrimSpace(portStr)) 66 | if err != nil { 67 | log.Fatalf("parsing port error: %v", err) 68 | } 69 | ports = append(ports, uint16(port)) 70 | 71 | } 72 | } 73 | 74 | i.host = host 75 | i.ports = ports 76 | 77 | i.listen(address) 78 | 79 | return 80 | } 81 | 82 | // PluginRead reads message from this plugin 83 | func (i *RAWInput) PluginRead() (*Message, error) { 84 | var msgTCP *tcp.Message 85 | var msg Message 86 | select { 87 | case <-i.quit: 88 | return nil, ErrorStopped 89 | case msgTCP = <-i.listener.Messages(): 90 | msg.Data = msgTCP.Data() 91 | } 92 | 93 | var msgType byte = ResponsePayload 94 | if msgTCP.Direction == tcp.DirIncoming { 95 | msgType = RequestPayload 96 | if i.config.RealIPHeader != "" { 97 | msg.Data = proto.SetHeader(msg.Data, []byte(i.config.RealIPHeader), []byte(msgTCP.SrcAddr)) 98 | } 99 | } 100 | msg.Meta = payloadHeader(msgType, msgTCP.UUID(), msgTCP.Start.UnixNano(), msgTCP.End.UnixNano()-msgTCP.Start.UnixNano()) 101 | 102 | // to be removed.... 103 | if msgTCP.Truncated { 104 | Debug(2, "[INPUT-RAW] message truncated, increase copy-buffer-size") 105 | } 106 | // to be removed... 
107 | if msgTCP.TimedOut { 108 | Debug(2, "[INPUT-RAW] message timeout reached, increase input-raw-expire") 109 | } 110 | if i.config.Stats { 111 | stat := msgTCP.Stats 112 | go i.addStats(stat) 113 | } 114 | msgTCP = nil 115 | return &msg, nil 116 | } 117 | 118 | func (i *RAWInput) listen(address string) { 119 | var err error 120 | i.listener, err = capture.NewListener(i.host, i.ports, i.config) 121 | if err != nil { 122 | log.Fatal(err) 123 | } 124 | 125 | err = i.listener.Activate() 126 | if err != nil { 127 | log.Fatal(err) 128 | } 129 | 130 | var ctx context.Context 131 | ctx, i.cancelListener = context.WithCancel(context.Background()) 132 | errCh := i.listener.ListenBackground(ctx) 133 | <-i.listener.Reading 134 | Debug(1, i) 135 | go func() { 136 | <-errCh // the listener closed voluntarily 137 | i.Close() 138 | }() 139 | } 140 | 141 | func (i *RAWInput) String() string { 142 | return fmt.Sprintf("Intercepting traffic from: %s:%s", i.host, strings.Join(strings.Fields(fmt.Sprint(i.ports)), ",")) 143 | } 144 | 145 | // GetStats returns the stats so far and reset the stats 146 | func (i *RAWInput) GetStats() []tcp.Stats { 147 | i.Lock() 148 | defer func() { 149 | i.messageStats = []tcp.Stats{} 150 | i.Unlock() 151 | }() 152 | return i.messageStats 153 | } 154 | 155 | // Close closes the input raw listener 156 | func (i *RAWInput) Close() error { 157 | i.Lock() 158 | defer i.Unlock() 159 | if i.closed { 160 | return nil 161 | } 162 | i.cancelListener() 163 | close(i.quit) 164 | i.closed = true 165 | return nil 166 | } 167 | 168 | func (i *RAWInput) addStats(mStats tcp.Stats) { 169 | i.Lock() 170 | if len(i.messageStats) >= 10000 { 171 | i.messageStats = []tcp.Stats{} 172 | } 173 | i.messageStats = append(i.messageStats, mStats) 174 | i.Unlock() 175 | } 176 | -------------------------------------------------------------------------------- /input_tcp.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "crypto/tls" 7 | "fmt" 8 | "io" 9 | "log" 10 | "net" 11 | ) 12 | 13 | // TCPInput used for internal communication 14 | type TCPInput struct { 15 | data chan *Message 16 | listener net.Listener 17 | address string 18 | config *TCPInputConfig 19 | stop chan bool // Channel used only to indicate goroutine should shutdown 20 | } 21 | 22 | // TCPInputConfig represents configuration of a TCP input plugin 23 | type TCPInputConfig struct { 24 | Secure bool `json:"input-tcp-secure"` 25 | CertificatePath string `json:"input-tcp-certificate"` 26 | KeyPath string `json:"input-tcp-certificate-key"` 27 | } 28 | 29 | // NewTCPInput constructor for TCPInput, accepts address with port 30 | func NewTCPInput(address string, config *TCPInputConfig) (i *TCPInput) { 31 | i = new(TCPInput) 32 | i.data = make(chan *Message, 1000) 33 | i.address = address 34 | i.config = config 35 | i.stop = make(chan bool) 36 | 37 | i.listen(address) 38 | 39 | return 40 | } 41 | 42 | // PluginRead returns data and details read from plugin 43 | func (i *TCPInput) PluginRead() (msg *Message, err error) { 44 | select { 45 | case <-i.stop: 46 | return nil, ErrorStopped 47 | case msg = <-i.data: 48 | return msg, nil 49 | } 50 | 51 | } 52 | 53 | // Close closes the plugin 54 | func (i *TCPInput) Close() error { 55 | close(i.stop) 56 | i.listener.Close() 57 | return nil 58 | } 59 | 60 | func (i *TCPInput) listen(address string) { 61 | if i.config.Secure { 62 | cer, err := tls.LoadX509KeyPair(i.config.CertificatePath, i.config.KeyPath) 63 | 
if err != nil { 64 | log.Fatalln("error while loading --input-tcp TLS certificate:", err) 65 | } 66 | 67 | config := &tls.Config{Certificates: []tls.Certificate{cer}} 68 | listener, err := tls.Listen("tcp", address, config) 69 | if err != nil { 70 | log.Fatalln("[INPUT-TCP] failed to start INPUT-TCP listener:", err) 71 | } 72 | i.listener = listener 73 | } else { 74 | listener, err := net.Listen("tcp", address) 75 | if err != nil { 76 | log.Fatalln("failed to start INPUT-TCP listener:", err) 77 | } 78 | i.listener = listener 79 | } 80 | go func() { 81 | for { 82 | conn, err := i.listener.Accept() 83 | if err == nil { 84 | go i.handleConnection(conn) 85 | continue 86 | } 87 | if isTemporaryNetworkError(err) { 88 | continue 89 | } 90 | if operr, ok := err.(*net.OpError); ok && operr.Err.Error() != "use of closed network connection" { 91 | Debug(0, fmt.Sprintf("[INPUT-TCP] listener closed, err: %q", err)) 92 | } 93 | break 94 | } 95 | }() 96 | } 97 | 98 | var payloadSeparatorAsBytes = []byte(payloadSeparator) 99 | 100 | func (i *TCPInput) handleConnection(conn net.Conn) { 101 | defer conn.Close() 102 | 103 | reader := bufio.NewReader(conn) 104 | var buffer bytes.Buffer 105 | 106 | for { 107 | line, err := reader.ReadBytes('\n') 108 | if err != nil { 109 | if isTemporaryNetworkError(err) { 110 | continue 111 | } 112 | if err != io.EOF { 113 | Debug(0, fmt.Sprintf("[INPUT-TCP] connection error: %q", err)) 114 | } 115 | break 116 | } 117 | 118 | if bytes.Equal(payloadSeparatorAsBytes[1:], line) { 119 | // unread the '\n' before monkeys 120 | buffer.UnreadByte() 121 | var msg Message 122 | msg.Meta, msg.Data = payloadMetaWithBody(buffer.Bytes()) 123 | i.data <- &msg 124 | buffer.Reset() 125 | } else { 126 | buffer.Write(line) 127 | } 128 | } 129 | } 130 | 131 | func (i *TCPInput) String() string { 132 | return "TCP input: " + i.address 133 | } 134 | 135 | func isTemporaryNetworkError(err error) bool { 136 | if nerr, ok := err.(net.Error); ok && nerr.Temporary() { 137 | return true 138 | } 139 | if operr, ok := err.(*net.OpError); ok && operr.Temporary() { 140 | return true 141 | } 142 | return false 143 | } 144 | -------------------------------------------------------------------------------- /input_tcp_test.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "bytes" 5 | "crypto/rand" 6 | "crypto/rsa" 7 | "crypto/tls" 8 | "crypto/x509" 9 | "encoding/pem" 10 | "io/ioutil" 11 | "log" 12 | "math/big" 13 | "net" 14 | "os" 15 | "sync" 16 | "testing" 17 | "time" 18 | ) 19 | 20 | func TestTCPInput(t *testing.T) { 21 | wg := new(sync.WaitGroup) 22 | 23 | input := NewTCPInput("127.0.0.1:0", &TCPInputConfig{}) 24 | output := NewTestOutput(func(*Message) { 25 | wg.Done() 26 | }) 27 | 28 | plugins := &InOutPlugins{ 29 | Inputs: []PluginReader{input}, 30 | Outputs: []PluginWriter{output}, 31 | } 32 | plugins.All = append(plugins.All, input, output) 33 | 34 | emitter := NewEmitter() 35 | go emitter.Start(plugins, Settings.Middleware) 36 | 37 | tcpAddr, err := net.ResolveTCPAddr("tcp", input.listener.Addr().String()) 38 | 39 | if err != nil { 40 | log.Fatal(err) 41 | } 42 | 43 | conn, err := net.DialTCP("tcp", nil, tcpAddr) 44 | if err != nil { 45 | log.Fatal(err) 46 | } 47 | 48 | msg := []byte("1 1 1\nGET / HTTP/1.1\r\n\r\n") 49 | 50 | for i := 0; i < 100; i++ { 51 | wg.Add(1) 52 | if _, err = conn.Write(msg); err == nil { 53 | _, err = conn.Write(payloadSeparatorAsBytes) 54 | } 55 | if err != nil { 56 | t.Error(err) 57 | return 58 | } 
59 | } 60 | wg.Wait() 61 | emitter.Close() 62 | } 63 | 64 | func genCertificate(template *x509.Certificate) ([]byte, []byte) { 65 | priv, _ := rsa.GenerateKey(rand.Reader, 2048) 66 | 67 | serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) 68 | serialNumber, _ := rand.Int(rand.Reader, serialNumberLimit) 69 | template.SerialNumber = serialNumber 70 | template.BasicConstraintsValid = true 71 | template.NotBefore = time.Now() 72 | template.NotAfter = time.Now().Add(time.Hour) 73 | 74 | derBytes, _ := x509.CreateCertificate(rand.Reader, template, template, &priv.PublicKey, priv) 75 | 76 | var certPem, keyPem bytes.Buffer 77 | pem.Encode(&certPem, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) 78 | pem.Encode(&keyPem, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}) 79 | 80 | return certPem.Bytes(), keyPem.Bytes() 81 | } 82 | 83 | func TestTCPInputSecure(t *testing.T) { 84 | serverCertPem, serverPrivPem := genCertificate(&x509.Certificate{ 85 | DNSNames: []string{"localhost"}, 86 | IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::")}, 87 | }) 88 | 89 | serverCertPemFile, _ := ioutil.TempFile("", "server.crt") 90 | serverCertPemFile.Write(serverCertPem) 91 | serverCertPemFile.Close() 92 | 93 | serverPrivPemFile, _ := ioutil.TempFile("", "server.key") 94 | serverPrivPemFile.Write(serverPrivPem) 95 | serverPrivPemFile.Close() 96 | 97 | defer func() { 98 | os.Remove(serverPrivPemFile.Name()) 99 | os.Remove(serverCertPemFile.Name()) 100 | }() 101 | 102 | wg := new(sync.WaitGroup) 103 | 104 | input := NewTCPInput("127.0.0.1:0", &TCPInputConfig{ 105 | Secure: true, 106 | CertificatePath: serverCertPemFile.Name(), 107 | KeyPath: serverPrivPemFile.Name(), 108 | }) 109 | output := NewTestOutput(func(*Message) { 110 | wg.Done() 111 | }) 112 | 113 | plugins := &InOutPlugins{ 114 | Inputs: []PluginReader{input}, 115 | Outputs: []PluginWriter{output}, 116 | } 117 | plugins.All = append(plugins.All, input, output) 118 | 119 | emitter := NewEmitter() 120 | go emitter.Start(plugins, Settings.Middleware) 121 | 122 | conf := &tls.Config{ 123 | InsecureSkipVerify: true, 124 | } 125 | 126 | conn, err := tls.Dial("tcp", input.listener.Addr().String(), conf) 127 | if err != nil { 128 | t.Fatal(err) 129 | } 130 | defer conn.Close() 131 | 132 | msg := []byte("1 1 1\nGET / HTTP/1.1\r\n\r\n") 133 | 134 | for i := 0; i < 100; i++ { 135 | wg.Add(1) 136 | conn.Write(msg) 137 | conn.Write([]byte(payloadSeparator)) 138 | } 139 | 140 | wg.Wait() 141 | emitter.Close() 142 | } 143 | -------------------------------------------------------------------------------- /internal/byteutils/byteutils.go: -------------------------------------------------------------------------------- 1 | // Package byteutils provides helpers for working with byte slices 2 | package byteutils 3 | 4 | import ( 5 | "unsafe" 6 | ) 7 | 8 | // Cut elements from slice for a given range 9 | func Cut(a []byte, from, to int) []byte { 10 | copy(a[from:], a[to:]) 11 | a = a[:len(a)-to+from] 12 | 13 | return a 14 | } 15 | 16 | // Insert new slice at specified position 17 | func Insert(a []byte, i int, b []byte) []byte { 18 | a = append(a, make([]byte, len(b))...) 
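// shift the original tail right by len(b) to open a gap at i, then fill the gap with b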
19 | copy(a[i+len(b):], a[i:]) 20 | copy(a[i:i+len(b)], b) 21 | 22 | return a 23 | } 24 | 25 | // Replace function unlike bytes.Replace allows you to specify range 26 | func Replace(a []byte, from, to int, new []byte) []byte { 27 | lenDiff := len(new) - (to - from) 28 | 29 | if lenDiff > 0 { 30 | // Extend if new segment bigger 31 | a = append(a, make([]byte, lenDiff)...) 32 | copy(a[to+lenDiff:], a[to:]) 33 | copy(a[from:from+len(new)], new) 34 | 35 | return a 36 | } 37 | 38 | if lenDiff < 0 { 39 | copy(a[from:], new) 40 | copy(a[from+len(new):], a[to:]) 41 | return a[:len(a)+lenDiff] 42 | } 43 | 44 | // same size 45 | copy(a[from:], new) 46 | return a 47 | } 48 | 49 | // SliceToString preferred for large body payload (zero allocation and faster) 50 | func SliceToString(buf []byte) string { 51 | return *(*string)(unsafe.Pointer(&buf)) 52 | } 53 | -------------------------------------------------------------------------------- /internal/byteutils/byteutils_test.go: -------------------------------------------------------------------------------- 1 | package byteutils 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | ) 7 | 8 | func TestCut(t *testing.T) { 9 | if !bytes.Equal(Cut([]byte("123456"), 2, 4), []byte("1256")) { 10 | t.Error("Should properly cut") 11 | } 12 | } 13 | 14 | func TestInsert(t *testing.T) { 15 | if !bytes.Equal(Insert([]byte("123456"), 2, []byte("abcd")), []byte("12abcd3456")) { 16 | t.Error("Should insert into middle of slice") 17 | } 18 | } 19 | 20 | func TestReplace(t *testing.T) { 21 | if !bytes.Equal(Replace([]byte("123456"), 2, 4, []byte("ab")), []byte("12ab56")) { 22 | t.Error("Should replace when same length") 23 | } 24 | 25 | if !bytes.Equal(Replace([]byte("123456"), 2, 4, []byte("abcd")), []byte("12abcd56")) { 26 | t.Error("Should replace when replacement length bigger") 27 | } 28 | 29 | if !bytes.Equal(Replace([]byte("123456"), 2, 5, []byte("ab")), []byte("12ab6")) { 30 | t.Error("Should replace when replacement length bigger") 31 | } 32 | } 33 | 34 | func BenchmarkStringtoSlice(b *testing.B) { 35 | var s string 36 | var buf [1 << 20]byte 37 | for i := 0; i < b.N; i++ { 38 | s = SliceToString(buf[:]) 39 | } 40 | _ = s // avoid gc to optimize away the loop body 41 | } 42 | -------------------------------------------------------------------------------- /internal/capture/af_packet.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | 3 | package capture 4 | 5 | import ( 6 | "fmt" 7 | "time" 8 | 9 | "github.com/google/gopacket" 10 | ) 11 | 12 | func newAfpacketHandle(device string, snaplen int, block_size int, num_blocks int, 13 | useVLAN bool, timeout time.Duration) (*afpacketHandle, error) { 14 | return nil, fmt.Errorf("Not implemented") 15 | } 16 | 17 | func afpacketComputeSize(targetSizeMb int, snaplen int, pageSize int) ( 18 | frameSize int, blockSize int, numBlocks int, err error) { 19 | return 0, 0, 0, fmt.Errorf("Not implemented") 20 | } 21 | 22 | type afpacketHandle struct{} 23 | 24 | // ReadPacketData satisfies PacketDataSource interface 25 | func (h *afpacketHandle) ReadPacketData() (data []byte, ci gopacket.CaptureInfo, err error) { 26 | return nil, gopacket.CaptureInfo{}, fmt.Errorf("Not implemented") 27 | } 28 | 29 | // SetBPFFilter translates a BPF filter string into BPF RawInstruction and applies them. 
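// On non-Linux builds (see the //go:build !linux constraint at the top of this
// file) this is a stub that always reports "Not implemented"; the working
// AF_PACKET implementation lives in the Linux-only build of this package.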
30 | func (h *afpacketHandle) SetBPFFilter(filter string, snaplen int) (err error) { 31 | return fmt.Errorf("Not implemented") 32 | } 33 | -------------------------------------------------------------------------------- /internal/capture/af_packet_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | 3 | package capture 4 | 5 | import ( 6 | "fmt" 7 | "time" 8 | 9 | "github.com/google/gopacket" 10 | "github.com/google/gopacket/afpacket" 11 | "github.com/google/gopacket/layers" 12 | "github.com/google/gopacket/pcap" 13 | "golang.org/x/net/bpf" 14 | 15 | _ "github.com/google/gopacket/layers" 16 | ) 17 | 18 | type afpacketHandle struct { 19 | TPacket *afpacket.TPacket 20 | } 21 | 22 | func newAfpacketHandle(device string, snaplen int, block_size int, num_blocks int, 23 | useVLAN bool, timeout time.Duration) (*afpacketHandle, error) { 24 | 25 | h := &afpacketHandle{} 26 | var err error 27 | 28 | if device == "any" { 29 | h.TPacket, err = afpacket.NewTPacket( 30 | afpacket.OptFrameSize(snaplen), 31 | afpacket.OptBlockSize(block_size), 32 | afpacket.OptNumBlocks(num_blocks), 33 | afpacket.OptAddVLANHeader(false), 34 | afpacket.OptPollTimeout(timeout), 35 | afpacket.SocketRaw, 36 | afpacket.TPacketVersion3) 37 | } else { 38 | h.TPacket, err = afpacket.NewTPacket( 39 | afpacket.OptInterface(device), 40 | afpacket.OptFrameSize(snaplen), 41 | afpacket.OptBlockSize(block_size), 42 | afpacket.OptNumBlocks(num_blocks), 43 | afpacket.OptAddVLANHeader(false), 44 | afpacket.OptPollTimeout(timeout), 45 | afpacket.SocketRaw, 46 | afpacket.TPacketVersion3) 47 | } 48 | return h, err 49 | } 50 | 51 | // ReadPacketData satisfies PacketDataSource interface 52 | func (h *afpacketHandle) ReadPacketData() (data []byte, ci gopacket.CaptureInfo, err error) { 53 | return h.TPacket.ReadPacketData() 54 | } 55 | 56 | // SetBPFFilter translates a BPF filter string into BPF RawInstruction and applies them. 57 | func (h *afpacketHandle) SetBPFFilter(filter string, snaplen int) (err error) { 58 | pcapBPF, err := pcap.CompileBPFFilter(layers.LinkTypeEthernet, snaplen, filter) 59 | if err != nil { 60 | return err 61 | } 62 | bpfIns := []bpf.RawInstruction{} 63 | for _, ins := range pcapBPF { 64 | bpfIns2 := bpf.RawInstruction{ 65 | Op: ins.Code, 66 | Jt: ins.Jt, 67 | Jf: ins.Jf, 68 | K: ins.K, 69 | } 70 | bpfIns = append(bpfIns, bpfIns2) 71 | } 72 | if h.TPacket.SetBPF(bpfIns); err != nil { 73 | return err 74 | } 75 | return h.TPacket.SetBPF(bpfIns) 76 | } 77 | 78 | // LinkType returns ethernet link type. 79 | func (h *afpacketHandle) LinkType() layers.LinkType { 80 | return layers.LinkTypeEthernet 81 | } 82 | 83 | // Close will close afpacket source. 84 | func (h *afpacketHandle) Close() { 85 | h.TPacket.Close() 86 | } 87 | 88 | // SocketStats prints received, dropped, queue-freeze packet stats. 89 | func (h *afpacketHandle) SocketStats() (as afpacket.SocketStats, asv afpacket.SocketStatsV3, err error) { 90 | return h.TPacket.SocketStats() 91 | } 92 | 93 | // afpacketComputeSize computes the block_size and the num_blocks in such a way that the 94 | // allocated mmap buffer is close to but smaller than target_size_mb. 95 | // The restriction is that the block_size must be divisible by both the 96 | // frame size and page size. 
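// For example, assuming a 4096-byte page size and snaplen 65536 (illustrative
// numbers): frameSize = (65536/4096 + 1) * 4096 = 69632, blockSize = 69632 * 128
// = 8912896, and a 32 MB target buffer gives numBlocks = 33554432 / 8912896 = 3
// (integer division).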
97 | func afpacketComputeSize(targetSizeMb int, snaplen int, pageSize int) ( 98 | frameSize int, blockSize int, numBlocks int, err error) { 99 | 100 | if snaplen < pageSize { 101 | frameSize = pageSize / (pageSize / snaplen) 102 | } else { 103 | frameSize = (snaplen/pageSize + 1) * pageSize 104 | } 105 | 106 | // 128 is the default from the gopacket library so just use that 107 | blockSize = frameSize * 128 108 | numBlocks = (targetSizeMb * 1024 * 1024) / blockSize 109 | 110 | if numBlocks == 0 { 111 | return 0, 0, 0, fmt.Errorf("Interface buffersize is too small") 112 | } 113 | 114 | return frameSize, blockSize, numBlocks, nil 115 | } 116 | -------------------------------------------------------------------------------- /internal/capture/capture_test.go: -------------------------------------------------------------------------------- 1 | package capture 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestSetInterfaces(t *testing.T) { 8 | listener := &Listener{ 9 | loopIndex: 99999, 10 | } 11 | listener.setInterfaces() 12 | 13 | for _, nic := range listener.Interfaces { 14 | if (len(nic.Addresses)) == 0 { 15 | t.Errorf("nic %s was captured with 0 addresses", nic.Name) 16 | } 17 | } 18 | 19 | if listener.loopIndex == 99999 { 20 | t.Errorf("loopback nic index was not found") 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /internal/capture/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package capture provides traffic sniffier using AF_PACKET, pcap or pcap file. 3 | it allows you to listen for traffic from any port (e.g. sniffing) because they operate on IP level. 4 | Ports is TCP/IP feature, same as flow control, reliable transmission and etc. 5 | Currently this package implements TCP layer: flow control is managed under tcp package. 6 | BPF filters can also be applied. 7 | 8 | example: 9 | 10 | // for the transport should be "tcp" 11 | listener, err := capture.NewListener(host, port, transport, engine, trackResponse) 12 | 13 | if err != nil { 14 | // handle error 15 | } 16 | 17 | listener.SetPcapOptions(opts) 18 | err = listner.Activate() 19 | 20 | if err != nil { 21 | // handle it 22 | } 23 | 24 | if err := listener.Listen(context.Background(), handler); err != nil { 25 | // handle error 26 | } 27 | 28 | // or 29 | errCh := listener.ListenBackground(context.Background(), handler) // runs in the background 30 | select { 31 | case err := <- errCh: 32 | 33 | // handle error 34 | 35 | case <-quit: 36 | 37 | // 38 | 39 | case <- l.Reading: // if we have started reading 40 | } 41 | */ 42 | package capture // import github.com/buger/goreplay/capture 43 | -------------------------------------------------------------------------------- /internal/capture/dump.go: -------------------------------------------------------------------------------- 1 | // https://github.com/google/gopacket/blob/403ca653c4/pcapgo/read.go 2 | 3 | package capture 4 | 5 | import ( 6 | "encoding/binary" 7 | "fmt" 8 | "io" 9 | "time" 10 | 11 | "github.com/google/gopacket" 12 | "github.com/google/gopacket/layers" 13 | ) 14 | 15 | // Writer wraps an underlying io.Writer to write packet data in PCAP 16 | // format. See http://wiki.wireshark.org/Development/LibpcapFileFormat 17 | // for information on the file format. 18 | // 19 | // For those that care, we currently write v2.4 files with nanosecond 20 | // or microsecond timestamp resolution and little-endian encoding. 
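// The 16-byte buf below is scratch space for the per-packet record header that
// writePacketHeader emits: four little-endian uint32 fields (timestamp seconds,
// timestamp sub-seconds, captured length, original length).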
21 | type Writer struct { 22 | w io.Writer 23 | tsScaler int 24 | // Moving this into the struct seems to save an allocation for each call to writePacketHeader 25 | buf [16]byte 26 | } 27 | 28 | const magicNanoseconds = 0xA1B23C4D 29 | const magicMicroseconds = 0xA1B2C3D4 30 | const versionMajor = 2 31 | const versionMinor = 4 32 | 33 | // NewWriterNanos returns a new writer object, for writing packet data out 34 | // to the given writer. If this is a new empty writer (as opposed to 35 | // an append), you must call WriteFileHeader before WritePacket. Packet 36 | // timestamps are written with nanosecond precision. 37 | // 38 | // // Write a new file: 39 | // f, _ := os.Create("/tmp/file.pcap") 40 | // w := pcapgo.NewWriterNanos(f) 41 | // w.WriteFileHeader(65536, layers.LinkTypeEthernet) // new file, must do this. 42 | // w.WritePacket(gopacket.CaptureInfo{...}, data1) 43 | // f.Close() 44 | // // Append to existing file (must have same snaplen and linktype) 45 | // f2, _ := os.OpenFile("/tmp/fileNano.pcap", os.O_APPEND, 0700) 46 | // w2 := pcapgo.NewWriter(f2) 47 | // // no need for file header, it's already written. 48 | // w2.WritePacket(gopacket.CaptureInfo{...}, data2) 49 | // f2.Close() 50 | func NewWriterNanos(w io.Writer) *Writer { 51 | return &Writer{w: w, tsScaler: nanosPerNano} 52 | } 53 | 54 | // NewWriter returns a new writer object, for writing packet data out 55 | // to the given writer. If this is a new empty writer (as opposed to 56 | // an append), you must call WriteFileHeader before WritePacket. 57 | // Packet timestamps are written with microsecond precision. 58 | // 59 | // // Write a new file: 60 | // f, _ := os.Create("/tmp/file.pcap") 61 | // w := pcapgo.NewWriter(f) 62 | // w.WriteFileHeader(65536, layers.LinkTypeEthernet) // new file, must do this. 63 | // w.WritePacket(gopacket.CaptureInfo{...}, data1) 64 | // f.Close() 65 | // // Append to existing file (must have same snaplen and linktype) 66 | // f2, _ := os.OpenFile("/tmp/file.pcap", os.O_APPEND, 0700) 67 | // w2 := pcapgo.NewWriter(f2) 68 | // // no need for file header, it's already written. 69 | // w2.WritePacket(gopacket.CaptureInfo{...}, data2) 70 | // f2.Close() 71 | func NewWriter(w io.Writer) *Writer { 72 | return &Writer{w: w, tsScaler: nanosPerMicro} 73 | } 74 | 75 | // WriteFileHeader writes a file header out to the writer. 76 | // This must be called exactly once per output. 
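// The 24-byte global header written here is: 4-byte magic (selecting micro- or
// nanosecond timestamps), 2+2 bytes for version 2.4, 4+4 zero bytes for
// timezone and sigfigs, 4-byte snaplen and 4-byte link type, all little-endian.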
77 | func (w *Writer) WriteFileHeader(snaplen uint32, linktype layers.LinkType) error { 78 | var buf [24]byte 79 | if w.tsScaler == nanosPerMicro { 80 | binary.LittleEndian.PutUint32(buf[0:4], magicMicroseconds) 81 | } else { 82 | binary.LittleEndian.PutUint32(buf[0:4], magicNanoseconds) 83 | } 84 | binary.LittleEndian.PutUint16(buf[4:6], versionMajor) 85 | binary.LittleEndian.PutUint16(buf[6:8], versionMinor) 86 | // bytes 8:12 stay 0 (timezone = UTC) 87 | // bytes 12:16 stay 0 (sigfigs is always set to zero, according to 88 | // http://wiki.wireshark.org/Development/LibpcapFileFormat 89 | binary.LittleEndian.PutUint32(buf[16:20], snaplen) 90 | binary.LittleEndian.PutUint32(buf[20:24], uint32(linktype)) 91 | _, err := w.w.Write(buf[:]) 92 | return err 93 | } 94 | 95 | const nanosPerMicro = 1000 96 | const nanosPerNano = 1 97 | 98 | func (w *Writer) writePacketHeader(ci gopacket.CaptureInfo) error { 99 | t := ci.Timestamp 100 | if t.IsZero() { 101 | t = time.Now() 102 | } 103 | secs := t.Unix() 104 | usecs := t.Nanosecond() / w.tsScaler 105 | binary.LittleEndian.PutUint32(w.buf[0:4], uint32(secs)) 106 | binary.LittleEndian.PutUint32(w.buf[4:8], uint32(usecs)) 107 | binary.LittleEndian.PutUint32(w.buf[8:12], uint32(ci.CaptureLength)) 108 | binary.LittleEndian.PutUint32(w.buf[12:16], uint32(ci.Length)) 109 | _, err := w.w.Write(w.buf[:]) 110 | return err 111 | } 112 | 113 | // WritePacket writes the given packet data out to the file. 114 | func (w *Writer) WritePacket(ci gopacket.CaptureInfo, data []byte) error { 115 | if ci.CaptureLength != len(data) { 116 | return fmt.Errorf("capture length %d does not match data length %d", ci.CaptureLength, len(data)) 117 | } 118 | if ci.CaptureLength > ci.Length { 119 | return fmt.Errorf("invalid capture info %+v: capture length > length", ci) 120 | } 121 | if err := w.writePacketHeader(ci); err != nil { 122 | return fmt.Errorf("error writing packet header: %v", err) 123 | } 124 | _, err := w.w.Write(data) 125 | return err 126 | } 127 | -------------------------------------------------------------------------------- /internal/capture/sock_others.go: -------------------------------------------------------------------------------- 1 | //go:build !linux || arm64 || darwin 2 | 3 | package capture 4 | 5 | import ( 6 | "errors" 7 | 8 | "github.com/google/gopacket/pcap" 9 | ) 10 | 11 | // NewSocket returns new M'maped sock_raw on packet version 2. 
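// Per the build constraint at the top of this file, this variant is compiled on
// targets where the memory-mapped AF_PACKET socket is unavailable, so it simply
// returns the error below.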
12 | func NewSocket(_ pcap.Interface) (Socket, error) { 13 | return nil, errors.New("afpacket socket is only available on linux") 14 | } 15 | -------------------------------------------------------------------------------- /internal/capture/socket.go: -------------------------------------------------------------------------------- 1 | package capture 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/google/gopacket" 7 | ) 8 | 9 | // Socket is any interface that defines the behaviors of Socket 10 | type Socket interface { 11 | ReadPacketData() ([]byte, gopacket.CaptureInfo, error) 12 | WritePacketData([]byte) error 13 | SetBPFFilter(string) error 14 | SetPromiscuous(bool) error 15 | SetSnapLen(int) error 16 | GetSnapLen() int 17 | SetTimeout(time.Duration) error 18 | SetLoopbackIndex(i int32) 19 | Close() error 20 | } 21 | -------------------------------------------------------------------------------- /internal/capture/vxlan.go: -------------------------------------------------------------------------------- 1 | package capture 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "github.com/google/gopacket" 7 | "github.com/google/gopacket/layers" 8 | "net" 9 | "time" 10 | ) 11 | 12 | const VxLanPacketSize = 1526 //vxlan 8 B + ethernet II 1518 B 13 | 14 | type vxlanHandle struct { 15 | connection *net.UDPConn 16 | packetChannel chan gopacket.Packet 17 | vnis []int 18 | } 19 | 20 | func newVXLANHandler(port int, vnis []int) (*vxlanHandle, error) { 21 | if port == 0 { 22 | port = 4789 23 | } 24 | 25 | addr := net.UDPAddr{ 26 | Port: port, 27 | IP: net.ParseIP("0.0.0.0"), 28 | } 29 | 30 | vxlanHandle := &vxlanHandle{} 31 | con, err := net.ListenUDP("udp", &addr) 32 | if err != nil { 33 | return nil, fmt.Errorf(err.Error()) 34 | } 35 | vxlanHandle.connection = con 36 | vxlanHandle.packetChannel = make(chan gopacket.Packet, 1000) 37 | vxlanHandle.vnis = vnis 38 | go vxlanHandle.reader() 39 | 40 | return vxlanHandle, nil 41 | } 42 | 43 | func (v *vxlanHandle) reader() { 44 | for { 45 | inputBytes := make([]byte, VxLanPacketSize) 46 | length, _, err := v.connection.ReadFromUDP(inputBytes) 47 | if err != nil { 48 | if errors.Is(err, net.ErrClosed) { 49 | return 50 | } 51 | continue 52 | } 53 | packet := gopacket.NewPacket(inputBytes[:length], layers.LayerTypeVXLAN, gopacket.NoCopy) 54 | ci := packet.Metadata() 55 | ci.Timestamp = time.Now() 56 | ci.CaptureLength = length 57 | ci.Length = length 58 | 59 | if len(v.vnis) > 0 && !v.vniIsAllowed(packet) { 60 | continue 61 | } 62 | 63 | v.packetChannel <- packet 64 | } 65 | } 66 | 67 | func (v *vxlanHandle) vniIsAllowed(packet gopacket.Packet) bool { 68 | defaultState := false 69 | if layer := packet.Layer(layers.LayerTypeVXLAN); layer != nil { 70 | vxlan, _ := layer.(*layers.VXLAN) 71 | for _, vn := range v.vnis { 72 | if vn > 0 && int(vxlan.VNI) == vn { 73 | return true 74 | } 75 | 76 | if vn < 0 { 77 | if int(vxlan.VNI) == -vn { 78 | return false 79 | } 80 | defaultState = true 81 | } 82 | } 83 | } 84 | return defaultState 85 | } 86 | 87 | func (v *vxlanHandle) ReadPacketData() ([]byte, gopacket.CaptureInfo, error) { 88 | packet := <-v.packetChannel 89 | layer := packet.Layer(layers.LayerTypeVXLAN) 90 | bytes := layer.LayerPayload() 91 | 92 | return bytes, packet.Metadata().CaptureInfo, nil 93 | } 94 | 95 | func (v *vxlanHandle) Close() error { 96 | if v.connection != nil { 97 | return v.connection.Close() 98 | } 99 | return nil 100 | } 101 | -------------------------------------------------------------------------------- /internal/simpletime/time.go: 
-------------------------------------------------------------------------------- 1 | package simpletime 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | var Now time.Time 8 | 9 | func init() { 10 | go func() { 11 | for { 12 | // Accurate enough 13 | Now = time.Now() 14 | time.Sleep(100 * time.Millisecond) 15 | } 16 | }() 17 | } 18 | -------------------------------------------------------------------------------- /internal/size/size.go: -------------------------------------------------------------------------------- 1 | package size 2 | 3 | import ( 4 | "fmt" 5 | "regexp" 6 | "strconv" 7 | ) 8 | 9 | // Size represents size that implements flag.Var 10 | type Size int64 11 | 12 | // the following regexes follow Go semantics https://golang.org/ref/spec#Letters_and_digits 13 | var ( 14 | rB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+$`) 15 | rKB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+kb$`) 16 | rMB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+mb$`) 17 | rGB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+gb$`) 18 | rTB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+tb$`) 19 | ) 20 | 21 | // Set parses size to integer from different bases and data units 22 | func (siz *Size) Set(size string) (err error) { 23 | if size == "" { 24 | return 25 | } 26 | const ( 27 | _ = 1 << (iota * 10) 28 | KB 29 | MB 30 | GB 31 | TB 32 | ) 33 | 34 | var ( 35 | lmt = len(size) - 2 36 | s = []byte(size) 37 | ) 38 | 39 | var _len int64 40 | switch { 41 | case rB.Match(s): 42 | _len, err = strconv.ParseInt(size, 0, 64) 43 | case rKB.Match(s): 44 | _len, err = strconv.ParseInt(size[:lmt], 0, 64) 45 | _len *= KB 46 | case rMB.Match(s): 47 | _len, err = strconv.ParseInt(size[:lmt], 0, 64) 48 | _len *= MB 49 | case rGB.Match(s): 50 | _len, err = strconv.ParseInt(size[:lmt], 0, 64) 51 | _len *= GB 52 | case rTB.Match(s): 53 | _len, err = strconv.ParseInt(size[:lmt], 0, 64) 54 | _len *= TB 55 | default: 56 | return fmt.Errorf("invalid _len %q", size) 57 | } 58 | *siz = Size(_len) 59 | return 60 | } 61 | 62 | func (siz *Size) String() string { 63 | return fmt.Sprintf("%d", *siz) 64 | } 65 | -------------------------------------------------------------------------------- /internal/size/size_test.go: -------------------------------------------------------------------------------- 1 | package size 2 | 3 | import "testing" 4 | 5 | func TestParseDataUnit(t *testing.T) { 6 | var d = map[string]int{ 7 | "42mb": 42 << 20, 8 | "4_2": 42, 9 | "00": 0, 10 | "0": 0, 11 | "0_600tb": 384 << 40, 12 | "0600Tb": 384 << 40, 13 | "0o12Mb": 10 << 20, 14 | "0b_10010001111_1kb": 2335 << 10, 15 | "1024": 1 << 10, 16 | "0b111": 7, 17 | "0x12gB": 18 << 30, 18 | "0x_67_7a_2f_cc_40_c6": 113774485586118, 19 | "121562380192901": 121562380192901, 20 | } 21 | var buf Size 22 | var err error 23 | for k, v := range d { 24 | err = buf.Set(k) 25 | if err != nil || buf != Size(v) { 26 | t.Errorf("Error parsing %s: %v", k, err) 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /internal/tcp/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package tcp implements TCP transport layer protocol, it is responsible for 3 | parsing, reassembling tcp packets, handling communication with engine listeners(github.com/buger/goreplay/capture), 4 | and reporting errors and statistics of packets. 5 | the packets are parsed by following TCP way(https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_segment_structure). 
6 | 7 | example: 8 | 9 | import "github.com/buger/goreplay/tcp" 10 | 11 | messageExpire := time.Second*5 12 | maxSize := 5 << 20 13 | 14 | debugger := func(debugLevel int, data ...interface{}){} // debugger can also be nil 15 | messageHandler := func(mssg *tcp.Message){} 16 | 17 | mssgPool := tcp.NewMessageParser(maxMessageSize, messageExpire, debugger, messageHandler) 18 | listener.Listen(ctx, mssgPool.Handler) 19 | 20 | you can use pool.End or/and pool.Start to set custom session behaviors 21 | 22 | debugLevel in debugger function indicates the priority of the logs, the bigger the number the lower 23 | the priority. errors are signified by debug level 4 for errors, 5 for discarded packets, and 6 for received packets. 24 | */ 25 | package tcp // import github.com/buger/goreplay/tcp 26 | -------------------------------------------------------------------------------- /k8s/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: pod-reader 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["pods"] 8 | verbs: ["get", "watch", "list"] 9 | - apiGroups: ["apps"] 10 | resources: ["deployments"] 11 | verbs: ["get", "watch", "list"] 12 | - apiGroups: ["apps"] 13 | resources: ["daemonsets"] 14 | verbs: ["get", "watch", "list"] -------------------------------------------------------------------------------- /k8s/collect_goreplay_telemetry.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # collect_goreplay_telemetry.sh 4 | # 5 | # Gathers telemetry from a GoReplay DaemonSet in the 'goreplay' namespace. 6 | # Works on macOS and Linux, assuming 'kubectl' (or compatible) is installed. 7 | # 8 | # Usage examples: 9 | # ./collect_goreplay_telemetry.sh 10 | # ./collect_goreplay_telemetry.sh "microk8s kubectl" 11 | 12 | set -euo pipefail 13 | 14 | ######################################## 15 | # Determine kubectl command 16 | ######################################## 17 | if [[ $# -gt 0 ]]; then 18 | # If an argument was provided, use that as the kubectl command 19 | KUBECTL="$*" 20 | else 21 | # Default to 'kubectl' 22 | KUBECTL="kubectl" 23 | fi 24 | 25 | ######################################## 26 | # Check that the base command exists 27 | ######################################## 28 | # For "microk8s kubectl", we only check "microk8s" in PATH. For "oc" we check "oc". 29 | BASE_CMD="${KUBECTL%% *}" # everything before the first space 30 | if ! command -v "${BASE_CMD}" >/dev/null 2>&1; then 31 | echo "ERROR: '${BASE_CMD}' not found in PATH. Please install or configure it first." 32 | exit 1 33 | fi 34 | 35 | echo "Using kubectl command: $KUBECTL" 36 | echo 37 | 38 | ######################################## 39 | # Helper function to print and run commands 40 | ######################################## 41 | run_cmd() { 42 | echo "Command: $*" 43 | eval "$*" 44 | } 45 | 46 | ######################################## 47 | # 1. Print logs from ALL GoReplay pods 48 | ######################################## 49 | echo "==================================================" 50 | echo "1. Gathering logs from all goreplay pods (all containers)..." 
51 | echo "==================================================" 52 | run_cmd "$KUBECTL logs -n goreplay -l app=goreplay --all-containers" || { 53 | echo "WARNING: Failed to get logs from pods with label app=goreplay" 54 | } 55 | 56 | ######################################## 57 | # 2. Describe the GoReplay DaemonSet 58 | ######################################## 59 | echo 60 | echo "==================================================" 61 | echo "2. Describing DaemonSet goreplay-daemon..." 62 | echo "==================================================" 63 | run_cmd "$KUBECTL describe daemonset goreplay-daemon -n goreplay" || { 64 | echo "WARNING: Failed to describe daemonset goreplay-daemon" 65 | } 66 | 67 | ######################################## 68 | # 3. Get list of GoReplay pods (full output) 69 | ######################################## 70 | echo 71 | echo "==================================================" 72 | echo "3. Listing goreplay pods (full output)..." 73 | echo "==================================================" 74 | 75 | # Print full output (no -o name here): 76 | run_cmd "$KUBECTL get pods -n goreplay -l app=goreplay" 77 | 78 | # Then retrieve just the names for further processing: 79 | echo 80 | echo "Getting goreplay pod names for telemetry collection..." 81 | pods=$($KUBECTL get pods -n goreplay -l app=goreplay -o name 2>/dev/null) || { 82 | echo "ERROR: Failed to list pods with label app=goreplay" 83 | exit 1 84 | } 85 | echo "Found pods:" 86 | echo "$pods" 87 | echo 88 | 89 | ######################################## 90 | # 4. For each pod, gather logs, describe, and get events 91 | ######################################## 92 | for pod in $pods; do 93 | # pod looks like "pod/goreplay-daemon-xyz" 94 | pod_name="${pod##*/}" # remove "pod/" prefix 95 | 96 | echo "==================================================" 97 | echo "LOGS for pod: ${pod_name}" 98 | echo "==================================================" 99 | run_cmd "$KUBECTL logs ${pod_name} -n goreplay" || { 100 | echo "WARNING: Failed to get logs for pod ${pod_name}" 101 | } 102 | 103 | echo 104 | echo "--------------------------------------------------" 105 | echo "DESCRIBE for pod: ${pod_name}" 106 | echo "--------------------------------------------------" 107 | run_cmd "$KUBECTL describe pod -n goreplay ${pod_name}" || { 108 | echo "WARNING: Failed to describe pod ${pod_name}" 109 | } 110 | 111 | echo 112 | echo "--------------------------------------------------" 113 | echo "EVENTS for pod: ${pod_name}" 114 | echo "--------------------------------------------------" 115 | run_cmd "$KUBECTL get events -n goreplay --field-selector involvedObject.name=${pod_name}" || { 116 | echo "WARNING: Failed to get events for pod ${pod_name}" 117 | } 118 | echo 119 | done 120 | 121 | echo "==================================================" 122 | echo "Telemetry collection complete." 
123 | echo "==================================================" 124 | -------------------------------------------------------------------------------- /k8s/goreplay.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: goreplay-daemon 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: goreplay 9 | template: 10 | metadata: 11 | labels: 12 | app: goreplay 13 | spec: 14 | hostNetwork: true 15 | serviceAccountName: goreplay 16 | containers: 17 | - name: goreplay 18 | image: buger/gor:v2.0.0-rc4 19 | args: 20 | - "--input-raw k8s://deployments/nginx:80" 21 | - "--output-stdout" 22 | - "--verbose" 23 | -------------------------------------------------------------------------------- /k8s/nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx 20 | ports: 21 | - containerPort: 80 22 | 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: ngnix-service 28 | spec: 29 | selector: 30 | app: nginx 31 | type: NodePort 32 | ports: 33 | - protocol: TCP 34 | port: 80 35 | targetPort: 80 36 | -------------------------------------------------------------------------------- /k8s/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: goreplay-reader-binding 5 | subjects: 6 | - kind: ServiceAccount 7 | name: goreplay 8 | namespace: goreplay 9 | roleRef: 10 | kind: ClusterRole 11 | name: pod-reader 12 | apiGroup: "" 13 | -------------------------------------------------------------------------------- /limiter.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "math/rand" 7 | "strconv" 8 | "strings" 9 | "time" 10 | ) 11 | 12 | // Limiter is a wrapper for input or output plugin which adds rate limiting 13 | type Limiter struct { 14 | plugin interface{} 15 | limit int 16 | isPercent bool 17 | 18 | currentRPS int 19 | currentTime int64 20 | } 21 | 22 | func parseLimitOptions(options string) (limit int, isPercent bool) { 23 | if n := strings.Index(options, "%"); n > 0 { 24 | limit, _ = strconv.Atoi(options[:n]) 25 | isPercent = true 26 | } else { 27 | limit, _ = strconv.Atoi(options) 28 | isPercent = false 29 | } 30 | 31 | return 32 | } 33 | 34 | func newLimiterExceptions(l *Limiter) { 35 | 36 | if !l.isPercent { 37 | return 38 | } 39 | speedFactor := float64(l.limit) / float64(100) 40 | 41 | // FileInput、KafkaInput have its own rate limiting. Unlike other inputs we not just dropping requests, we can slow down or speed up request emittion. 
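// e.g. a "10%" limit yields speedFactor = 0.1, so a file or Kafka input is
// replayed at one tenth of the recorded rate rather than having 90% of its
// messages dropped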
42 | switch input := l.plugin.(type) { 43 | case *FileInput: 44 | input.speedFactor = speedFactor 45 | case *KafkaInput: 46 | input.speedFactor = speedFactor 47 | } 48 | } 49 | 50 | // NewLimiter constructor for Limiter, accepts plugin and options 51 | // `options` allows specifying a relative (percentage) or an absolute limit 52 | func NewLimiter(plugin interface{}, options string) PluginReadWriter { 53 | l := new(Limiter) 54 | l.limit, l.isPercent = parseLimitOptions(options) 55 | l.plugin = plugin 56 | l.currentTime = time.Now().UnixNano() 57 | 58 | newLimiterExceptions(l) 59 | 60 | return l 61 | } 62 | 63 | func (l *Limiter) isLimitedExceptions() bool { 64 | if !l.isPercent { 65 | return false 66 | } 67 | // FileInput and KafkaInput have their own limiting algorithm 68 | switch l.plugin.(type) { 69 | case *FileInput: 70 | return true 71 | case *KafkaInput: 72 | return true 73 | default: 74 | return false 75 | } 76 | } 77 | 78 | func (l *Limiter) isLimited() bool { 79 | if l.isLimitedExceptions() { 80 | return false 81 | } 82 | 83 | if l.isPercent { 84 | return l.limit <= rand.Intn(100) 85 | } 86 | 87 | if (time.Now().UnixNano() - l.currentTime) > time.Second.Nanoseconds() { 88 | l.currentTime = time.Now().UnixNano() 89 | l.currentRPS = 0 90 | } 91 | 92 | if l.currentRPS >= l.limit { 93 | return true 94 | } 95 | 96 | l.currentRPS++ 97 | 98 | return false 99 | } 100 | 101 | // PluginWrite writes a message to this plugin 102 | func (l *Limiter) PluginWrite(msg *Message) (n int, err error) { 103 | if l.isLimited() { 104 | return 0, nil 105 | } 106 | if w, ok := l.plugin.(PluginWriter); ok { 107 | return w.PluginWrite(msg) 108 | } 109 | // avoid further writing 110 | return 0, io.ErrClosedPipe 111 | } 112 | 113 | // PluginRead reads a message from this plugin 114 | func (l *Limiter) PluginRead() (msg *Message, err error) { 115 | if r, ok := l.plugin.(PluginReader); ok { 116 | msg, err = r.PluginRead() 117 | } else { 118 | // avoid further reading 119 | return nil, io.ErrClosedPipe 120 | } 121 | 122 | if l.isLimited() { 123 | return nil, nil 124 | } 125 | 126 | return 127 | } 128 | 129 | func (l *Limiter) String() string { 130 | return fmt.Sprintf("Limiting %s to: %d (isPercent: %v)", l.plugin, l.limit, l.isPercent) 131 | } 132 | 133 | // Close closes the resources. 
134 | func (l *Limiter) Close() error { 135 | if fi, ok := l.plugin.(io.Closer); ok { 136 | fi.Close() 137 | } 138 | return nil 139 | } 140 | -------------------------------------------------------------------------------- /limiter_test.go: -------------------------------------------------------------------------------- 1 | //go:build !race 2 | 3 | package goreplay 4 | 5 | import ( 6 | "sync" 7 | "testing" 8 | ) 9 | 10 | func TestOutputLimiter(t *testing.T) { 11 | wg := new(sync.WaitGroup) 12 | 13 | input := NewTestInput() 14 | output := NewLimiter(NewTestOutput(func(*Message) { 15 | wg.Done() 16 | }), "10") 17 | wg.Add(10) 18 | 19 | plugins := &InOutPlugins{ 20 | Inputs: []PluginReader{input}, 21 | Outputs: []PluginWriter{output}, 22 | } 23 | plugins.All = append(plugins.All, input, output) 24 | 25 | emitter := NewEmitter() 26 | go emitter.Start(plugins, Settings.Middleware) 27 | 28 | for i := 0; i < 100; i++ { 29 | input.EmitGET() 30 | } 31 | 32 | wg.Wait() 33 | emitter.Close() 34 | } 35 | 36 | func TestInputLimiter(t *testing.T) { 37 | wg := new(sync.WaitGroup) 38 | 39 | input := NewLimiter(NewTestInput(), "10") 40 | output := NewTestOutput(func(*Message) { 41 | wg.Done() 42 | }) 43 | wg.Add(10) 44 | 45 | plugins := &InOutPlugins{ 46 | Inputs: []PluginReader{input}, 47 | Outputs: []PluginWriter{output}, 48 | } 49 | plugins.All = append(plugins.All, input, output) 50 | 51 | emitter := NewEmitter() 52 | go emitter.Start(plugins, Settings.Middleware) 53 | 54 | for i := 0; i < 100; i++ { 55 | input.(*Limiter).plugin.(*TestInput).EmitGET() 56 | } 57 | 58 | wg.Wait() 59 | emitter.Close() 60 | } 61 | 62 | // Should limit all requests 63 | func TestPercentLimiter1(t *testing.T) { 64 | wg := new(sync.WaitGroup) 65 | 66 | input := NewTestInput() 67 | output := NewLimiter(NewTestOutput(func(*Message) { 68 | wg.Done() 69 | }), "0%") 70 | 71 | plugins := &InOutPlugins{ 72 | Inputs: []PluginReader{input}, 73 | Outputs: []PluginWriter{output}, 74 | } 75 | plugins.All = append(plugins.All, input, output) 76 | 77 | emitter := NewEmitter() 78 | go emitter.Start(plugins, Settings.Middleware) 79 | 80 | for i := 0; i < 100; i++ { 81 | input.EmitGET() 82 | } 83 | 84 | wg.Wait() 85 | } 86 | 87 | // Should not limit at all 88 | func TestPercentLimiter2(t *testing.T) { 89 | wg := new(sync.WaitGroup) 90 | 91 | input := NewTestInput() 92 | output := NewLimiter(NewTestOutput(func(*Message) { 93 | wg.Done() 94 | }), "100%") 95 | wg.Add(100) 96 | 97 | plugins := &InOutPlugins{ 98 | Inputs: []PluginReader{input}, 99 | Outputs: []PluginWriter{output}, 100 | } 101 | plugins.All = append(plugins.All, input, output) 102 | 103 | emitter := NewEmitter() 104 | go emitter.Start(plugins, Settings.Middleware) 105 | 106 | for i := 0; i < 100; i++ { 107 | input.EmitGET() 108 | } 109 | 110 | wg.Wait() 111 | } 112 | -------------------------------------------------------------------------------- /middleware.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "encoding/hex" 7 | "fmt" 8 | "io" 9 | "os" 10 | "os/exec" 11 | "strings" 12 | "sync" 13 | "syscall" 14 | ) 15 | 16 | // Middleware represents a middleware object 17 | type Middleware struct { 18 | command string 19 | data chan *Message 20 | Stdin io.Writer 21 | Stdout io.Reader 22 | commandCancel context.CancelFunc 23 | stop chan bool // Channel used only to indicate goroutine should shutdown 24 | closed bool 25 | mu sync.RWMutex 26 | } 27 | 28 | // NewMiddleware returns new 
middleware 29 | func NewMiddleware(command string) *Middleware { 30 | m := new(Middleware) 31 | m.command = command 32 | m.data = make(chan *Message, 1000) 33 | m.stop = make(chan bool) 34 | 35 | commands := strings.Split(command, " ") 36 | ctx, cancl := context.WithCancel(context.Background()) 37 | m.commandCancel = cancl 38 | cmd := exec.CommandContext(ctx, commands[0], commands[1:]...) 39 | 40 | m.Stdout, _ = cmd.StdoutPipe() 41 | m.Stdin, _ = cmd.StdinPipe() 42 | 43 | cmd.Stderr = os.Stderr 44 | 45 | go m.read(m.Stdout) 46 | 47 | go func() { 48 | defer m.Close() 49 | var err error 50 | if err = cmd.Start(); err == nil { 51 | err = cmd.Wait() 52 | } 53 | if err != nil { 54 | if e, ok := err.(*exec.ExitError); ok { 55 | status := e.Sys().(syscall.WaitStatus) 56 | if status.Signal() == syscall.SIGKILL /*killed or context canceld */ { 57 | return 58 | } 59 | } 60 | Debug(0, fmt.Sprintf("[MIDDLEWARE] command[%q] error: %q", command, err.Error())) 61 | } 62 | }() 63 | 64 | return m 65 | } 66 | 67 | // ReadFrom start a worker to read from this plugin 68 | func (m *Middleware) ReadFrom(plugin PluginReader) { 69 | Debug(2, fmt.Sprintf("[MIDDLEWARE] command[%q] Starting reading from %q", m.command, plugin)) 70 | go m.copy(m.Stdin, plugin) 71 | } 72 | 73 | func (m *Middleware) copy(to io.Writer, from PluginReader) { 74 | var buf, dst []byte 75 | 76 | for { 77 | msg, err := from.PluginRead() 78 | if err != nil { 79 | return 80 | } 81 | if msg == nil || len(msg.Data) == 0 { 82 | continue 83 | } 84 | buf = msg.Data 85 | if Settings.PrettifyHTTP { 86 | buf = prettifyHTTP(msg.Data) 87 | } 88 | dstLen := (len(buf)+len(msg.Meta))*2 + 1 89 | // if enough space was previously allocated use it instead 90 | if dstLen > len(dst) { 91 | dst = make([]byte, dstLen) 92 | } 93 | n := hex.Encode(dst, msg.Meta) 94 | n += hex.Encode(dst[n:], buf) 95 | dst[n] = '\n' 96 | 97 | n, err = to.Write(dst[:n+1]) 98 | if err == nil { 99 | continue 100 | } 101 | if m.isClosed() { 102 | return 103 | } 104 | } 105 | } 106 | 107 | func (m *Middleware) read(from io.Reader) { 108 | reader := bufio.NewReader(from) 109 | var line []byte 110 | var e error 111 | for { 112 | if line, e = reader.ReadBytes('\n'); e != nil { 113 | if m.isClosed() { 114 | return 115 | } 116 | continue 117 | } 118 | buf := make([]byte, (len(line)-1)/2) 119 | if _, err := hex.Decode(buf, line[:len(line)-1]); err != nil { 120 | Debug(0, fmt.Sprintf("[MIDDLEWARE] command[%q] failed to decode err: %q", m.command, err)) 121 | continue 122 | } 123 | var msg Message 124 | msg.Meta, msg.Data = payloadMetaWithBody(buf) 125 | select { 126 | case <-m.stop: 127 | return 128 | case m.data <- &msg: 129 | } 130 | } 131 | 132 | } 133 | 134 | // PluginRead reads message from this plugin 135 | func (m *Middleware) PluginRead() (msg *Message, err error) { 136 | select { 137 | case <-m.stop: 138 | return nil, ErrorStopped 139 | case msg = <-m.data: 140 | } 141 | 142 | return 143 | } 144 | 145 | func (m *Middleware) String() string { 146 | return fmt.Sprintf("Modifying traffic using %q command", m.command) 147 | } 148 | 149 | func (m *Middleware) isClosed() bool { 150 | m.mu.RLock() 151 | defer m.mu.RUnlock() 152 | return m.closed 153 | } 154 | 155 | // Close closes this plugin 156 | func (m *Middleware) Close() error { 157 | if m.isClosed() { 158 | return nil 159 | } 160 | m.mu.Lock() 161 | defer m.mu.Unlock() 162 | m.commandCancel() 163 | close(m.stop) 164 | m.closed = true 165 | return nil 166 | } 167 | 
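The hex framing implemented by Middleware.copy and Middleware.read above defines the whole contract for an external middleware process: it receives one hex-encoded payload (meta header plus body) per line on stdin and must write zero or more payloads back to stdout in the same encoding. A minimal pass-through sketch in Go follows; the file name and structure are illustrative only and are not part of this repository.

// echo_middleware.go (hypothetical example, not part of the repository)
// Reads hex-encoded payloads line by line from stdin and forwards them
// unchanged to stdout, which is the minimal contract expected by Middleware.
package main

import (
	"bufio"
	"encoding/hex"
	"fmt"
	"os"
)

func main() {
	scanner := bufio.NewScanner(os.Stdin)
	// Allow payloads larger than the default 64KB token limit.
	scanner.Buffer(make([]byte, 64*1024), 16*1024*1024)
	for scanner.Scan() {
		line := scanner.Bytes()
		payload := make([]byte, len(line)/2)
		if _, err := hex.Decode(payload, line); err != nil {
			continue // skip anything that is not valid hex
		}
		// A real middleware would inspect or rewrite `payload` here;
		// dropping a request is done by simply not printing it.
		fmt.Println(hex.EncodeToString(payload))
	}
}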
-------------------------------------------------------------------------------- /middleware/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "goreplay_middleware", 3 | "version": "1.0.0", 4 | "description": "Package for writing middleware for GoReplay https://goreplay.org", 5 | "main": "middleware.js", 6 | "scripts": { 7 | "test": "node -e \"var gor = require('./middleware.js'); gor.test(); process.exit()\"", 8 | "benchmark": "node -e \"var gor = require('./middleware.js'); gor.benchmark(); process.exit()\"" 9 | }, 10 | "keywords": [ 11 | "middleware", 12 | "goreplay" 13 | ], 14 | "author": "Leonid Bugaev", 15 | "license": "LGPL-3.0" 16 | } 17 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: My Docs 2 | theme: readthedocs 3 | -------------------------------------------------------------------------------- /nfpm.yaml: -------------------------------------------------------------------------------- 1 | # nfpm example config file 2 | # 3 | # check https://nfpm.goreleaser.com/configuration for detailed usage 4 | # 5 | name: "GoReplay" 6 | arch: ${PLATFORM} 7 | platform: "linux" 8 | version: ${VERSION} 9 | section: "default" 10 | priority: "extra" 11 | provides: 12 | - goreplay 13 | maintainer: "Leonid Bugaev " 14 | description: | 15 | GoReplay is the simplest and safest way to test your app using real traffic before you put it into production. 16 | vendor: "GoReplay" 17 | homepage: "https://goreplay.org" 18 | license: "AGPL" 19 | contents: 20 | - src: ./${BIN_NAME} 21 | dst: /usr/local/bin 22 | -------------------------------------------------------------------------------- /output_binary.go: -------------------------------------------------------------------------------- 1 | //go:build !pro 2 | 3 | package goreplay 4 | 5 | import ( 6 | "errors" 7 | "time" 8 | 9 | "github.com/buger/goreplay/internal/size" 10 | ) 11 | 12 | var _ PluginWriter = (*BinaryOutput)(nil) 13 | 14 | // BinaryOutputConfig struct for holding binary output configuration 15 | type BinaryOutputConfig struct { 16 | Workers int `json:"output-binary-workers"` 17 | Timeout time.Duration `json:"output-binary-timeout"` 18 | BufferSize size.Size `json:"output-tcp-response-buffer"` 19 | Debug bool `json:"output-binary-debug"` 20 | TrackResponses bool `json:"output-binary-track-response"` 21 | } 22 | 23 | // BinaryOutput plugin manage pool of workers which send request to replayed server 24 | // By default workers pool is dynamic and starts with 10 workers 25 | // You can specify fixed number of workers using `--output-tcp-workers` 26 | type BinaryOutput struct { 27 | address string 28 | } 29 | 30 | // NewBinaryOutput constructor for BinaryOutput 31 | // Initialize workers 32 | func NewBinaryOutput(address string, config *BinaryOutputConfig) PluginReadWriter { 33 | return &BinaryOutput{address: address} 34 | } 35 | 36 | // PluginWrite writes a message to this plugin 37 | func (o *BinaryOutput) PluginWrite(msg *Message) (n int, err error) { 38 | return 0, errors.New("binary output is only available in PRO version") 39 | } 40 | 41 | // PluginRead reads a message from this plugin 42 | func (o *BinaryOutput) PluginRead() (*Message, error) { 43 | return nil, errors.New("binary output is only available in PRO version") 44 | } 45 | 46 | func (o *BinaryOutput) String() string { 47 | return "Binary output: " + o.address + " (PRO version 
required)" 48 | } 49 | 50 | // Close closes this plugin for reading 51 | func (o *BinaryOutput) Close() error { 52 | return nil 53 | } 54 | -------------------------------------------------------------------------------- /output_dummy.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "os" 5 | ) 6 | 7 | // DummyOutput used for debugging, prints all incoming requests 8 | type DummyOutput struct { 9 | } 10 | 11 | // NewDummyOutput constructor for DummyOutput 12 | func NewDummyOutput() (di *DummyOutput) { 13 | di = new(DummyOutput) 14 | 15 | return 16 | } 17 | 18 | // PluginWrite writes message to this plugin 19 | func (i *DummyOutput) PluginWrite(msg *Message) (int, error) { 20 | var n, nn int 21 | var err error 22 | n, err = os.Stdout.Write(msg.Meta) 23 | nn, err = os.Stdout.Write(msg.Data) 24 | n += nn 25 | nn, err = os.Stdout.Write(payloadSeparatorAsBytes) 26 | n += nn 27 | 28 | return n, err 29 | } 30 | 31 | func (i *DummyOutput) String() string { 32 | return "Dummy Output" 33 | } 34 | -------------------------------------------------------------------------------- /output_kafka.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "encoding/json" 5 | "github.com/buger/goreplay/internal/byteutils" 6 | "github.com/buger/goreplay/proto" 7 | "log" 8 | "strings" 9 | "time" 10 | 11 | "github.com/Shopify/sarama" 12 | "github.com/Shopify/sarama/mocks" 13 | ) 14 | 15 | // KafkaOutput is used for sending payloads to kafka in JSON format. 16 | type KafkaOutput struct { 17 | config *OutputKafkaConfig 18 | producer sarama.AsyncProducer 19 | } 20 | 21 | // KafkaOutputFrequency in milliseconds 22 | const KafkaOutputFrequency = 500 23 | 24 | // NewKafkaOutput creates instance of kafka producer client with TLS config 25 | func NewKafkaOutput(_ string, config *OutputKafkaConfig, tlsConfig *KafkaTLSConfig) PluginWriter { 26 | c := NewKafkaConfig(&config.SASLConfig, tlsConfig) 27 | 28 | var producer sarama.AsyncProducer 29 | 30 | if mock, ok := config.producer.(*mocks.AsyncProducer); ok && mock != nil { 31 | producer = config.producer 32 | } else { 33 | c.Producer.RequiredAcks = sarama.WaitForLocal 34 | c.Producer.Compression = sarama.CompressionSnappy 35 | c.Producer.Flush.Frequency = KafkaOutputFrequency * time.Millisecond 36 | 37 | brokerList := strings.Split(config.Host, ",") 38 | 39 | var err error 40 | producer, err = sarama.NewAsyncProducer(brokerList, c) 41 | if err != nil { 42 | log.Fatalln("Failed to start Sarama(Kafka) producer:", err) 43 | } 44 | } 45 | 46 | o := &KafkaOutput{ 47 | config: config, 48 | producer: producer, 49 | } 50 | 51 | // Start infinite loop for tracking errors for kafka producer. 
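// Draining Errors() is required for sarama's AsyncProducer (Producer.Return.Errors is enabled by default); without a consumer the producer can eventually block. Failed messages are only logged, not re-enqueued, here.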
52 | go o.ErrorHandler() 53 | 54 | return o 55 | } 56 | 57 | // ErrorHandler should receive errors 58 | func (o *KafkaOutput) ErrorHandler() { 59 | for err := range o.producer.Errors() { 60 | Debug(1, "Failed to write access log entry:", err) 61 | } 62 | } 63 | 64 | // PluginWrite writes a message to this plugin 65 | func (o *KafkaOutput) PluginWrite(msg *Message) (n int, err error) { 66 | var message sarama.StringEncoder 67 | 68 | if !o.config.UseJSON { 69 | message = sarama.StringEncoder(byteutils.SliceToString(msg.Meta) + byteutils.SliceToString(msg.Data)) 70 | } else { 71 | mimeHeader := proto.ParseHeaders(msg.Data) 72 | header := make(map[string]string) 73 | for k, v := range mimeHeader { 74 | header[k] = strings.Join(v, ", ") 75 | } 76 | 77 | meta := payloadMeta(msg.Meta) 78 | req := msg.Data 79 | 80 | kafkaMessage := KafkaMessage{ 81 | ReqURL: byteutils.SliceToString(proto.Path(req)), 82 | ReqType: byteutils.SliceToString(meta[0]), 83 | ReqID: byteutils.SliceToString(meta[1]), 84 | ReqTs: byteutils.SliceToString(meta[2]), 85 | ReqMethod: byteutils.SliceToString(proto.Method(req)), 86 | ReqBody: byteutils.SliceToString(proto.Body(req)), 87 | ReqHeaders: header, 88 | } 89 | jsonMessage, _ := json.Marshal(&kafkaMessage) 90 | message = sarama.StringEncoder(byteutils.SliceToString(jsonMessage)) 91 | } 92 | 93 | o.producer.Input() <- &sarama.ProducerMessage{ 94 | Topic: o.config.Topic, 95 | Value: message, 96 | } 97 | 98 | return len(message), nil 99 | } 100 | -------------------------------------------------------------------------------- /output_kafka_test.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/Shopify/sarama" 7 | "github.com/Shopify/sarama/mocks" 8 | ) 9 | 10 | func TestOutputKafkaRAW(t *testing.T) { 11 | config := sarama.NewConfig() 12 | config.Producer.Return.Successes = true 13 | producer := mocks.NewAsyncProducer(t, config) 14 | producer.ExpectInputAndSucceed() 15 | 16 | output := NewKafkaOutput("", &OutputKafkaConfig{ 17 | producer: producer, 18 | Topic: "test", 19 | UseJSON: false, 20 | }, nil) 21 | 22 | output.PluginWrite(&Message{Meta: []byte("1 2 3\n"), Data: []byte("GET / HTTP1.1\r\nHeader: 1\r\n\r\n")}) 23 | 24 | resp := <-producer.Successes() 25 | 26 | data, _ := resp.Value.Encode() 27 | 28 | if string(data) != "1 2 3\nGET / HTTP1.1\r\nHeader: 1\r\n\r\n" { 29 | t.Errorf("Message not properly encoded: %q", data) 30 | } 31 | } 32 | 33 | func TestOutputKafkaJSON(t *testing.T) { 34 | config := sarama.NewConfig() 35 | config.Producer.Return.Successes = true 36 | producer := mocks.NewAsyncProducer(t, config) 37 | producer.ExpectInputAndSucceed() 38 | 39 | output := NewKafkaOutput("", &OutputKafkaConfig{ 40 | producer: producer, 41 | Topic: "test", 42 | UseJSON: true, 43 | }, nil) 44 | 45 | output.PluginWrite(&Message{Meta: []byte("1 2 3\n"), Data: []byte("GET / HTTP1.1\r\nHeader: 1\r\n\r\n")}) 46 | 47 | resp := <-producer.Successes() 48 | 49 | data, _ := resp.Value.Encode() 50 | 51 | if string(data) != `{"Req_URL":"","Req_Type":"1","Req_ID":"2","Req_Ts":"3","Req_Method":"GET"}` { 52 | t.Error("Message not properly encoded: ", string(data)) 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /output_null.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | // NullOutput used for debugging, prints nothing 4 | type NullOutput struct { 5 | } 6 | 7 | // 
NewNullOutput constructor for NullOutput 8 | func NewNullOutput() (o *NullOutput) { 9 | return new(NullOutput) 10 | } 11 | 12 | // PluginWrite writes message to this plugin 13 | func (o *NullOutput) PluginWrite(msg *Message) (int, error) { 14 | return len(msg.Data) + len(msg.Meta), nil 15 | } 16 | 17 | func (o *NullOutput) String() string { 18 | return "Null Output" 19 | } 20 | -------------------------------------------------------------------------------- /output_s3.go: -------------------------------------------------------------------------------- 1 | //go:build !pro 2 | 3 | package goreplay 4 | 5 | import ( 6 | "errors" 7 | "fmt" 8 | ) 9 | 10 | // S3Output output plugin 11 | type S3Output struct{} 12 | 13 | // NewS3Output constructor for FileOutput, accepts path 14 | func NewS3Output(pathTemplate string, config *FileOutputConfig) *S3Output { 15 | fmt.Println("S3 output is only available in the pro version") 16 | return &S3Output{} 17 | } 18 | 19 | func (o *S3Output) PluginWrite(msg *Message) (n int, err error) { 20 | return 0, errors.New("S3 output is only available in the pro version") 21 | } 22 | 23 | func (o *S3Output) String() string { 24 | return "S3 output (pro version only)" 25 | } 26 | 27 | func (o *S3Output) Close() error { 28 | return errors.New("S3 output is only available in the pro version") 29 | } 30 | -------------------------------------------------------------------------------- /output_s3_pro.go: -------------------------------------------------------------------------------- 1 | //go:build pro 2 | 3 | package goreplay 4 | 5 | import ( 6 | _ "bufio" 7 | "fmt" 8 | _ "io" 9 | "log" 10 | "math/rand" 11 | "os" 12 | "path/filepath" 13 | "strings" 14 | 15 | "github.com/aws/aws-sdk-go/aws" 16 | "github.com/aws/aws-sdk-go/aws/session" 17 | "github.com/aws/aws-sdk-go/service/s3" 18 | _ "github.com/aws/aws-sdk-go/service/s3/s3manager" 19 | ) 20 | 21 | var _ PluginWriter = (*S3Output)(nil) 22 | 23 | // S3Output output plugin 24 | type S3Output struct { 25 | pathTemplate string 26 | 27 | buffer *FileOutput 28 | session *session.Session 29 | config *FileOutputConfig 30 | closeC chan struct{} 31 | } 32 | 33 | // NewS3Output constructor for FileOutput, accepts path 34 | func NewS3Output(pathTemplate string, config *FileOutputConfig) *S3Output { 35 | o := new(S3Output) 36 | o.pathTemplate = pathTemplate 37 | o.config = config 38 | o.config.onClose = o.onBufferUpdate 39 | 40 | if config.BufferPath == "" { 41 | config.BufferPath = "/tmp" 42 | } 43 | 44 | rnd := rand.Int63() 45 | buffer_name := fmt.Sprintf("gor_output_s3_%d_buf_", rnd) 46 | 47 | pathParts := strings.Split(pathTemplate, "/") 48 | buffer_name += pathParts[len(pathParts)-1] 49 | 50 | if strings.HasSuffix(o.pathTemplate, ".gz") { 51 | buffer_name += ".gz" 52 | } 53 | 54 | buffer_path := filepath.Join(config.BufferPath, buffer_name) 55 | 56 | o.buffer = NewFileOutput(buffer_path, config) 57 | o.connect() 58 | 59 | return o 60 | } 61 | 62 | func (o *S3Output) connect() { 63 | if o.session == nil { 64 | o.session = session.Must(session.NewSession(awsConfig())) 65 | log.Println("[S3 Output] S3 connection succesfully initialized") 66 | } 67 | } 68 | 69 | func (o *S3Output) PluginWrite(msg *Message) (n int, err error) { 70 | return o.buffer.PluginWrite(msg) 71 | } 72 | 73 | func (o *S3Output) String() string { 74 | return "S3 output: " + o.pathTemplate 75 | } 76 | 77 | func (o *S3Output) Close() error { 78 | return o.buffer.Close() 79 | } 80 | 81 | func (o *S3Output) keyPath(idx int) (bucket, key string) { 82 | bucket, key = 
parseS3Url(o.pathTemplate) 83 | 84 | for name, fn := range dateFileNameFuncs { 85 | key = strings.Replace(key, name, fn(o.buffer), -1) 86 | } 87 | 88 | key = setFileIndex(key, idx) 89 | 90 | return 91 | } 92 | 93 | func (o *S3Output) onBufferUpdate(path string) { 94 | svc := s3.New(o.session) 95 | idx := getFileIndex(path) 96 | bucket, key := o.keyPath(idx) 97 | 98 | file, _ := os.Open(path) 99 | // reader := bufio.NewReader(file) 100 | 101 | _, err := svc.PutObject(&s3.PutObjectInput{ 102 | Body: file, 103 | Bucket: aws.String(bucket), 104 | Key: aws.String(key), 105 | }) 106 | if err != nil { 107 | log.Printf("[S3 Output] Failed to upload data to %s/%s, %s\n", bucket, key, err) 108 | os.Remove(path) 109 | return 110 | } 111 | 112 | os.Remove(path) 113 | 114 | if o.closeC != nil { 115 | o.closeC <- struct{}{} 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /output_tcp.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "fmt" 7 | "hash/fnv" 8 | "net" 9 | "time" 10 | ) 11 | 12 | // TCPOutput used for sending raw tcp payloads 13 | // Currently used for internal communication between listener and replay server 14 | // Can be used for transferring binary payloads like protocol buffers 15 | type TCPOutput struct { 16 | address string 17 | limit int 18 | buf []chan *Message 19 | bufStats *GorStat 20 | config *TCPOutputConfig 21 | workerIndex uint32 22 | 23 | close bool 24 | } 25 | 26 | // TCPOutputConfig tcp output configuration 27 | type TCPOutputConfig struct { 28 | Secure bool `json:"output-tcp-secure"` 29 | Sticky bool `json:"output-tcp-sticky"` 30 | SkipVerify bool `json:"output-tcp-skip-verify"` 31 | Workers int `json:"output-tcp-workers"` 32 | 33 | GetInitMessage func() *Message `json:"-"` 34 | WriteBeforeMessage func(conn net.Conn, msg *Message) error `json:"-"` 35 | } 36 | 37 | // NewTCPOutput constructor for TCPOutput 38 | // Initialize X workers which hold keep-alive connection 39 | func NewTCPOutput(address string, config *TCPOutputConfig) PluginWriter { 40 | o := new(TCPOutput) 41 | 42 | o.address = address 43 | o.config = config 44 | 45 | if Settings.OutputTCPStats { 46 | o.bufStats = NewGorStat("output_tcp", 5000) 47 | } 48 | 49 | // create X buffers and send the buffer index to the worker 50 | o.buf = make([]chan *Message, o.config.Workers) 51 | for i := 0; i < o.config.Workers; i++ { 52 | o.buf[i] = make(chan *Message, 100) 53 | go o.worker(i) 54 | } 55 | 56 | return o 57 | } 58 | 59 | func (o *TCPOutput) worker(bufferIndex int) { 60 | retries := 0 61 | conn, err := o.connect(o.address) 62 | for { 63 | if o.close { 64 | return 65 | } 66 | 67 | if err == nil { 68 | break 69 | } 70 | 71 | Debug(1, fmt.Sprintf("Can't connect to aggregator instance, reconnecting in 1 second. 
Retries:%d", retries)) 72 | time.Sleep(1 * time.Second) 73 | 74 | conn, err = o.connect(o.address) 75 | retries++ 76 | } 77 | 78 | if retries > 0 { 79 | Debug(2, fmt.Sprintf("Connected to aggregator instance after %d retries", retries)) 80 | } 81 | 82 | defer conn.Close() 83 | 84 | if o.config.GetInitMessage != nil { 85 | msg := o.config.GetInitMessage() 86 | _ = o.writeToConnection(conn, msg) 87 | } 88 | 89 | for { 90 | msg := <-o.buf[bufferIndex] 91 | err = o.writeToConnection(conn, msg) 92 | if err != nil { 93 | Debug(2, "INFO: TCP output connection closed, reconnecting") 94 | go o.worker(bufferIndex) 95 | o.buf[bufferIndex] <- msg 96 | break 97 | } 98 | } 99 | } 100 | 101 | func (o *TCPOutput) writeToConnection(conn net.Conn, msg *Message) (err error) { 102 | if o.config.WriteBeforeMessage != nil { 103 | err = o.config.WriteBeforeMessage(conn, msg) 104 | } 105 | 106 | if err == nil { 107 | if _, err = conn.Write(msg.Meta); err == nil { 108 | if _, err = conn.Write(msg.Data); err == nil { 109 | _, err = conn.Write(payloadSeparatorAsBytes) 110 | } 111 | } 112 | } 113 | 114 | return err 115 | } 116 | 117 | func (o *TCPOutput) getBufferIndex(msg *Message) int { 118 | if !o.config.Sticky { 119 | o.workerIndex++ 120 | return int(o.workerIndex) % o.config.Workers 121 | } 122 | 123 | hasher := fnv.New32a() 124 | hasher.Write(payloadID(msg.Meta)) 125 | return int(hasher.Sum32()) % o.config.Workers 126 | } 127 | 128 | // PluginWrite writes message to this plugin 129 | func (o *TCPOutput) PluginWrite(msg *Message) (n int, err error) { 130 | if !isOriginPayload(msg.Meta) { 131 | return len(msg.Data), nil 132 | } 133 | 134 | bufferIndex := o.getBufferIndex(msg) 135 | o.buf[bufferIndex] <- msg 136 | 137 | if Settings.OutputTCPStats { 138 | o.bufStats.Write(len(o.buf[bufferIndex])) 139 | } 140 | 141 | return len(msg.Data) + len(msg.Meta), nil 142 | } 143 | 144 | func (o *TCPOutput) connect(address string) (conn net.Conn, err error) { 145 | if o.config.Secure { 146 | var d tls.Dialer 147 | d.Config = &tls.Config{InsecureSkipVerify: o.config.SkipVerify} 148 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 149 | defer cancel() 150 | conn, err = d.DialContext(ctx, "tcp", address) 151 | } else { 152 | var d net.Dialer 153 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 154 | defer cancel() 155 | conn, err = d.DialContext(ctx, "tcp", address) 156 | } 157 | 158 | return 159 | } 160 | 161 | func (o *TCPOutput) String() string { 162 | return fmt.Sprintf("TCP output %s, limit: %d", o.address, o.limit) 163 | } 164 | 165 | func (o *TCPOutput) Close() { 166 | o.close = true 167 | } 168 | -------------------------------------------------------------------------------- /output_ws.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "encoding/base64" 7 | "fmt" 8 | "hash/fnv" 9 | "log" 10 | "net/http" 11 | "net/url" 12 | "strings" 13 | "time" 14 | 15 | "github.com/gorilla/websocket" 16 | ) 17 | 18 | // WebSocketOutput used for sending raw tcp payloads 19 | // Can be used for transferring binary payloads like protocol buffers 20 | type WebSocketOutput struct { 21 | address string 22 | limit int 23 | buf []chan *Message 24 | bufStats *GorStat 25 | config *WebSocketOutputConfig 26 | workerIndex uint32 27 | headers http.Header 28 | 29 | close bool 30 | } 31 | 32 | // WebSocketOutputConfig WebSocket output configuration 33 | type WebSocketOutputConfig struct { 34 | 
Sticky bool `json:"output-ws-sticky"` 35 | SkipVerify bool `json:"output-ws-skip-verify"` 36 | Workers int `json:"output-ws-workers"` 37 | 38 | Headers map[string][]string `json:"output-ws-headers"` 39 | } 40 | 41 | // NewWebSocketOutput constructor for WebSocketOutput 42 | // Initialize X workers which hold keep-alive connection 43 | func NewWebSocketOutput(address string, config *WebSocketOutputConfig) PluginWriter { 44 | o := new(WebSocketOutput) 45 | 46 | u, err := url.Parse(address) 47 | if err != nil { 48 | log.Fatal(fmt.Sprintf("[OUTPUT-WS] parse WS output URL error[%q]", err)) 49 | } 50 | 51 | o.config = config 52 | o.headers = http.Header{ 53 | "Authorization": []string{"Basic " + base64.StdEncoding.EncodeToString([]byte(u.User.String()))}, 54 | } 55 | for k, values := range config.Headers { 56 | for _, v := range values { 57 | o.headers.Add(k, v) 58 | } 59 | } 60 | 61 | u.User = nil // must be after creating the headers 62 | o.address = u.String() 63 | 64 | if Settings.OutputWebSocketStats { 65 | o.bufStats = NewGorStat("output_ws", 5000) 66 | } 67 | 68 | // create X buffers and send the buffer index to the worker 69 | o.buf = make([]chan *Message, o.config.Workers) 70 | for i := 0; i < o.config.Workers; i++ { 71 | o.buf[i] = make(chan *Message, 100) 72 | go o.worker(i) 73 | } 74 | 75 | return o 76 | } 77 | 78 | func (o *WebSocketOutput) worker(bufferIndex int) { 79 | retries := 0 80 | conn, err := o.connect(o.address) 81 | for { 82 | if o.close { 83 | return 84 | } 85 | 86 | if err == nil { 87 | break 88 | } 89 | 90 | Debug(1, fmt.Sprintf("Can't connect to aggregator instance, reconnecting in 1 second. Retries:%d", retries)) 91 | time.Sleep(1 * time.Second) 92 | 93 | conn, err = o.connect(o.address) 94 | retries++ 95 | } 96 | 97 | if retries > 0 { 98 | Debug(2, fmt.Sprintf("Connected to aggregator instance after %d retries", retries)) 99 | } 100 | 101 | defer conn.Close() 102 | 103 | for { 104 | msg := <-o.buf[bufferIndex] 105 | err = conn.WriteMessage(websocket.BinaryMessage, append(msg.Meta, msg.Data...)) 106 | if err != nil { 107 | Debug(2, "INFO: WebSocket output connection closed, reconnecting "+err.Error()) 108 | go o.worker(bufferIndex) 109 | o.buf[bufferIndex] <- msg 110 | break 111 | } 112 | } 113 | } 114 | 115 | func (o *WebSocketOutput) getBufferIndex(msg *Message) int { 116 | if !o.config.Sticky { 117 | o.workerIndex++ 118 | return int(o.workerIndex) % o.config.Workers 119 | } 120 | 121 | hasher := fnv.New32a() 122 | hasher.Write(payloadID(msg.Meta)) 123 | return int(hasher.Sum32()) % o.config.Workers 124 | } 125 | 126 | // PluginWrite writes message to this plugin 127 | func (o *WebSocketOutput) PluginWrite(msg *Message) (n int, err error) { 128 | if !isOriginPayload(msg.Meta) { 129 | return len(msg.Data), nil 130 | } 131 | 132 | bufferIndex := o.getBufferIndex(msg) 133 | o.buf[bufferIndex] <- msg 134 | 135 | if Settings.OutputTCPStats { 136 | o.bufStats.Write(len(o.buf[bufferIndex])) 137 | } 138 | 139 | return len(msg.Data) + len(msg.Meta), nil 140 | } 141 | 142 | func (o *WebSocketOutput) connect(address string) (conn *websocket.Conn, err error) { 143 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 144 | defer cancel() 145 | 146 | d := websocket.DefaultDialer 147 | if strings.HasPrefix(address, "wss://") { 148 | d.TLSClientConfig = &tls.Config{InsecureSkipVerify: o.config.SkipVerify} 149 | } 150 | 151 | conn, _, err = d.DialContext(ctx, address, o.headers) 152 | return 153 | } 154 | 155 | func (o *WebSocketOutput) String() string { 156 | 
return fmt.Sprintf("WebSocket output %s, limit: %d", o.address, o.limit) 157 | } 158 | 159 | // Close closes the output 160 | func (o *WebSocketOutput) Close() { 161 | o.close = true 162 | } 163 | -------------------------------------------------------------------------------- /output_ws_test.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "log" 5 | "net/http" 6 | "sync" 7 | "testing" 8 | 9 | "github.com/gorilla/websocket" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestWebSocketOutput(t *testing.T) { 14 | wg := new(sync.WaitGroup) 15 | 16 | var gotHeader http.Header 17 | wsAddr := startWebsocket(func(data []byte) { 18 | wg.Done() 19 | }, func(header http.Header) { 20 | gotHeader = header 21 | }) 22 | input := NewTestInput() 23 | headers := map[string][]string{ 24 | "key1": {"value1"}, 25 | "key2": {"value2"}, 26 | } 27 | output := NewWebSocketOutput(wsAddr, &WebSocketOutputConfig{Workers: 1, Headers: headers}) 28 | 29 | plugins := &InOutPlugins{ 30 | Inputs: []PluginReader{input}, 31 | Outputs: []PluginWriter{output}, 32 | } 33 | 34 | emitter := NewEmitter() 35 | go emitter.Start(plugins, Settings.Middleware) 36 | 37 | for i := 0; i < 10; i++ { 38 | wg.Add(1) 39 | input.EmitGET() 40 | } 41 | 42 | wg.Wait() 43 | emitter.Close() 44 | 45 | if assert.NotNil(t, gotHeader) { 46 | assert.Equal(t, "Basic dXNlcjE=", gotHeader.Get("Authorization")) 47 | for k, values := range headers { 48 | assert.Equal(t, 1, len(values)) 49 | assert.Equal(t, values[0], gotHeader.Get(k)) 50 | } 51 | } 52 | } 53 | 54 | func startWebsocket(cb func([]byte), headercb func(http.Header)) string { 55 | upgrader := websocket.Upgrader{} 56 | 57 | http.HandleFunc("/test", func(w http.ResponseWriter, r *http.Request) { 58 | headercb(r.Header) 59 | c, err := upgrader.Upgrade(w, r, nil) 60 | if err != nil { 61 | log.Print("upgrade:", err) 62 | return 63 | } 64 | 65 | go func(conn *websocket.Conn) { 66 | defer conn.Close() 67 | for { 68 | _, msg, _ := conn.ReadMessage() 69 | cb(msg) 70 | } 71 | }(c) 72 | }) 73 | 74 | go func() { 75 | err := http.ListenAndServe("localhost:8081", nil) 76 | if err != nil { 77 | log.Fatal("Can't start:", err) 78 | } 79 | }() 80 | 81 | return "ws://user1@localhost:8081/test" 82 | } 83 | -------------------------------------------------------------------------------- /plugins_test.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestPluginsRegistration(t *testing.T) { 8 | Settings.InputDummy = []string{"[]"} 9 | Settings.OutputDummy = []string{"[]"} 10 | Settings.OutputHTTP = []string{"www.example.com|10"} 11 | Settings.InputFile = []string{"/dev/null"} 12 | 13 | plugins := NewPlugins() 14 | 15 | if len(plugins.Inputs) != 3 { 16 | t.Errorf("Should be 3 inputs got %d", len(plugins.Inputs)) 17 | } 18 | 19 | if _, ok := plugins.Inputs[0].(*DummyInput); !ok { 20 | t.Errorf("First input should be DummyInput") 21 | } 22 | 23 | if _, ok := plugins.Inputs[1].(*FileInput); !ok { 24 | t.Errorf("Second input should be FileInput") 25 | } 26 | 27 | if len(plugins.Outputs) != 2 { 28 | t.Errorf("Should be 2 output %d", len(plugins.Outputs)) 29 | } 30 | 31 | if _, ok := plugins.Outputs[0].(*DummyOutput); !ok { 32 | t.Errorf("First output should be DummyOutput") 33 | } 34 | 35 | if l, ok := plugins.Outputs[1].(*Limiter); ok { 36 | if _, ok := l.plugin.(*HTTPOutput); !ok { 37 | t.Errorf("HTTPOutput should be wrapped 
in limiter") 38 | } 39 | } else { 40 | t.Errorf("Second output should be Limiter") 41 | } 42 | 43 | } 44 | -------------------------------------------------------------------------------- /pro.go: -------------------------------------------------------------------------------- 1 | //go:build pro 2 | 3 | package goreplay 4 | 5 | // PRO this value indicates if goreplay is running in PRO mode.. 6 | // it must not be modified explicitly in production 7 | var PRO = true 8 | 9 | // SettingsHook is intentionally left as a no-op 10 | var SettingsHook = func(*AppSettings) {} 11 | -------------------------------------------------------------------------------- /proto/fuzz.go: -------------------------------------------------------------------------------- 1 | //go:build gofuzz 2 | 3 | package proto 4 | 5 | func Fuzz(data []byte) int { 6 | 7 | ParseHeaders(data) 8 | 9 | return 1 10 | } 11 | -------------------------------------------------------------------------------- /protocol.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "bytes" 5 | "crypto/rand" 6 | "encoding/hex" 7 | "fmt" 8 | ) 9 | 10 | // These constants help to indicate the type of payload 11 | const ( 12 | RequestPayload = '1' 13 | ResponsePayload = '2' 14 | ReplayedResponsePayload = '3' 15 | ) 16 | 17 | func randByte(len int) []byte { 18 | b := make([]byte, len/2) 19 | rand.Read(b) 20 | 21 | h := make([]byte, len) 22 | hex.Encode(h, b) 23 | 24 | return h 25 | } 26 | 27 | func uuid() []byte { 28 | return randByte(24) 29 | } 30 | 31 | var payloadSeparator = "\n🐵🙈🙉\n" 32 | 33 | func payloadScanner(data []byte, atEOF bool) (advance int, token []byte, err error) { 34 | if atEOF && len(data) == 0 { 35 | return 0, nil, nil 36 | } 37 | 38 | if i := bytes.Index(data, []byte(payloadSeparator)); i >= 0 { 39 | // We have a full newline-terminated line. 
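// More precisely, a full record terminated by payloadSeparator: advance past the separator and return the record without it.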
40 | return i + len([]byte(payloadSeparator)), data[0:i], nil 41 | } 42 | 43 | if atEOF { 44 | return len(data), data, nil 45 | } 46 | return 0, nil, nil 47 | } 48 | 49 | // Timing is request start or round-trip time, depending on payloadType 50 | func payloadHeader(payloadType byte, uuid []byte, timing int64, latency int64) (header []byte) { 51 | //Example: 52 | // 3 f45590522cd1838b4a0d5c5aab80b77929dea3b3 13923489726487326 1231\n 53 | return []byte(fmt.Sprintf("%c %s %d %d\n", payloadType, uuid, timing, latency)) 54 | } 55 | 56 | func payloadBody(payload []byte) []byte { 57 | headerSize := bytes.IndexByte(payload, '\n') 58 | return payload[headerSize+1:] 59 | } 60 | 61 | func payloadMeta(payload []byte) [][]byte { 62 | headerSize := bytes.IndexByte(payload, '\n') 63 | if headerSize < 0 { 64 | return nil 65 | } 66 | return bytes.Split(payload[:headerSize], []byte{' '}) 67 | } 68 | 69 | func payloadMetaWithBody(payload []byte) (meta, body []byte) { 70 | if i := bytes.IndexByte(payload, '\n'); i > 0 && len(payload) > i+1 { 71 | meta = payload[:i+1] 72 | body = payload[i+1:] 73 | return 74 | } 75 | // we assume the message did not have meta data 76 | return nil, payload 77 | } 78 | 79 | func payloadID(payload []byte) (id []byte) { 80 | meta := payloadMeta(payload) 81 | 82 | if len(meta) < 2 { 83 | return 84 | } 85 | return meta[1] 86 | } 87 | 88 | func isOriginPayload(payload []byte) bool { 89 | return payload[0] == RequestPayload || payload[0] == ResponsePayload 90 | } 91 | 92 | func isRequestPayload(payload []byte) bool { 93 | return payload[0] == RequestPayload 94 | } 95 | -------------------------------------------------------------------------------- /s3_reader.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "bytes" 5 | "log" 6 | "os" 7 | "strconv" 8 | "strings" 9 | 10 | "github.com/aws/aws-sdk-go/aws" 11 | "github.com/aws/aws-sdk-go/aws/session" 12 | "github.com/aws/aws-sdk-go/service/s3" 13 | ) 14 | 15 | // S3ReadCloser ... 16 | type S3ReadCloser struct { 17 | bucket string 18 | key string 19 | offset int 20 | totalSize int 21 | readBytes int 22 | sess *session.Session 23 | buf *bytes.Buffer 24 | } 25 | 26 | func awsConfig() *aws.Config { 27 | region := os.Getenv("AWS_DEFAULT_REGION") 28 | if region == "" { 29 | region = os.Getenv("AWS_REGION") 30 | if region == "" { 31 | region = "us-east-1" 32 | } 33 | } 34 | 35 | config := &aws.Config{Region: aws.String(region)} 36 | 37 | if endpoint := os.Getenv("AWS_ENDPOINT_URL"); endpoint != "" { 38 | config.Endpoint = aws.String(endpoint) 39 | log.Println("Custom endpoint:", endpoint) 40 | } 41 | 42 | log.Println("Connecting to S3. 
Region: " + region) 43 | 44 | config.CredentialsChainVerboseErrors = aws.Bool(true) 45 | 46 | if os.Getenv("AWS_DEBUG") != "" { 47 | config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) 48 | } 49 | 50 | return config 51 | } 52 | 53 | // NewS3ReadCloser returns new instance of S3 read closer 54 | func NewS3ReadCloser(path string) *S3ReadCloser { 55 | if !PRO { 56 | log.Fatal("Using S3 input and output require PRO license") 57 | return nil 58 | } 59 | 60 | bucket, key := parseS3Url(path) 61 | sess := session.Must(session.NewSession(awsConfig())) 62 | 63 | log.Println("[S3 Input] S3 connection successfully initialized", path) 64 | 65 | return &S3ReadCloser{ 66 | bucket: bucket, 67 | key: key, 68 | sess: sess, 69 | buf: &bytes.Buffer{}, 70 | } 71 | } 72 | 73 | // Read reads buffer from s3 session 74 | func (s *S3ReadCloser) Read(b []byte) (n int, e error) { 75 | if s.readBytes == 0 || s.readBytes+len(b) > s.offset { 76 | svc := s3.New(s.sess) 77 | 78 | objectRange := "bytes=" + strconv.Itoa(s.offset) 79 | s.offset += 1000000 // Reading in chunks of 1 mb 80 | objectRange += "-" + strconv.Itoa(s.offset-1) 81 | 82 | params := &s3.GetObjectInput{ 83 | Bucket: aws.String(s.bucket), 84 | Key: aws.String(s.key), 85 | Range: aws.String(objectRange), 86 | } 87 | resp, err := svc.GetObject(params) 88 | 89 | if err != nil { 90 | log.Println("[S3 Input] Error during getting file", s.bucket, s.key, err) 91 | } else { 92 | s.totalSize, _ = strconv.Atoi(strings.Split(*resp.ContentRange, "/")[1]) 93 | s.buf.ReadFrom(resp.Body) 94 | } 95 | } 96 | 97 | s.readBytes += len(b) 98 | 99 | return s.buf.Read(b) 100 | } 101 | 102 | // Close is here to make S3ReadCloser satisfy ReadCloser interface 103 | func (s *S3ReadCloser) Close() error { 104 | return nil 105 | } 106 | -------------------------------------------------------------------------------- /s3_test.go: -------------------------------------------------------------------------------- 1 | //go:build pro 2 | 3 | package goreplay 4 | 5 | import ( 6 | "fmt" 7 | "math/rand" 8 | "os" 9 | "path/filepath" 10 | "testing" 11 | "time" 12 | 13 | "github.com/aws/aws-sdk-go/aws" 14 | "github.com/aws/aws-sdk-go/service/s3" 15 | ) 16 | 17 | func TestS3Output(t *testing.T) { 18 | bucket := aws.String("test-gor") 19 | rnd := rand.Int63() 20 | path := fmt.Sprintf("s3://test-gor/%d/requests.gz", rnd) 21 | 22 | output := NewS3Output(path, &FileOutputConfig{queueLimit: 2}) 23 | 24 | svc := s3.New(output.session) 25 | 26 | output.Write([]byte("1 1 1\ntest")) 27 | output.Write([]byte("1 1 1\ntest")) 28 | output.buffer.updateName() 29 | output.Write([]byte("1 1 1\ntest")) 30 | output.Write([]byte("1 1 1\ntest")) 31 | output.buffer.updateName() 32 | output.Write([]byte("1 1 1\ntest")) 33 | 34 | time.Sleep(time.Second) 35 | 36 | params := &s3.ListObjectsInput{ 37 | Bucket: bucket, 38 | Prefix: aws.String(fmt.Sprintf("%d", rnd)), 39 | } 40 | 41 | resp, _ := svc.ListObjects(params) 42 | if len(resp.Contents) != 2 { 43 | t.Error("Should create 2 objects", len(resp.Contents)) 44 | } else { 45 | if *resp.Contents[0].Key != fmt.Sprintf("%d/requests_0.gz", rnd) || 46 | *resp.Contents[1].Key != fmt.Sprintf("%d/requests_1.gz", rnd) { 47 | t.Error("Should assign proper names", resp.Contents) 48 | } 49 | } 50 | 51 | for _, c := range resp.Contents { 52 | svc.DeleteObject(&s3.DeleteObjectInput{Bucket: bucket, Key: c.Key}) 53 | } 54 | 55 | matches, _ := filepath.Glob(fmt.Sprintf("/tmp/gor_output_s3_*")) 56 | for _, m := range matches { 57 | os.Remove(m) 58 | } 59 | } 60 | 61 | func 
TestS3OutputQueueLimit(t *testing.T) { 62 | bucket := aws.String("test-gor") 63 | rnd := rand.Int63() 64 | path := fmt.Sprintf("s3://test-gor/%d/requests.gz", rnd) 65 | 66 | output := NewS3Output(path, &FileOutputConfig{queueLimit: 100}) 67 | output.closeCh = make(chan struct{}, 3) 68 | 69 | svc := s3.New(output.session) 70 | 71 | for i := 0; i < 3; i++ { 72 | for i := 0; i < 100; i++ { 73 | output.Write([]byte("1 1 1\ntest")) 74 | } 75 | output.buffer.updateName() 76 | } 77 | output.buffer.updateName() 78 | output.Write([]byte("1 1 1\ntest")) 79 | 80 | for i := 0; i < 3; i++ { 81 | <-output.closeCh 82 | } 83 | 84 | params := &s3.ListObjectsInput{ 85 | Bucket: bucket, 86 | Prefix: aws.String(fmt.Sprintf("%d", rnd)), 87 | } 88 | 89 | resp, _ := svc.ListObjects(params) 90 | if len(resp.Contents) != 3 { 91 | t.Error("Should create 3 object", len(resp.Contents)) 92 | } else { 93 | if *resp.Contents[0].Key != fmt.Sprintf("%d/requests_0.gz", rnd) || 94 | *resp.Contents[1].Key != fmt.Sprintf("%d/requests_1.gz", rnd) { 95 | t.Error("Should assign proper names", resp.Contents) 96 | } 97 | } 98 | 99 | for _, c := range resp.Contents { 100 | svc.DeleteObject(&s3.DeleteObjectInput{Bucket: bucket, Key: c.Key}) 101 | } 102 | 103 | matches, _ := filepath.Glob(fmt.Sprintf("/tmp/gor_output_s3_*")) 104 | for _, m := range matches { 105 | os.Remove(m) 106 | } 107 | } 108 | 109 | func TestInputFileFromS3(t *testing.T) { 110 | rnd := rand.Int63() 111 | path := fmt.Sprintf("s3://test-gor-eu/%d/requests.gz", rnd) 112 | 113 | output := NewS3Output(path, &FileOutputConfig{queueLimit: 5000}) 114 | output.closeCh = make(chan struct{}, 10) 115 | 116 | for i := 0; i <= 20000; i++ { 117 | output.Write([]byte("1 1 1\ntest")) 118 | 119 | if i%5000 == 0 { 120 | output.buffer.updateName() 121 | } 122 | } 123 | 124 | output.Write([]byte("1 1 1\ntest")) 125 | 126 | for i := 0; i < 2; i++ { 127 | <-output.closeCh 128 | } 129 | 130 | input := NewFileInput(fmt.Sprintf("s3://test-gor-eu/%d", rnd), false, 100, 0, false) 131 | 132 | buf := make([]byte, 1000) 133 | for i := 0; i <= 19999; i++ { 134 | input.Read(buf) 135 | } 136 | 137 | // Cleanup artifacts 138 | bucket := aws.String("test-gor") 139 | svc := s3.New(output.session) 140 | params := &s3.ListObjectsInput{ 141 | Bucket: bucket, 142 | Prefix: aws.String(fmt.Sprintf("%d", rnd)), 143 | } 144 | 145 | resp, _ := svc.ListObjects(params) 146 | 147 | for _, c := range resp.Contents { 148 | svc.DeleteObject(&s3.DeleteObjectInput{Bucket: bucket, Key: c.Key}) 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /settings_test.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | ) 7 | 8 | func TestAppSettings(t *testing.T) { 9 | a := AppSettings{} 10 | _, err := json.Marshal(&a) 11 | if err != nil { 12 | t.Error(err) 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /site/.gitignore: -------------------------------------------------------------------------------- 1 | _site 2 | .sass-cache 3 | .jekyll-metadata 4 | -------------------------------------------------------------------------------- /site/Gemfile: -------------------------------------------------------------------------------- 1 | source "https://rubygems.org" 2 | ruby RUBY_VERSION 3 | 4 | # Hello! This is where you manage which Jekyll version is used to run. 
5 | # When you want to use a different version, change it below, save the 6 | # file and run `bundle install`. Run Jekyll with `bundle exec`, like so: 7 | # 8 | # bundle exec jekyll serve 9 | # 10 | # This will help ensure the proper Jekyll version is running. 11 | # Happy Jekylling! 12 | gem "jekyll", "3.3.1" 13 | 14 | # This is the default theme for new Jekyll sites. You may change this to anything you like. 15 | gem "minima", "~> 2.0" 16 | 17 | # If you want to use GitHub Pages, remove the "gem "jekyll"" above and 18 | # uncomment the line below. To upgrade, run `bundle update github-pages`. 19 | # gem "github-pages", group: :jekyll_plugins 20 | 21 | # If you have any plugins, put them here! 22 | group :jekyll_plugins do 23 | gem "jekyll-feed", "~> 0.6" 24 | end 25 | -------------------------------------------------------------------------------- /site/Gemfile.lock: -------------------------------------------------------------------------------- 1 | GEM 2 | remote: https://rubygems.org/ 3 | specs: 4 | addressable (2.5.0) 5 | public_suffix (~> 2.0, >= 2.0.2) 6 | colorator (1.1.0) 7 | ffi (1.9.14) 8 | forwardable-extended (2.6.0) 9 | jekyll (3.3.1) 10 | addressable (~> 2.4) 11 | colorator (~> 1.0) 12 | jekyll-sass-converter (~> 1.0) 13 | jekyll-watch (~> 1.1) 14 | kramdown (~> 1.3) 15 | liquid (~> 3.0) 16 | mercenary (~> 0.3.3) 17 | pathutil (~> 0.9) 18 | rouge (~> 1.7) 19 | safe_yaml (~> 1.0) 20 | jekyll-feed (0.8.0) 21 | jekyll (~> 3.3) 22 | jekyll-sass-converter (1.5.0) 23 | sass (~> 3.4) 24 | jekyll-watch (1.5.0) 25 | listen (~> 3.0, < 3.1) 26 | kramdown (1.13.1) 27 | liquid (3.0.6) 28 | listen (3.0.8) 29 | rb-fsevent (~> 0.9, >= 0.9.4) 30 | rb-inotify (~> 0.9, >= 0.9.7) 31 | mercenary (0.3.6) 32 | minima (2.1.0) 33 | jekyll (~> 3.3) 34 | pathutil (0.14.0) 35 | forwardable-extended (~> 2.6) 36 | public_suffix (2.0.5) 37 | rb-fsevent (0.9.8) 38 | rb-inotify (0.9.7) 39 | ffi (>= 0.5.0) 40 | rouge (1.11.1) 41 | safe_yaml (1.0.4) 42 | sass (3.4.23) 43 | 44 | PLATFORMS 45 | ruby 46 | 47 | DEPENDENCIES 48 | jekyll (= 3.3.1) 49 | jekyll-feed (~> 0.6) 50 | minima (~> 2.0) 51 | 52 | RUBY VERSION 53 | ruby 2.3.1p112 54 | 55 | BUNDLED WITH 56 | 1.13.7 57 | -------------------------------------------------------------------------------- /site/_config.yml: -------------------------------------------------------------------------------- 1 | # Welcome to Jekyll! 2 | # 3 | # This config file is meant for settings that affect your whole blog, values 4 | # which you are expected to set up once and rarely edit after that. If you find 5 | # yourself editing this file very often, consider using Jekyll's data files 6 | # feature for the data you need to update frequently. 7 | # 8 | # For technical reasons, this file is *NOT* reloaded automatically when you use 9 | # 'bundle exec jekyll serve'. If you change this file, please restart the server process. 10 | 11 | # Site settings 12 | # These are used to personalize your new site. If you look in the HTML files, 13 | # you will see them accessed via {{ site.title }}, {{ site.email }}, and so on. 14 | # You can create any custom variable you would like, and they will be accessible 15 | # in the templates via {{ site.myvariable }}. 16 | title: Your awesome title 17 | email: your-email@domain.com 18 | description: > # this means to ignore newlines until "baseurl:" 19 | Write an awesome description for your new site here. You can edit this 20 | line in _config.yml. 
It will appear in your document head meta (for 21 | Google search results) and in your feed.xml site description. 22 | baseurl: "" # the subpath of your site, e.g. /blog 23 | url: "" # the base hostname & protocol for your site, e.g. http://example.com 24 | twitter_username: jekyllrb 25 | github_username: jekyll 26 | 27 | # Build settings 28 | markdown: kramdown 29 | theme: minima 30 | gems: 31 | - jekyll-feed 32 | exclude: 33 | - Gemfile 34 | - Gemfile.lock 35 | -------------------------------------------------------------------------------- /site/_posts/2017-01-06-welcome-to-jekyll.markdown: -------------------------------------------------------------------------------- 1 | --- 2 | layout: post 3 | title: "Welcome to Jekyll!" 4 | date: 2017-01-06 11:19:34 +0300 5 | categories: jekyll update 6 | --- 7 | You’ll find this post in your `_posts` directory. Go ahead and edit it and re-build the site to see your changes. You can rebuild the site in many different ways, but the most common way is to run `jekyll serve`, which launches a web server and auto-regenerates your site when a file is updated. 8 | 9 | To add new posts, simply add a file in the `_posts` directory that follows the convention `YYYY-MM-DD-name-of-post.ext` and includes the necessary front matter. Take a look at the source for this post to get an idea about how it works. 10 | 11 | Jekyll also offers powerful support for code snippets: 12 | 13 | {% highlight ruby %} 14 | def print_hi(name) 15 | puts "Hi, #{name}" 16 | end 17 | print_hi('Tom') 18 | #=> prints 'Hi, Tom' to STDOUT. 19 | {% endhighlight %} 20 | 21 | Check out the [Jekyll docs][jekyll-docs] for more info on how to get the most out of Jekyll. File all bugs/feature requests at [Jekyll’s GitHub repo][jekyll-gh]. If you have questions, you can ask them on [Jekyll Talk][jekyll-talk]. 22 | 23 | [jekyll-docs]: http://jekyllrb.com/docs/home 24 | [jekyll-gh]: https://github.com/jekyll/jekyll 25 | [jekyll-talk]: https://talk.jekyllrb.com/ 26 | -------------------------------------------------------------------------------- /site/about.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: About 4 | permalink: /about/ 5 | --- 6 | 7 | This is the base Jekyll theme. You can find out more info about customizing your Jekyll theme, as well as basic Jekyll usage documentation at [jekyllrb.com](http://jekyllrb.com/) 8 | 9 | You can find the source code for the Jekyll new theme at: 10 | {% include icon-github.html username="jekyll" %} / 11 | [minima](https://github.com/jekyll/minima) 12 | 13 | You can find the source code for Jekyll at 14 | {% include icon-github.html username="jekyll" %} / 15 | [jekyll](https://github.com/jekyll/jekyll) 16 | -------------------------------------------------------------------------------- /site/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | # You don't need to edit this file, it's empty on purpose. 
3 | # Edit theme's home layout instead if you wanna make some changes 4 | # See: https://jekyllrb.com/docs/themes/#overriding-theme-defaults 5 | layout: home 6 | --- 7 | -------------------------------------------------------------------------------- /snapcraft.yaml: -------------------------------------------------------------------------------- 1 | name: goreplay 2 | version: '1.0' 3 | summary: GoReplay is an open-source tool for capturing and replaying live HTTP traffic 4 | description: | 5 | GoReplay is an open-source tool for capturing and replaying 6 | live HTTP traffic into a test environment in order to continuously 7 | test your system with real data. It can be used to increase confidence 8 | in code deployments, configuration changes and infrastructure changes. 9 | grade: stable 10 | confinement: strict 11 | base: core18 12 | parts: 13 | goreplay: 14 | plugin: go 15 | source: https://github.com/buger/goreplay.git 16 | go-importpath: github.com/buger/goreplay 17 | build-packages: 18 | - build-essential 19 | - libpcap-dev 20 | stage-packages: 21 | - libpcap0.8 22 | 23 | apps: 24 | goreplay: 25 | command: bin/goreplay 26 | daemon: simple 27 | restart-condition: on-abnormal 28 | plugs: 29 | - home 30 | - network 31 | - network-bind 32 | - network-control 33 | - network-observe 34 | - netlink-connector 35 | - netlink-audit 36 | - bluetooth-control 37 | - firewall-control 38 | - x11 39 | 40 | -------------------------------------------------------------------------------- /tcp_client.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "crypto/tls" 5 | "io" 6 | "net" 7 | "runtime/debug" 8 | "syscall" 9 | "time" 10 | ) 11 | 12 | // TCPClientConfig client configuration 13 | type TCPClientConfig struct { 14 | Debug bool 15 | ConnectionTimeout time.Duration 16 | Timeout time.Duration 17 | ResponseBufferSize int 18 | Secure bool 19 | } 20 | 21 | // TCPClient client connection properties 22 | type TCPClient struct { 23 | baseURL string 24 | addr string 25 | conn net.Conn 26 | respBuf []byte 27 | config *TCPClientConfig 28 | redirectsCount int 29 | } 30 | 31 | // NewTCPClient returns new TCPClient 32 | func NewTCPClient(addr string, config *TCPClientConfig) *TCPClient { 33 | if config.Timeout.Nanoseconds() == 0 { 34 | config.Timeout = 5 * time.Second 35 | } 36 | 37 | config.ConnectionTimeout = config.Timeout 38 | 39 | if config.ResponseBufferSize == 0 { 40 | config.ResponseBufferSize = 100 * 1024 // 100kb 41 | } 42 | 43 | client := &TCPClient{config: config, addr: addr} 44 | client.respBuf = make([]byte, config.ResponseBufferSize) 45 | 46 | return client 47 | } 48 | 49 | // Connect creates a tcp connection of the client 50 | func (c *TCPClient) Connect() (err error) { 51 | c.Disconnect() 52 | 53 | c.conn, err = net.DialTimeout("tcp", c.addr, c.config.ConnectionTimeout) 54 | 55 | if c.config.Secure { 56 | tlsConn := tls.Client(c.conn, &tls.Config{InsecureSkipVerify: true}) 57 | 58 | if err = tlsConn.Handshake(); err != nil { 59 | return 60 | } 61 | 62 | c.conn = tlsConn 63 | } 64 | 65 | return 66 | } 67 | 68 | // Disconnect closes the client connection 69 | func (c *TCPClient) Disconnect() { 70 | if c.conn != nil { 71 | c.conn.Close() 72 | c.conn = nil 73 | Debug(1, "[TCPClient] Disconnected: ", c.baseURL) 74 | } 75 | } 76 | 77 | func (c *TCPClient) isAlive() bool { 78 | one := make([]byte, 1) 79 | 80 | // Ready 1 byte from socket without timeout to check if it not closed 81 | 
c.conn.SetReadDeadline(time.Now().Add(time.Millisecond)) 82 | _, err := c.conn.Read(one) 83 | 84 | if err == nil { 85 | return true 86 | } else if err == io.EOF { 87 | Debug(1, "[TCPClient] connection closed, reconnecting") 88 | return false 89 | } else if err == syscall.EPIPE { 90 | Debug(1, "Detected broken pipe.", err) 91 | return false 92 | } 93 | 94 | return true 95 | } 96 | 97 | // Send sends data over created tcp connection 98 | func (c *TCPClient) Send(data []byte) (response []byte, err error) { 99 | // Don't exit on panic 100 | defer func() { 101 | if r := recover(); r != nil { 102 | Debug(1, "[TCPClient]", r, string(data)) 103 | 104 | if _, ok := r.(error); !ok { 105 | Debug(1, "[TCPClient] Failed to send request: ", string(data)) 106 | Debug(1, "PANIC: pkg:", r, debug.Stack()) 107 | } 108 | } 109 | }() 110 | 111 | if c.conn == nil || !c.isAlive() { 112 | Debug(1, "[TCPClient] Connecting:", c.baseURL) 113 | if err = c.Connect(); err != nil { 114 | Debug(1, "[TCPClient] Connection error:", err) 115 | return 116 | } 117 | } 118 | 119 | timeout := time.Now().Add(c.config.Timeout) 120 | 121 | c.conn.SetWriteDeadline(timeout) 122 | 123 | if c.config.Debug { 124 | Debug(1, "[TCPClient] Sending:", string(data)) 125 | } 126 | 127 | if _, err = c.conn.Write(data); err != nil { 128 | Debug(1, "[TCPClient] Write error:", err, c.baseURL) 129 | return 130 | } 131 | 132 | var readBytes, n int 133 | var currentChunk []byte 134 | timeout = time.Now().Add(c.config.Timeout) 135 | 136 | for { 137 | c.conn.SetReadDeadline(timeout) 138 | 139 | if readBytes < len(c.respBuf) { 140 | n, err = c.conn.Read(c.respBuf[readBytes:]) 141 | readBytes += n 142 | 143 | if err != nil { 144 | if err == io.EOF { 145 | err = nil 146 | } 147 | break 148 | } 149 | } else { 150 | if currentChunk == nil { 151 | currentChunk = make([]byte, readChunkSize) 152 | } 153 | 154 | n, err = c.conn.Read(currentChunk) 155 | 156 | if err == io.EOF { 157 | break 158 | } else if err != nil { 159 | Debug(1, "[TCPClient] Read the whole body error:", err, c.baseURL) 160 | break 161 | } 162 | 163 | readBytes += int(n) 164 | } 165 | 166 | if readBytes >= maxResponseSize { 167 | Debug(1, "[TCPClient] Body is more than the max size", maxResponseSize, 168 | c.baseURL) 169 | break 170 | } 171 | 172 | // For following chunks expect less timeout 173 | timeout = time.Now().Add(c.config.Timeout / 5) 174 | } 175 | 176 | if err != nil { 177 | Debug(1, "[TCPClient] Response read error", err, c.conn, readBytes) 178 | return 179 | } 180 | 181 | if readBytes > len(c.respBuf) { 182 | readBytes = len(c.respBuf) 183 | } 184 | 185 | payload := make([]byte, readBytes) 186 | copy(payload, c.respBuf[:readBytes]) 187 | 188 | if c.config.Debug { 189 | Debug(1, "[TCPClient] Received:", string(payload)) 190 | } 191 | 192 | return payload, err 193 | } 194 | -------------------------------------------------------------------------------- /test_input.go: -------------------------------------------------------------------------------- 1 | package goreplay 2 | 3 | import ( 4 | "encoding/base64" 5 | "errors" 6 | "math/rand" 7 | "time" 8 | ) 9 | 10 | // ErrorStopped is the error returned when the go routines reading the input is stopped. 
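// Inputs such as TestInput and Middleware return it from PluginRead once their stop channel is closed, so callers know the plugin has shut down.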
11 | var ErrorStopped = errors.New("reading stopped")
12 | 
13 | // TestInput used for testing purposes; it allows emitting requests on demand
14 | type TestInput struct {
15 | 	data       chan []byte
16 | 	skipHeader bool
17 | 	stop       chan bool // Channel used only to indicate the goroutine should shut down
18 | }
19 | 
20 | // NewTestInput constructor for TestInput
21 | func NewTestInput() (i *TestInput) {
22 | 	i = new(TestInput)
23 | 	i.data = make(chan []byte, 100)
24 | 	i.stop = make(chan bool)
25 | 	return
26 | }
27 | 
28 | // PluginRead reads a message from this plugin
29 | func (i *TestInput) PluginRead() (*Message, error) {
30 | 	var msg Message
31 | 	select {
32 | 	case buf := <-i.data:
33 | 		msg.Data = buf
34 | 		if !i.skipHeader {
35 | 			msg.Meta = payloadHeader(RequestPayload, uuid(), time.Now().UnixNano(), -1)
36 | 		} else {
37 | 			msg.Meta, msg.Data = payloadMetaWithBody(msg.Data)
38 | 		}
39 | 
40 | 		return &msg, nil
41 | 	case <-i.stop:
42 | 		return nil, ErrorStopped
43 | 	}
44 | }
45 | 
46 | // Close closes this plugin
47 | func (i *TestInput) Close() error {
48 | 	close(i.stop)
49 | 	return nil
50 | }
51 | 
52 | // EmitBytes sends data
53 | func (i *TestInput) EmitBytes(data []byte) {
54 | 	i.data <- data
55 | }
56 | 
57 | // EmitGET emits a GET request without headers
58 | func (i *TestInput) EmitGET() {
59 | 	i.data <- []byte("GET / HTTP/1.1\r\n\r\n")
60 | }
61 | 
62 | // EmitPOST emits a POST request with Content-Length
63 | func (i *TestInput) EmitPOST() {
64 | 	i.data <- []byte("POST /pub/WWW/ HTTP/1.1\r\nContent-Length: 7\r\nHost: www.w3.org\r\n\r\na=1&b=2")
65 | }
66 | 
67 | // EmitChunkedPOST emits a POST request with `Transfer-Encoding: chunked` and a chunked body
68 | func (i *TestInput) EmitChunkedPOST() {
69 | 	i.data <- []byte("POST /pub/WWW/ HTTP/1.1\r\nHost: www.w3.org\r\nTransfer-Encoding: chunked\r\n\r\n4\r\nWiki\r\n5\r\npedia\r\ne\r\n in\r\n\r\nchunks.\r\n0\r\n\r\n")
70 | }
71 | 
72 | // EmitLargePOST emits a POST request with a large payload (5 MB of random data, base64-encoded)
73 | func (i *TestInput) EmitLargePOST() {
74 | 	size := 5 * 1024 * 1024 // 5 MB
75 | 	rb := make([]byte, size)
76 | 	rand.Read(rb)
77 | 
78 | 	rs := base64.URLEncoding.EncodeToString(rb)
79 | 
80 | 	i.data <- []byte("POST / HTTP/1.1\r\nHost: www.w3.org\r\nContent-Length:5242880\r\n\r\n" + rs)
81 | }
82 | 
83 | // EmitSizedPOST emits a POST with a payload of the supplied size (the Content-Length header is left at its fixed 5 MB value)
84 | func (i *TestInput) EmitSizedPOST(payloadSize int) {
85 | 	rb := make([]byte, payloadSize)
86 | 	rand.Read(rb)
87 | 
88 | 	rs := base64.URLEncoding.EncodeToString(rb)
89 | 
90 | 	i.data <- []byte("POST / HTTP/1.1\r\nHost: www.w3.org\r\nContent-Length:5242880\r\n\r\n" + rs)
91 | }
92 | 
93 | // EmitOPTIONS emits an OPTIONS request, similar to GET
94 | func (i *TestInput) EmitOPTIONS() {
95 | 	i.data <- []byte("OPTIONS / HTTP/1.1\r\nHost: www.w3.org\r\n\r\n")
96 | }
97 | 
98 | func (i *TestInput) String() string {
99 | 	return "Test Input"
100 | }
101 | 
--------------------------------------------------------------------------------
/test_output.go:
--------------------------------------------------------------------------------
1 | package goreplay
2 | 
3 | type writeCallback func(*Message)
4 | 
5 | // TestOutput used in testing to intercept any output into a callback
6 | type TestOutput struct {
7 | 	cb writeCallback
8 | }
9 | 
10 | // NewTestOutput constructor for TestOutput, accepts a callback which gets called on each incoming Write
11 | func NewTestOutput(cb writeCallback) PluginWriter {
12 | 	i := new(TestOutput)
13 | 	i.cb = cb
14 | 
15 | 	return i
16 | }
17 | 
18 | // PluginWrite writes a message to this plugin
19 | func (i *TestOutput) PluginWrite(msg *Message) (int, error) {
20 | 	i.cb(msg)
21 | 
22 | 	return len(msg.Data) + len(msg.Meta), nil
23 | }
24 | 
25 | func (i *TestOutput) String() string {
26 | 	return "Test Output"
27 | }
28 | 
--------------------------------------------------------------------------------
/version.go:
--------------------------------------------------------------------------------
1 | package goreplay
2 | 
3 | // VERSION is the current version of goreplay
4 | var VERSION = "2.0.0"
5 | 
--------------------------------------------------------------------------------
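
The TCPClient in tcp_client.go above is the low-level sender used when replaying raw payloads over TCP. The following is a minimal, hypothetical usage sketch and is not part of the repository: it shows the Send flow against a local test server. The test name, the handler, and the example.com Host header are illustrative assumptions; only NewTCPClient, TCPClientConfig, and Send come from the file above.

package goreplay

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"
)

// TestTCPClientSendSketch is a hypothetical example (not in the repository):
// it sends a raw HTTP request through TCPClient to a local server and checks
// that the status line comes back in the buffered response.
func TestTCPClientSendSketch(t *testing.T) {
	// Local HTTP server standing in for a replay target.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte("hello"))
	}))
	defer srv.Close()

	client := NewTCPClient(srv.Listener.Addr().String(), &TCPClientConfig{
		Timeout: time.Second, // ConnectionTimeout defaults to the same value
	})

	// "Connection: close" makes the server close the socket, so Send's read
	// loop terminates on EOF instead of waiting out the read deadline.
	req := []byte("GET / HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n")
	resp, err := client.Send(req)
	if err != nil {
		t.Fatal(err)
	}
	if !strings.Contains(string(resp), "200 OK") {
		t.Fatalf("unexpected response: %q", resp)
	}
}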
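
TestInput and TestOutput are the in-memory plugins the test suite uses in place of real traffic sources and sinks. The sketch below is likewise not part of the repository: it wires one directly to the other, bypassing the emitter, to illustrate the PluginRead/PluginWrite contract; the test name and assertions are assumptions, while all called identifiers appear in the files above.

package goreplay

import "testing"

// TestInputToOutputSketch is a hypothetical example (not in the repository):
// it emits a request via TestInput, reads it back as a *Message, and hands it
// to TestOutput the way the emitter would.
func TestInputToOutputSketch(t *testing.T) {
	input := NewTestInput()
	received := make(chan *Message, 1)
	output := NewTestOutput(func(msg *Message) { received <- msg })

	input.EmitGET()

	msg, err := input.PluginRead()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := output.PluginWrite(msg); err != nil {
		t.Fatal(err)
	}

	got := <-received
	if len(got.Data) == 0 || len(got.Meta) == 0 {
		t.Fatal("expected payload and meta header to be set")
	}

	// Closing the input makes subsequent PluginRead calls return ErrorStopped.
	input.Close()
	if _, err := input.PluginRead(); err != ErrorStopped {
		t.Fatalf("expected ErrorStopped, got %v", err)
	}
}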