├── .cargo └── config.toml ├── .config └── nextest.toml ├── .dockerignore ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── bench_run.yaml │ ├── build.yaml │ ├── build_and_test.yaml │ ├── docker.yaml │ ├── license_check.yaml │ ├── lint.yaml │ ├── publish-to-pages.yaml │ ├── release.yaml │ └── windsock_benches.yaml ├── .gitignore ├── .pre-commit-config.yaml ├── CODE_OF_CONDUCT.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── NOTICE ├── README.md ├── changelog.md ├── cliff.toml ├── custom-transforms-example ├── Cargo.toml ├── config │ ├── config.yaml │ ├── docker-compose.yaml │ ├── redis.conf │ └── topology.yaml ├── src │ ├── main.rs │ └── valkey_get_rewrite.rs └── tests │ └── test.rs ├── deny.toml ├── docs ├── book.toml ├── mdbook.sh ├── mermaid-init.js ├── mermaid.min.js ├── src │ ├── SUMMARY.md │ ├── dev-docs │ │ ├── contributing.md │ │ ├── debugging.md │ │ ├── end-to-end-overview.md │ │ ├── end-to-end-overview.png │ │ ├── setting-up-linux.md │ │ └── setting-up-macos.md │ ├── examples │ │ ├── cassandra-cluster-shotover-sidecar.md │ │ ├── valkey-clustering-aware.md │ │ └── valkey-clustering-unaware.md │ ├── index.md │ ├── logo.png │ ├── logo.svg │ ├── other │ │ └── benchmark.md │ ├── sources.md │ ├── transforms.md │ └── user-guide │ │ ├── concepts.md │ │ ├── configuration.md │ │ ├── deployment.md │ │ ├── getting-started.md │ │ ├── introduction.md │ │ ├── observability.md │ │ └── writing-custom-transforms.md └── theme │ └── favicon.svg ├── ec2-cargo ├── Cargo.toml ├── readme.md └── src │ └── main.rs ├── rust-toolchain.toml ├── rustfmt.toml ├── shotover-proxy ├── Cargo.toml ├── benches │ └── windsock │ │ ├── cassandra │ │ ├── bench.rs │ │ └── mod.rs │ │ ├── cloud │ │ ├── aws.rs │ │ └── mod.rs │ │ ├── common.rs │ │ ├── kafka │ │ ├── bench.rs │ │ └── mod.rs │ │ ├── main.rs │ │ ├── profilers.rs │ │ ├── profilers │ │ ├── samply.rs │ │ ├── sar.rs │ │ └── shotover_metrics.rs │ │ ├── readme.md │ │ ├── shotover.rs │ │ └── valkey │ │ ├── bench.rs │ │ └── mod.rs ├── build │ ├── build_release.sh │ ├── cassandra-cpp-driver.control │ ├── install_ubuntu_deps.sh │ ├── install_ubuntu_packages.sh │ └── is_releasable.sh ├── config │ ├── config.yaml │ └── topology.yaml ├── src │ └── main.rs └── tests │ ├── cassandra_int_tests │ ├── batch_statements.rs │ ├── cache │ │ ├── assert.rs │ │ └── mod.rs │ ├── cluster │ │ ├── mod.rs │ │ ├── multi_rack.rs │ │ ├── single_rack_v3.rs │ │ └── single_rack_v4.rs │ ├── collections │ │ ├── list.rs │ │ ├── map.rs │ │ ├── mod.rs │ │ ├── set.rs │ │ └── vector.rs │ ├── functions.rs │ ├── keyspace.rs │ ├── mod.rs │ ├── native_types.rs │ ├── prepared_statements_all.rs │ ├── prepared_statements_simple.rs │ ├── protect.rs │ ├── routing.rs │ ├── table.rs │ ├── timestamp.rs │ └── udt.rs │ ├── kafka_int_tests │ ├── mod.rs │ └── test_cases.rs │ ├── lib.rs │ ├── opensearch_int_tests │ └── mod.rs │ ├── runner │ ├── mod.rs │ ├── observability_int_tests.rs │ └── runner_int_tests.rs │ ├── test-configs │ ├── cassandra │ │ ├── cassandra-5 │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── cluster-multi-rack-2-per-rack │ │ │ ├── docker-compose.yaml │ │ │ ├── topology_rack1.yaml │ │ │ ├── topology_rack2.yaml │ │ │ └── topology_rack3.yaml │ │ ├── cluster-multi-rack │ │ │ ├── docker-compose.yaml │ │ │ ├── topology_rack1.yaml │ │ │ ├── topology_rack2.yaml │ │ │ └── topology_rack3.yaml │ │ ├── cluster-tls │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── cluster-v3 │ │ │ ├── docker-compose.yaml │ │ │ ├── 
topology-dummy-peers.yaml │ │ │ └── topology.yaml │ │ ├── cluster-v4 │ │ │ ├── docker-compose.yaml │ │ │ ├── topology-dummy-peers.yaml │ │ │ ├── topology-encode.yaml │ │ │ └── topology.yaml │ │ ├── cluster-v5 │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── passthrough-parse-request │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── passthrough-parse-response │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── passthrough-websocket-tls │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── passthrough-websocket │ │ │ ├── docker-compose.yaml │ │ │ ├── topology-encode.yaml │ │ │ └── topology.yaml │ │ ├── passthrough │ │ │ ├── docker-compose.yaml │ │ │ ├── topology-encode.yaml │ │ │ └── topology.yaml │ │ ├── peers-rewrite │ │ │ ├── docker-compose-3.11-cassandra.yaml │ │ │ ├── docker-compose-4.0-cassandra.yaml │ │ │ └── topology.yaml │ │ ├── protect-aws │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── protect-local │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── request-throttling.yaml │ │ ├── request-throttling │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── tls │ │ │ ├── docker-compose.yaml │ │ │ ├── topology-with-key.yaml │ │ │ └── topology.yaml │ │ └── valkey-cache │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ ├── invalid_non_terminating_last.yaml │ ├── invalid_protocol_mismatch.yaml │ ├── invalid_subchains.yaml │ ├── invalid_terminating_not_last.yaml │ ├── kafka │ │ ├── bench │ │ │ └── docker-compose.yaml │ │ ├── cluster-1-rack │ │ │ ├── docker-compose-short-idle-timeout.yaml │ │ │ ├── docker-compose.yaml │ │ │ ├── topology-single.yaml │ │ │ ├── topology1.yaml │ │ │ ├── topology2.yaml │ │ │ └── topology3.yaml │ │ ├── cluster-2-racks │ │ │ ├── docker-compose-rebalance-protocol.yaml │ │ │ ├── docker-compose.yaml │ │ │ ├── topology-rack1.yaml │ │ │ └── topology-rack2.yaml │ │ ├── cluster-3-racks │ │ │ ├── docker-compose.yaml │ │ │ ├── topology-rack1.yaml │ │ │ ├── topology-rack2.yaml │ │ │ └── topology-rack3.yaml │ │ ├── cluster-mtls │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── cluster-sasl-plain │ │ │ ├── docker-compose.yaml │ │ │ ├── topology-single.yaml │ │ │ ├── topology1.yaml │ │ │ ├── topology2.yaml │ │ │ └── topology3.yaml │ │ ├── cluster-sasl-scram-over-mtls │ │ │ ├── docker-compose.yaml │ │ │ ├── topology-single.yaml │ │ │ ├── topology1.yaml │ │ │ ├── topology2.yaml │ │ │ └── topology3.yaml │ │ ├── cluster-sasl-scram │ │ │ ├── docker-compose.yaml │ │ │ └── topology-single.yaml │ │ ├── cluster-tls │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── passthrough-mtls │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── passthrough-sasl-plain │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── passthrough-sasl-scram │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── passthrough-tls │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ │ ├── passthrough │ │ │ ├── docker-compose.yaml │ │ │ ├── topology-encode.yaml │ │ │ └── topology.yaml │ │ └── single-sasl-scram-plaintext-source-tls-sink │ │ │ ├── docker-compose.yaml │ │ │ └── topology.yaml │ ├── log-to-file │ │ ├── docker-compose.yaml │ │ └── topology.yaml │ ├── null-cassandra │ │ └── topology.yaml │ ├── null-valkey │ │ └── topology.yaml │ ├── opensearch-passthrough │ │ ├── docker-compose.yaml │ │ └── topology.yaml │ ├── query_type_filter │ │ └── simple.yaml │ ├── shotover-config │ │ ├── config1.yaml │ │ ├── config2.yaml │ │ ├── config3.yaml │ │ └── config_metrics_disabled.yaml │ ├── tee 
│ │ ├── fail.yaml │ │ ├── fail_with_mismatch.yaml │ │ ├── ignore.yaml │ │ ├── ignore_with_mismatch.yaml │ │ ├── log.yaml │ │ ├── log_with_mismatch.yaml │ │ ├── subchain.yaml │ │ ├── subchain_with_mismatch.yaml │ │ └── switch_chain.yaml │ └── valkey │ │ ├── cluster-auth │ │ ├── docker-compose.yaml │ │ └── topology.yaml │ │ ├── cluster-dr │ │ ├── docker-compose.yaml │ │ └── topology.yaml │ │ ├── cluster-handling │ │ ├── docker-compose.yaml │ │ └── topology.yaml │ │ ├── cluster-hiding │ │ ├── docker-compose.yaml │ │ ├── topology-encode.yaml │ │ └── topology.yaml │ │ ├── cluster-ports-rewrite │ │ ├── docker-compose.yaml │ │ └── topology.yaml │ │ ├── cluster-tls │ │ ├── docker-compose.yaml │ │ ├── topology-encode.yaml │ │ ├── topology-no-source-encryption.yaml │ │ └── topology.yaml │ │ ├── passthrough │ │ ├── docker-compose.yaml │ │ ├── redis.conf │ │ ├── topology-encode.yaml │ │ └── topology.yaml │ │ ├── tls-no-client-auth │ │ ├── docker-compose.yaml │ │ └── topology.yaml │ │ ├── tls-no-verify-hostname │ │ ├── docker-compose.yaml │ │ └── topology.yaml │ │ └── tls │ │ ├── docker-compose.yaml │ │ ├── redis-cli.sh │ │ ├── topology-encode.yaml │ │ └── topology.yaml │ ├── transforms │ ├── docker-compose-moto.yaml │ ├── log_to_file.rs │ ├── mod.rs │ ├── query_type_filter.rs │ └── tee.rs │ └── valkey_int_tests │ ├── assert.rs │ ├── basic_driver_tests.rs │ └── mod.rs ├── shotover ├── Cargo.toml ├── benches │ └── benches │ │ ├── chain.rs │ │ ├── codec │ │ ├── cassandra.rs │ │ ├── kafka.rs │ │ ├── kafka_requests │ │ │ ├── fetch.bin │ │ │ ├── list_offsets.bin │ │ │ ├── metadata.bin │ │ │ └── produce.bin │ │ └── mod.rs │ │ └── main.rs └── src │ ├── codec │ ├── cassandra.rs │ ├── kafka.rs │ ├── mod.rs │ ├── opensearch.rs │ └── valkey.rs │ ├── config │ ├── chain.rs │ ├── mod.rs │ └── topology.rs │ ├── connection.rs │ ├── connection_span.rs │ ├── frame │ ├── cassandra.rs │ ├── kafka.rs │ ├── mod.rs │ ├── opensearch.rs │ ├── valkey.rs │ ├── value.rs │ └── value │ │ ├── cassandra.rs │ │ └── valkey.rs │ ├── http.rs │ ├── lib.rs │ ├── message │ └── mod.rs │ ├── observability │ └── mod.rs │ ├── runner.rs │ ├── server.rs │ ├── sources │ ├── cassandra.rs │ ├── kafka.rs │ ├── mod.rs │ ├── opensearch.rs │ └── valkey.rs │ ├── tcp.rs │ ├── tls.rs │ ├── tracing_panic_handler.rs │ └── transforms │ ├── cassandra │ ├── mod.rs │ ├── peers_rewrite.rs │ ├── sink_cluster │ │ ├── connection.rs │ │ ├── mod.rs │ │ ├── murmur.rs │ │ ├── node.rs │ │ ├── node_pool.rs │ │ ├── rewrite.rs │ │ ├── routing_key.rs │ │ ├── test_cluster_data.json │ │ ├── test_router.rs │ │ ├── token_ring.rs │ │ └── topology.rs │ └── sink_single.rs │ ├── chain.rs │ ├── coalesce.rs │ ├── debug │ ├── force_parse.rs │ ├── log_to_file.rs │ ├── mod.rs │ ├── printer.rs │ └── returner.rs │ ├── filter.rs │ ├── kafka │ ├── mod.rs │ ├── sink_cluster │ │ ├── api_versions.rs │ │ ├── connections.rs │ │ ├── kafka_node.rs │ │ ├── mod.rs │ │ ├── scram_over_mtls.rs │ │ ├── scram_over_mtls │ │ │ ├── connection.rs │ │ │ ├── create_token.rs │ │ │ └── recreate_token_queue.rs │ │ ├── shotover_node.rs │ │ └── split.rs │ └── sink_single.rs │ ├── load_balance.rs │ ├── loopback.rs │ ├── mod.rs │ ├── null.rs │ ├── opensearch │ └── mod.rs │ ├── parallel_map.rs │ ├── protect │ ├── aws_kms.rs │ ├── crypto.rs │ ├── key_management.rs │ ├── local_kek.rs │ ├── mod.rs │ └── pkcs_11.rs │ ├── query_counter.rs │ ├── tee.rs │ ├── throttling.rs │ ├── util │ ├── cluster_connection_pool.rs │ └── mod.rs │ └── valkey │ ├── cache.rs │ ├── cluster_ports_rewrite.rs │ ├── mod.rs │ ├── sink_cluster.rs │ 
├── sink_single.rs │ └── timestamp_tagging.rs ├── test-helpers ├── Cargo.toml └── src │ ├── cert.rs │ ├── connection │ ├── cassandra │ │ ├── connection.rs │ │ ├── connection │ │ │ ├── cdrs.rs │ │ │ ├── cpp.rs │ │ │ ├── java.rs │ │ │ └── scylla.rs │ │ ├── cql_ws.rs │ │ ├── go.rs │ │ ├── go │ │ │ ├── basic.go │ │ │ ├── go.mod │ │ │ └── go.sum │ │ ├── mod.rs │ │ └── result_value.rs │ ├── java.rs │ ├── kafka │ │ ├── cpp.rs │ │ ├── java.rs │ │ ├── mod.rs │ │ ├── node.rs │ │ ├── node │ │ │ ├── index.js │ │ │ ├── package-lock.json │ │ │ └── package.json │ │ ├── python.rs │ │ └── python │ │ │ ├── .python-version │ │ │ ├── auth_fail.py │ │ │ ├── main.py │ │ │ ├── pyproject.toml │ │ │ └── uv.lock │ ├── mod.rs │ └── valkey_connection.rs │ ├── docker_compose.rs │ ├── lib.rs │ ├── metrics.rs │ ├── mock_cassandra.rs │ ├── shotover_process.rs │ └── test_tracing.rs ├── website ├── Cargo.toml ├── assets │ ├── arrow_right.png │ ├── favicon.ico │ ├── logo.png │ ├── style.css │ └── title_image.png ├── readme.md ├── src │ ├── cli.rs │ ├── docs.rs │ ├── main.rs │ └── version_tags.rs └── templates │ ├── base.html │ ├── docs.html │ └── landing.html └── windsock-cloud-docker ├── Cargo.toml └── src ├── container.rs └── main.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [target.x86_64-apple-darwin] 2 | rustflags = [ 3 | "-C", "link-arg=-undefined", 4 | "-C", "link-arg=dynamic_lookup", 5 | ] 6 | 7 | [target.aarch64-unknown-linux-gnu] 8 | linker = "aarch64-linux-gnu-gcc" 9 | 10 | [alias] 11 | # Can run every benchmark 12 | windsock = "test --release --bench windsock --features kafka,alpha-transforms,kafka-cpp-driver-tests,cassandra,valkey --" 13 | windsock-debug = "test --bench windsock --features kafka,alpha-transforms,kafka-cpp-driver-tests,cassandra,valkey --" 14 | 15 | # Can only run benchmarks specific to the protocol but compiles a lot faster 16 | windsock-valkey = "test --release --bench windsock --no-default-features --features valkey,alpha-transforms --" 17 | windsock-kafka = "test --release --bench windsock --no-default-features --features kafka,alpha-transforms,kafka-cpp-driver-tests --" 18 | windsock-cassandra = "test --release --bench windsock --no-default-features --features cassandra,alpha-transforms --" 19 | 20 | # Compile benches in docker to ensure compiled libc version is compatible with the EC2 instances libc 21 | windsock-cloud-docker = "run --package windsock-cloud-docker -- valkey,cassandra,kafka" 22 | windsock-cloud-docker-valkey = "run --package windsock-cloud-docker -- valkey" 23 | windsock-cloud-docker-kafka = "run --package windsock-cloud-docker -- kafka" 24 | windsock-cloud-docker-cassandra = "run --package windsock-cloud-docker -- cassandra" 25 | -------------------------------------------------------------------------------- /.config/nextest.toml: -------------------------------------------------------------------------------- 1 | [profile.default] 2 | fail-fast = false 3 | slow-timeout = { period = '5m', terminate-after = 2 } 4 | archive.include = [ 5 | { path = "debug/jassets", relative-to = "target" }, 6 | { path = "release/jassets", relative-to = "target" }, 7 | ] 8 | 9 | # Overwrites profile.default when the filter matches 10 | [[profile.default.overrides]] 11 | filter = 'package(shotover-proxy) | package(custom-transforms-example)' 12 | test-group = 'integration-tests' 13 | 14 | [test-groups] 15 | integration-tests = { max-threads = 1 } 16 | 
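A note on how the two files above fit together: the `[alias]` table in `.cargo/config.toml` is what makes invocations like `cargo windsock` work, while the nextest `test-groups` entry serializes the integration tests, presumably because they share Docker containers and fixed ports. A sketch of the resulting local workflow (only the aliases defined above plus the standard `cargo nextest run` subcommand):

```shell
# Run just the valkey windsock benches; compiles much faster than the full `cargo windsock`
cargo windsock-valkey

# Run the test suite under nextest; tests in package(shotover-proxy) and
# package(custom-transforms-example) run one at a time due to the
# integration-tests group configured above (max-threads = 1)
cargo nextest run
```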
-------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | /target 2 | /test_data 3 | /docs 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Describe the bug 11 | A clear and concise description of what the bug is. 12 | 13 | ## To Reproduce 14 | Steps to reproduce the behavior: 15 | 1. 16 | 2. 17 | ... 18 | 19 | ## Configuration 20 | 21 | ### Topology 22 | ``` 23 | // Your topology.yaml file 24 | ``` 25 | 26 | Please include any configuration for setting up services such as `docker-compose` that the `topology.yaml` depends on. 27 | 28 | ## Expected behaviour 29 | A clear and concise description of what you expected to happen. 30 | 31 | ## Systems and Version: 32 | - OS 33 | - Shotover Version 34 | - Version of 3rd party software e.g. Cassandra, Valkey 35 | 36 | ## Additional context 37 | Add any other context about the problem here. 38 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Is your feature request related to a problem? Please describe. 11 | A clear and concise description of what the problem is. 12 | 13 | ## Describe the solution you'd like 14 | A clear and concise description of what you want to happen. 15 | 16 | ## Describe alternatives you've considered 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | ## Additional context 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/workflows/bench_run.yaml: -------------------------------------------------------------------------------- 1 | name: MicroBenchmarks 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | # `workflow_dispatch` allows CodSpeed to trigger backtest 9 | # performance analysis in order to generate initial data. 10 | workflow_dispatch: 11 | 12 | # Cancel already running jobs 13 | concurrency: 14 | group: benchmark_run_${{ github.head_ref }} 15 | cancel-in-progress: true 16 | 17 | jobs: 18 | benchmarks: 19 | runs-on: ubuntu-22.04 20 | steps: 21 | - uses: actions/checkout@v4 22 | - uses: Swatinem/rust-cache@v2 23 | with: 24 | # this line means that only the main branch writes to the cache 25 | # benefits: 26 | # * prevents main branch caches from being evicted in favor of a PR cache 27 | # * saves about 1min per workflow by skipping the actual cache write 28 | # downsides: 29 | # * PRs that update rust version or changes deps will be slow to iterate on due to changes not being cached. 
30 | save-if: ${{ github.ref == 'refs/heads/main' }} 31 | - name: Install codspeed 32 | run: cargo install --git https://github.com/CodSpeedHQ/codspeed-rust --rev 250ad317de0997bf50f2780653160774c0b64d59 cargo-codspeed 33 | - name: Build the benchmark target(s) 34 | # specify `--jobs 2` to avoid OoM on 4 core (and therefore 4 jobs) linux runners 35 | run: cargo codspeed build --jobs 2 --features alpha-transforms 36 | - name: Run the benchmarks 37 | uses: CodSpeedHQ/action@v2 38 | with: 39 | run: cargo codspeed run -------------------------------------------------------------------------------- /.github/workflows/build.yaml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | # Cancel already running jobs 10 | concurrency: 11 | group: build_${{ github.head_ref }} 12 | cancel-in-progress: true 13 | 14 | env: 15 | CARGO_TERM_COLOR: always 16 | RUST_BACKTRACE: 1 17 | 18 | jobs: 19 | build: 20 | strategy: 21 | matrix: 22 | include: 23 | - name: Ubuntu 22.04 - Release - x86_64 24 | runner: ubuntu-22.04 25 | cargo_flags: --release 26 | - name: Ubuntu 22.04 - Release - aarch64 27 | runner: ubuntu-22.04 28 | cargo_flags: --target aarch64-unknown-linux-gnu --release 29 | name: ${{ matrix.name }} 30 | runs-on: ${{ matrix.runner }} 31 | steps: 32 | - name: cache custom ubuntu packages 33 | uses: actions/cache@v4 34 | with: 35 | path: shotover-proxy/build/packages 36 | key: ubuntu-22.04-packages 37 | - uses: actions/checkout@v4 38 | # We purposefully dont cache here as build_and_test will always be the bottleneck 39 | # so we should leave the cache alone so build_and_test can make more use of it. 40 | - name: Install ubuntu packages 41 | run: shotover-proxy/build/install_ubuntu_packages.sh 42 | # This can be removed when we swap to aarch64 runners instead of cross compiling 43 | - name: Install aarch64 gcc 44 | run: sudo apt-get install -y gcc-aarch64-linux-gnu 45 | - name: Install cargo-hack 46 | uses: taiki-e/install-action@v2 47 | with: 48 | tool: cargo-hack@0.6.16 49 | - name: Ensure that dev tools compiles and has no warnings with no features enabled 50 | run: cargo clippy --locked ${{ matrix.cargo_flags }} --all-targets -- -D warnings 51 | - name: Ensure that shotover-proxy compiles and has no warnings under every possible combination of features 52 | # some things to explicitly point out: 53 | # * clippy also reports rustc warnings and errors 54 | # * clippy --all-targets is not run so we only build the shotover_proxy executable without the tests/benches 55 | run: cargo hack --feature-powerset --at-least-one-of valkey,cassandra,kafka,opensearch clippy --locked ${{ matrix.cargo_flags }} --package shotover-proxy -- -D warnings 56 | -------------------------------------------------------------------------------- /.github/workflows/docker.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "docker" 3 | 4 | on: 5 | push: 6 | branches: [ main ] 7 | pull_request: 8 | branches: [ main ] 9 | 10 | # Cancel already running jobs 11 | concurrency: 12 | group: docker_${{ github.head_ref }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | publish-image: 17 | name: "Build docker image and run smoke test" 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/checkout@v4 21 | - name: Push image 22 | run: | 23 | # build image 24 | docker build -t shotover:test . 
25 | 26 | # run image for 5 seconds and then send it SIGTERM, if it returns a nonzero exit code fail CI 27 | timeout --preserve-status 5 docker run --mount type=bind,source="$(pwd)"/shotover-proxy/config,target=/config shotover:test 28 | -------------------------------------------------------------------------------- /.github/workflows/license_check.yaml: -------------------------------------------------------------------------------- 1 | name: License Check 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | # Cancel already running jobs 10 | concurrency: 11 | group: license_check_${{ github.head_ref }} 12 | cancel-in-progress: true 13 | 14 | env: 15 | CARGO_TERM_COLOR: always 16 | RUST_BACKTRACE: 1 17 | 18 | jobs: 19 | license_check: 20 | runs-on: ubuntu-22.04 21 | name: License Check 22 | steps: 23 | - uses: actions/checkout@v4 24 | - name: Install cargo deny 25 | uses: taiki-e/install-action@v2 26 | with: 27 | tool: cargo-deny@0.18.2 28 | - run: cargo deny check licenses 29 | -------------------------------------------------------------------------------- /.github/workflows/lint.yaml: -------------------------------------------------------------------------------- 1 | name: Formatting and lints 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | # Cancel already running jobs 10 | concurrency: 11 | group: lints_${{ github.head_ref }} 12 | cancel-in-progress: true 13 | 14 | env: 15 | CARGO_TERM_COLOR: always 16 | RUST_BACKTRACE: 1 17 | 18 | jobs: 19 | job: 20 | name: Formatting and lints 21 | runs-on: ubuntu-24.04 22 | steps: 23 | - uses: actions/checkout@v4 24 | - uses: Swatinem/rust-cache@v2 25 | with: 26 | # this line means that only the main branch writes to the cache 27 | # benefits: 28 | # * prevents main branch caches from being evicted in favor of a PR cache 29 | # * saves about 1min per workflow by skipping the actual cache write 30 | # downsides: 31 | # * PRs that update rust version or changes deps will be slow to iterate on due to changes not being cached. 32 | save-if: ${{ github.ref == 'refs/heads/main' }} 33 | - name: cache custom ubuntu packages 34 | uses: actions/cache@v4 35 | with: 36 | path: shotover-proxy/build/packages 37 | key: ubuntu-24.04-packages 38 | - name: Install ubuntu packages 39 | run: shotover-proxy/build/install_ubuntu_packages.sh 40 | - name: Install cargo-hack 41 | uses: taiki-e/install-action@v2 42 | with: 43 | tool: cargo-hack@0.6.16 44 | - name: Ensure `cargo fmt --all` was run 45 | run: cargo fmt --all -- --check 46 | - name: Ensure that all crates compile and have no warnings under every possible combination of features 47 | # some things to explicitly point out: 48 | # * clippy also reports rustc warnings and errors 49 | # * clippy --all-targets causes clippy to run against tests and examples which it doesnt do by default. 
50 | run: cargo hack --feature-powerset --at-least-one-of valkey,cassandra,kafka,opensearch clippy --all-targets --locked -- -D warnings 51 | - name: Report disk usage 52 | run: | 53 | df -h 54 | 55 | echo -e "\ntarget dir usage:" 56 | du -h $PWD/target 57 | 58 | echo -e "\n.cargo dir usage:" 59 | du -h ~/.cargo 60 | - name: Ensure that tests did not create or modify any files that arent .gitignore'd 61 | run: | 62 | if [ -n "$(git status --porcelain)" ]; then 63 | git status 64 | exit 1 65 | fi 66 | -------------------------------------------------------------------------------- /.github/workflows/publish-to-pages.yaml: -------------------------------------------------------------------------------- 1 | name: publish to github pages 2 | 3 | on: 4 | push: 5 | # Trigger when there is a new commit on main branch 6 | branches: [ main ] 7 | # also trigger when a new release tag is added to the repo 8 | tags: 9 | - "v*" 10 | 11 | # Cancel already running jobs 12 | concurrency: 13 | group: publish_to_pages_${{ github.head_ref }} 14 | cancel-in-progress: true 15 | 16 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages 17 | permissions: 18 | contents: read 19 | pages: write 20 | id-token: write 21 | 22 | jobs: 23 | build: 24 | strategy: 25 | matrix: 26 | include: 27 | - name: Publish website to Github Pages 28 | runner: ubuntu-latest 29 | environment: 30 | name: github-pages 31 | url: ${{ steps.setup_pages.outputs.base_url }} 32 | name: ${{ matrix.name }} 33 | runs-on: ${{ matrix.runner }} 34 | steps: 35 | - uses: actions/checkout@v4 36 | - name: Build website 37 | run: cargo run -p website 38 | - name: Setup Pages 39 | uses: actions/configure-pages@v4 40 | - name: Upload pages 41 | uses: actions/upload-pages-artifact@v3 42 | with: 43 | path: 'website/root' 44 | - name: Deploy pages 45 | uses: actions/deploy-pages@v4 46 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "tagged-release" 3 | 4 | on: 5 | push: 6 | tags: 7 | - "v*" 8 | 9 | jobs: 10 | prepublish-check: 11 | name: "Check that the project is releaseable" 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v4 15 | - name: Install ubuntu packages 16 | run: shotover-proxy/build/install_ubuntu_packages.sh 17 | - name: Run checks 18 | run: shotover-proxy/build/is_releasable.sh 19 | 20 | publish-image: 21 | name: "Publish Docker Image to Docker Hub" 22 | needs: prepublish-check 23 | runs-on: ubuntu-latest 24 | steps: 25 | - name: Login to Docker Hub 26 | uses: docker/login-action@v1 27 | with: 28 | username: ${{ secrets.DOCKERHUB_USERNAME }} 29 | password: ${{ secrets.DOCKERHUB_TOKEN }} 30 | - uses: actions/checkout@v4 31 | - name: Push image 32 | run: | 33 | docker build -t shotover/shotover-proxy:latest -t shotover/shotover-proxy:${GITHUB_REF/refs\/tags\//} . 
34 | docker push shotover/shotover-proxy:latest 35 | docker push shotover/shotover-proxy:${GITHUB_REF/refs\/tags\//} 36 | 37 | publish-binary: 38 | name: "Publish Binary to GitHub" 39 | needs: prepublish-check 40 | runs-on: ubuntu-22.04 41 | steps: 42 | - uses: actions/checkout@v4 43 | - name: Install ubuntu packages 44 | run: shotover-proxy/build/install_ubuntu_packages.sh 45 | - name: Build & test 46 | run: shotover-proxy/build/build_release.sh 47 | - name: Publish 48 | uses: marvinpinto/action-automatic-releases@latest 49 | with: 50 | repo_token: ${{ secrets.GITHUB_TOKEN }} 51 | prerelease: false 52 | files: | 53 | *.tar.gz 54 | 55 | publish-crates-io: 56 | name: "Publish to crates.io" 57 | needs: prepublish-check 58 | runs-on: ubuntu-22.04 59 | steps: 60 | - uses: actions/checkout@v4 61 | - name: Install ubuntu packages 62 | run: shotover-proxy/build/install_ubuntu_packages.sh 63 | - name: Publish 64 | run: | 65 | cd shotover 66 | cargo publish 67 | env: 68 | CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} 69 | -------------------------------------------------------------------------------- /.github/workflows/windsock_benches.yaml: -------------------------------------------------------------------------------- 1 | name: Windsock Benches 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | # Cancel already running jobs 10 | concurrency: 11 | group: windsock_benches_${{ github.head_ref }} 12 | cancel-in-progress: true 13 | 14 | env: 15 | CARGO_TERM_COLOR: always 16 | RUST_BACKTRACE: 1 17 | 18 | jobs: 19 | windsock_benches: 20 | name: "Windsock benches" 21 | runs-on: ubuntu-22.04 22 | steps: 23 | - uses: actions/checkout@v4 24 | - uses: Swatinem/rust-cache@v2 25 | with: 26 | # this line means that only the main branch writes to the cache 27 | # benefits: 28 | # * prevents main branch caches from being evicted in favor of a PR cache 29 | # * saves about 1min per workflow by skipping the actual cache write 30 | # downsides: 31 | # * PRs that update rust version or changes deps will be slow to iterate on due to changes not being cached. 32 | save-if: ${{ github.ref == 'refs/heads/main' }} 33 | - name: Ensure that custom benches run 34 | run: | 35 | # This isnt needed locally because we run profilers via sudo. 36 | # But for some reason on CI even with sudo we didnt have access to perf events. 
37 | echo '1' | sudo tee /proc/sys/kernel/perf_event_paranoid 38 | 39 | # run some extra cases that arent handled by nextest 40 | cargo windsock local-run --bench-length-seconds 5 --operations-per-second 100 --profilers flamegraph db=cassandra,compression=none,connection_count=1,driver=scylla,operation=read_i64,protocol=v4,shotover=standard,topology=single 41 | cargo windsock local-run --bench-length-seconds 5 --operations-per-second 100 --profilers samply db=cassandra,compression=none,connection_count=1,driver=scylla,operation=read_i64,protocol=v4,shotover=standard,topology=single 42 | cargo windsock local-run --bench-length-seconds 5 --operations-per-second 100 --profilers sys_monitor db=kafka,shotover=standard,size=12B,topology=single 43 | cargo windsock local-run --bench-length-seconds 5 --operations-per-second 100 --profilers shotover_metrics db=valkey,encryption=none,operation=get,shotover=standard,topology=single 44 | - name: Ensure that tests did not create or modify any files that arent .gitignore'd 45 | run: | 46 | if [ -n "$(git status --porcelain)" ]; then 47 | git status 48 | exit 1 49 | fi 50 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | .idea/ 3 | *.gz 4 | /.vscode 5 | /shotover-proxy/tests/test-configs/valkey/tls/certs 6 | /shotover-proxy/tests/test-configs/cassandra/tls/certs 7 | /shotover-proxy/tests/test-configs/kafka/tls/certs 8 | .workspace.code-workspace 9 | **/.DS_Store 10 | /.project 11 | /docs/book 12 | /docs/mdbook_bin 13 | /website/root 14 | /website/shotover_repo_for_docs 15 | /shotover-proxy/build/packages 16 | /some_local_file 17 | /test-helpers/src/connection/kafka/node/node_modules -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/gitleaks/gitleaks 3 | rev: v8.16.1 4 | hooks: 5 | - id: gitleaks 6 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:bookworm as builder 2 | 3 | WORKDIR /shotover-proxy 4 | 5 | COPY ./ ./ 6 | 7 | RUN rustup toolchain install && cargo build -p shotover-proxy --release 8 | 9 | FROM debian:bookworm-slim 10 | 11 | COPY --from=builder /shotover-proxy/target/release/shotover-proxy /shotover-proxy 12 | 13 | ENTRYPOINT ["./shotover-proxy"] 14 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Shotover Proxy 2 | Copyright 2021-2021 Instaclustr Pty Ltd 3 | This product includes software developed at Instaclustr (http://www.instaclustr.com/). 
4 | -------------------------------------------------------------------------------- /custom-transforms-example/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "custom-transforms-example" 3 | version = "0.0.1" 4 | authors = ["Ben "] 5 | edition = "2021" 6 | license = "Apache-2.0" 7 | 8 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 9 | 10 | [dependencies] 11 | shotover = { path = "../shotover", default-features = false} 12 | anyhow.workspace = true 13 | serde.workspace = true 14 | async-trait.workspace = true 15 | tracing.workspace = true 16 | typetag.workspace = true 17 | 18 | 19 | [dev-dependencies] 20 | test-helpers = {path = "../test-helpers"} 21 | tokio.workspace = true 22 | redis.workspace = true 23 | pretty_assertions.workspace = true 24 | 25 | [features] 26 | valkey = ["shotover/valkey"] 27 | default = ["valkey"] 28 | 29 | [[test]] 30 | name = "test" 31 | required-features = ["valkey"] 32 | -------------------------------------------------------------------------------- /custom-transforms-example/config/config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | main_log_level: "info,shotover_proxy=info" 3 | observability_interface: "0.0.0.0:9001" 4 | -------------------------------------------------------------------------------- /custom-transforms-example/config/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | valkey-one: 3 | image: bitnami/valkey:7.2.5-debian-12-r9 4 | ports: 5 | - "1111:6379" 6 | environment: 7 | ALLOW_EMPTY_PASSWORD: "yes" 8 | VALKEY_TLS_ENABLED: "no" 9 | -------------------------------------------------------------------------------- /custom-transforms-example/config/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | chain: 7 | - ValkeyGetRewrite: 8 | result: "Rewritten value" 9 | - ValkeySinkSingle: 10 | remote_address: "127.0.0.1:1111" 11 | connect_timeout_ms: 3000 12 | -------------------------------------------------------------------------------- /custom-transforms-example/src/main.rs: -------------------------------------------------------------------------------- 1 | use shotover::runner::Shotover; 2 | 3 | #[cfg(feature = "valkey")] 4 | mod valkey_get_rewrite; 5 | #[cfg(feature = "valkey")] 6 | shotover::import_transform!(valkey_get_rewrite::ValkeyGetRewriteConfig); 7 | 8 | fn main() { 9 | Shotover::new().run_block(); 10 | } 11 | -------------------------------------------------------------------------------- /docs/book.toml: -------------------------------------------------------------------------------- 1 | [book] 2 | authors = ["Shotover Team"] 3 | language = "en" 4 | multilingual = false 5 | src = "src" 6 | title = "Shotover" 7 | 8 | [output.html] 9 | additional-js = ["mermaid.min.js", "mermaid-init.js"] 10 | 11 | [output.linkcheck] 12 | # Should we check links on the internet? 
Enabling this option adds a
13 | # non-negligible performance impact
14 | follow-web-links = false
15 |
16 | warning-policy = "error"
17 |
18 | [preprocessor]
19 |
20 | [preprocessor.mermaid]
21 | command = "mdbook-mermaid"
22 |
-------------------------------------------------------------------------------- /docs/mdbook.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e; set -u
4 |
5 | # Set up the mdbook binaries if they haven't already been set up
6 | if [ ! -d "mdbook_bin" ]; then
7 |     mkdir -p mdbook_bin
8 |     pushd mdbook_bin
9 |     curl -L https://github.com/rust-lang/mdBook/releases/download/v0.4.13/mdbook-v0.4.13-x86_64-unknown-linux-gnu.tar.gz | tar xvz
10 |     wget https://github.com/Michael-F-Bryan/mdbook-linkcheck/releases/download/v0.7.6/mdbook-linkcheck.x86_64-unknown-linux-gnu.zip -O linkcheck.zip
11 |     unzip linkcheck.zip
12 |     chmod +x mdbook-linkcheck
13 |     popd
14 | fi
15 |
16 | # Need to set the PATH so that mdbook can find the mdbook-linkcheck plugin
17 | PATH="$PATH:$PWD/mdbook_bin"
18 |
19 | mdbook "$@"
20 |
-------------------------------------------------------------------------------- /docs/mermaid-init.js: --------------------------------------------------------------------------------
1 | (() => {
2 |     const darkThemes = ['ayu', 'navy', 'coal'];
3 |     const lightThemes = ['light', 'rust'];
4 |
5 |     const classList = document.getElementsByTagName('html')[0].classList;
6 |
7 |     let lastThemeWasLight = true;
8 |     for (const cssClass of classList) {
9 |         if (darkThemes.includes(cssClass)) {
10 |             lastThemeWasLight = false;
11 |             break;
12 |         }
13 |     }
14 |
15 |     const theme = lastThemeWasLight ? 'default' : 'dark';
16 |     mermaid.initialize({ startOnLoad: true, theme });
17 |
18 |     // Simplest way to make mermaid re-render the diagrams in the new theme is via refreshing the page
19 |
20 |     for (const darkTheme of darkThemes) {
21 |         document.getElementById(darkTheme).addEventListener('click', () => {
22 |             if (lastThemeWasLight) {
23 |                 window.location.reload();
24 |             }
25 |         });
26 |     }
27 |
28 |     for (const lightTheme of lightThemes) {
29 |         document.getElementById(lightTheme).addEventListener('click', () => {
30 |             if (!lastThemeWasLight) {
31 |                 window.location.reload();
32 |             }
33 |         });
34 |     }
35 | })();
36 |
-------------------------------------------------------------------------------- /docs/src/SUMMARY.md: --------------------------------------------------------------------------------
1 | # Summary
2 |
3 | - [Home](./index.md)
4 | - [User Guide]()
5 |   - [Introduction](./user-guide/introduction.md)
6 |   - [Getting Started](./user-guide/getting-started.md)
7 |   - [Concepts](./user-guide/concepts.md)
8 |   - [Configuration](./user-guide/configuration.md)
9 |   - [Observability](./user-guide/observability.md)
10 |   - [Custom Transforms](./user-guide/writing-custom-transforms.md)
11 | - [Sources](./sources.md)
12 | - [Transforms](./transforms.md)
13 | - [Examples]()
14 |   - [Valkey Cluster]()
15 |     - [Unaware client](./examples/valkey-clustering-unaware.md)
16 |     - [Aware client](./examples/valkey-clustering-aware.md)
17 |   - [Cassandra Cluster]()
18 |     - [Shotover sidecars](./examples/cassandra-cluster-shotover-sidecar.md)
19 | - [Contributing](./dev-docs/contributing.md)
20 |   - [Debugging](./dev-docs/debugging.md)
21 |   - [Internal End-to-End Overview](./dev-docs/end-to-end-overview.md)
22 |   - [Setting up Linux](./dev-docs/setting-up-linux.md)
23 |   - [Setting up macOS](./dev-docs/setting-up-macos.md)
24 |
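For context, the `docs/mdbook.sh` wrapper above exists so that the pinned `mdbook` and `mdbook-linkcheck` binaries are on the `PATH`; it forwards all of its arguments to `mdbook`. A hedged sketch of how it would be invoked (`build` and `serve` are standard mdBook subcommands, not something this repository defines):

```shell
cd docs
./mdbook.sh build   # render the book; linkcheck runs because book.toml enables [output.linkcheck]
./mdbook.sh serve   # standard mdBook subcommand for a local live-reloading preview
```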
-------------------------------------------------------------------------------- /docs/src/dev-docs/end-to-end-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shotover/shotover-proxy/e6587b7bffc190ba1e034727a73bc674bf2e9ca0/docs/src/dev-docs/end-to-end-overview.png
-------------------------------------------------------------------------------- /docs/src/dev-docs/setting-up-linux.md: --------------------------------------------------------------------------------
1 | # Linux Specific Setup
2 |
3 | ## Building shotover
4 |
5 | Shotover requires a single external dependency to build on Linux.
6 | `gcc` must be installed because the `aws-lc-sys` and `ring` crates contain some C code.
7 | On Ubuntu you can install it via:
8 |
9 | ```shell
10 | sudo apt-get install gcc
11 | ```
12 |
13 | ## Integration test dependencies
14 |
15 | Building and running integration tests and benchmarks requires many more external dependencies.
16 | To set them up on Ubuntu, run the script at `shotover-proxy/build/install_ubuntu_deps.sh`.
17 | Inspect the contents of the script to learn what it installs and why; some of the dependencies are optional, so feel free to skip those and install the rest manually.
18 | If you already have Docker installed, make sure it is not a rootless install; see `install_ubuntu_deps.sh` for more information.
19 |
-------------------------------------------------------------------------------- /docs/src/dev-docs/setting-up-macos.md: --------------------------------------------------------------------------------
1 | # macOS Specific Setup
2 |
3 | ## Building shotover
4 |
5 | There are no external dependencies required for building shotover on macOS.
6 |
7 | ## Integration test dependencies
8 |
9 | To run the tests capable of running on macOS, install the following dependencies:
10 |
11 | ```shell
12 | brew install --cask docker
13 | brew install openssl@3
14 | brew install chipmk/tap/docker-mac-net-connect
15 | sudo brew services start chipmk/tap/docker-mac-net-connect
16 | ```
17 |
18 | You may need to enable the option `Settings > Advanced > Allow the default Docker socket to be used (requires password)` in Docker Desktop, and restart Docker Desktop, for `docker-mac-net-connect` to work.
19 |
20 | Make sure that Docker Desktop is running when you run the tests.
21 |
22 | To continue running tests after a reboot, you will need to rerun:
23 |
24 | ```shell
25 | sudo brew services start chipmk/tap/docker-mac-net-connect
26 | ```
27 |
-------------------------------------------------------------------------------- /docs/src/index.md: --------------------------------------------------------------------------------
1 |

2 | <img src="logo.png" alt="Shotover logo"> 3 |

4 |
5 | Shotover Proxy is an open source, high performance L7 data-layer proxy for controlling, managing and modifying the flow of database requests in transit. It can be used to solve many different operational and interoperability challenges for teams where polyglot persistence (many different databases) is common.
6 |
7 | The following pages are a good place to learn more:
8 |
9 | * [Introduction](user-guide/introduction.md) for more information on what Shotover Proxy is, why it exists and some of the underlying philosophies behind it.
10 | * [Getting started guide](user-guide/getting-started.md) for details on how to jump straight in and get up and running.
11 | * [Concepts](user-guide/concepts.md) for a deeper dive into some of the fundamental Shotover concepts.
12 |
13 | ## Deploying Shotover
14 |
15 | Shotover can be deployed in a number of ways; the right choice will generally be based on the problem you are trying to solve, but all deployments fall into three categories:
16 |
17 | * As an application sidecar - Shotover is pretty lightweight, so feel free to deploy it as a sidecar to each of your application instances.
18 | * As a standalone proxy - If you are building a Service/DBaaS/Common data layer, you can deploy Shotover on standalone hardware and really let it fly.
19 | * As a sidecar to your database - You can also stick Shotover on the same instance/server as your database is running on, we do it, so we won't judge you.
20 |
21 | ## Roadmap
22 |
23 | * Support relevant xDS APIs (so Shotover can play nicely with service mesh implementations).
24 | * Support hot-reloads and a dynamic configuration API.
25 | * Additional sources (DynamoDB and PostgreSQL are good first candidates).
26 | * Add support for rate limiting, explicit back-pressure mechanisms, etc.
27 | * Additional distributed algorithm transform primitives (e.g. RAFT, 2PC, etc.).
28 | * Additional sink transforms (these generally get implemented alongside sources).
29 | * Support user-defined / generated sources (e.g. thrift or a gRPC service from a proto definition).
30 | * Simulation testing once tokio-rs/simulation reaches compatibility with tokio 2.0.
31 | * Zero-copy pass-through transforms and in-place query editing (performance).
32 |
33 | ## Name
34 |
35 | Shotover refers to the Shotover (Kimi-ākau) river in Otago, New Zealand - close to Queenstown and eventually flowing into Lake Wakatipu via the Kawarau River. It's famous for white water rafting, bungy-jumping, fast rapids and jet boating.
36 |
-------------------------------------------------------------------------------- /docs/src/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shotover/shotover-proxy/e6587b7bffc190ba1e034727a73bc674bf2e9ca0/docs/src/logo.png
-------------------------------------------------------------------------------- /docs/src/user-guide/concepts.md: --------------------------------------------------------------------------------
1 | # Core Concepts
2 |
3 | Shotover has a small number of core concepts or components that make up the bulk of its architecture. Once understood, quite complex behaviour and environments can be managed with Shotover.
4 |
5 | ## Source
6 |
7 | A source is the main component that listens for traffic from your application and decodes it into an internal object that all Shotover transforms can understand. The source will then send the message to a transform chain for processing / routing.
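As a concrete sketch of how a source is wired up (field names lifted from `custom-transforms-example/config/topology.yaml` earlier in this listing; `ValkeyGetRewrite` is the custom example transform from that crate, not a built-in):

```yaml
sources:
  - Valkey:                          # the source: decodes Valkey protocol traffic
      name: "valkey"
      listen_addr: "127.0.0.1:6379"  # where client applications connect
      chain:                         # the transform chain this source feeds into
        - ValkeyGetRewrite:          # a non-terminating transform
            result: "Rewritten value"
        - ValkeySinkSingle:          # a terminating sink: forwards to the real server
            remote_address: "127.0.0.1:1111"
            connect_timeout_ms: 3000
```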
8 |
9 | ## Transform
10 |
11 | Transforms are where Shotover does the bulk of its work. A transform is a single unit of operation that does something to the database request that is in flight. This may be logging it, modifying it, sending it to an external system or anything else you can think of. Transforms can either be terminating (return a response without calling the rest of the chain) or non-terminating (pass messages on to subsequent transforms in the chain). Transforms that send messages to external systems are called sinks.
12 |
13 | ## Transform Chain
14 |
15 | A transform chain is an ordered list of transforms that a message will pass through. Messages are received from a source. Transform chains can be of arbitrary complexity and a transform can even have its own set of sub-chains. Transform chains are defined by the user in Shotover's configuration file and are linked to sources.
16 |
17 |
18 | ## Topology
19 |
20 | A topology is how you configure Shotover. You define your sources, your transforms in a transform chain and then assign the chain to a source.
21 |
-------------------------------------------------------------------------------- /docs/src/user-guide/deployment.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shotover/shotover-proxy/e6587b7bffc190ba1e034727a73bc674bf2e9ca0/docs/src/user-guide/deployment.md
-------------------------------------------------------------------------------- /docs/src/user-guide/getting-started.md: --------------------------------------------------------------------------------
1 | # Getting Started
2 |
3 | ## Setup
4 |
5 | 1. **Download & Extract** - You can find the latest release of Shotover Proxy at our GitHub [release page](https://github.com/shotover/shotover-proxy/releases). Download and extract it from there onto your Linux machine. Alternatively you can [build and run from source](../dev-docs/contributing.md).
6 | 2. **Run** - `cd` into the extracted `shotover` folder and run `./shotover-proxy`. Shotover will launch and display some logs.
7 | 3. **Examine Config** - Shotover has two configuration files:
8 |     * `config/config.yaml` - This is used to configure logging and metrics (a minimal example is shown just after this list).
9 |     * `config/topology.yaml` - This defines how Shotover receives, transforms and delivers messages.
10 | 4. **Configure topology** - Open `topology.yaml` in your text editor and edit it to define the sources and transforms you need; the comments in the file will direct you to suitable documentation. Alternatively you can refer to the [Deployment Scenarios](#deployment-scenarios) section for full `topology.yaml` examples.
11 | 5. **Rerun** - Shotover currently doesn't support hot-reloading config, so first shut it down with `CTRL-C`. Then rerun `./shotover-proxy` for your new config to take effect.
12 | 6. **Test** - Send a message to Shotover as per your configuration and observe it is delivered to its configured destination database.
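As referenced in step 3, a minimal `config/config.yaml` needs only two keys; this one is copied verbatim from `custom-transforms-example/config/config.yaml` in this repository:

```yaml
main_log_level: "info,shotover_proxy=info"  # tracing filter controlling log verbosity
observability_interface: "0.0.0.0:9001"     # address the metrics endpoint is served on
```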
13 |
14 | To see Shotover's command line arguments run: `./shotover-proxy --help`
15 |
16 | ## Deployment scenarios
17 |
18 | Full `topology.yaml` examples configured for a specific use case:
19 |
20 | * [valkey clustering](../examples/valkey-clustering-unaware.md)
21 |
-------------------------------------------------------------------------------- /docs/src/user-guide/writing-custom-transforms.md: --------------------------------------------------------------------------------
1 | # Writing Custom Transforms
2 |
3 | Shotover supports implementing your own custom transforms.
4 | Shotover exposes an API via a rust crate from which you can both implement your own transforms and build those transforms into a final shotover binary that can be run in production.
5 |
6 | ## Required Knowledge
7 |
8 | This approach is taken to make the most of rust's speed and type safety.
9 | However this does come at an up-front cost of you needing a basic familiarity with rust.
10 | If you have never worked with rust before, you should first spend a day [familiarising yourself with it](https://doc.rust-lang.org/book/title-page.html).
11 |
12 | ## Start with the template
13 |
14 | To get started writing custom transforms first clone this template:
15 |
16 | ```shell
17 | git clone https://github.com/shotover/shotover-custom-transforms-template
18 | ```
19 |
20 | The template comes with:
21 |
22 | * two example transforms: the `valkey-get-rewrite` and `kafka-fetch-rewrite` crates
23 |   * By convention, each transform is its own rust crate
24 | * the final shotover binary: the `shotover-bin` crate
25 |   * this also contains integration tests in `shotover-bin/tests`, make sure to utilize them!
26 |
27 | Use an example transform that matches the protocol you are working with as a base, e.g.
28 |
29 | * valkey-get-rewrite - for valkey
30 | * kafka-fetch-rewrite - for kafka
31 |
32 | ## Running the project
33 |
34 | To run the shotover binary containing your project just run:
35 |
36 | ```shell
37 | cargo run --release
38 | ```
39 |
40 | This also creates a binary at `target/release/shotover-bin` which can be used in production.
41 |
42 | To run the integration tests run:
43 |
44 | ```shell
45 | cargo test --release
46 | ```
47 |
48 | ## A little cleanup
49 |
50 | Feel free to delete transforms that you do not need.
51 | That would involve deleting:
52 |
53 | * The entire crate folder
54 | * The `members` entry in the workspace-level `Cargo.toml`
55 | * The corresponding `shotover::import_transform!` line in `shotover-bin/src/main.rs`
56 |
57 | ## Development
58 |
59 | To understand the transform you are using as a base, you will want to consult the [shotover API documentation](https://docs.rs/crate/shotover/latest).
60 | From there, explore the API to find how to achieve what you need.
-------------------------------------------------------------------------------- /ec2-cargo/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "ec2-cargo"
3 | version = "0.1.0"
4 | edition = "2021"
5 | license = "Apache-2.0"
6 | description = "A tool for running cargo commands in an EC2 instance"
7 |
8 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
9 |
10 | [dependencies]
11 | tokio.workspace = true
12 | clap.workspace = true
13 | tracing-subscriber.workspace = true
14 | aws-throwaway.workspace = true
15 | tracing-appender.workspace = true
16 | shellfish = { version = "0.10.0", features = ["async"] }
17 | cargo_metadata = "0.19.0"
18 | shell-quote.workspace = true
19 |
-------------------------------------------------------------------------------- /ec2-cargo/readme.md: --------------------------------------------------------------------------------
1 | # ec2-cargo
2 |
3 | ec2-cargo is a small tool for running shotover tests on an EC2 instance.
4 |
5 | ## AWS credentials
6 |
7 | Refer to the [aws-sdk docs](https://docs.aws.amazon.com/sdk-for-rust/latest/dg/credentials.html) for full information on credentials.
8 |
9 | But the two easiest ways are:
10 |
11 | * Setting the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables
12 | * Logging in from the AWS CLI
13 |
14 | ## Script setup
15 |
16 | To run ec2-cargo authenticated with AWS through environment variables, create a script like the following *OUTSIDE* of any repository.
17 |
18 | ```bash
19 | cd shotover-proxy/ec2-cargo
20 | AWS_ACCESS_KEY_ID=TODO AWS_SECRET_ACCESS_KEY=TODO cargo run "$@"
21 | ```
22 |
23 | Replace `TODO` with your credentials.
24 |
25 | Then invoke like `ec2-cargo.sh your-flags-here`
26 |
27 | ## Running
28 |
29 | ec2-cargo has reasonable default configurations so you can just:
30 |
31 | * `cargo run` the project to use default credentials
32 | * `ec2-cargo.sh` to use specific env vars as described above.
33 |
34 | You can also specify the instance type e.g. `ec2-cargo.sh --instance-type c7g.2xlarge`
35 |
36 | ## Usage
37 |
38 | While the tool is running, it will present you with a shell in which to enter commands.
39 |
40 | Some possible commands are:
41 |
42 | * `test $args` - Uploads shotover project source code and runs `cargo nextest run $args`
43 | * `ssh-instructions` - Print a bash snippet that can be used to ssh into the machine
44 | * `exit` - Exits the shell and terminates the EC2 instance.
45 | * `help` - Display all possible commands 46 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.86" 3 | components = ["rustfmt", "clippy"] 4 | targets = ["aarch64-unknown-linux-gnu"] 5 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | use_field_init_shorthand = true 2 | edition = "2021" 3 | style_edition = "2024" 4 | -------------------------------------------------------------------------------- /shotover-proxy/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "shotover-proxy" 3 | version = "0.6.0" 4 | authors = ["Ben "] 5 | edition = "2021" 6 | license = "Apache-2.0" 7 | 8 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 9 | 10 | [dependencies] 11 | shotover = { path = "../shotover", default-features = false } 12 | 13 | [dev-dependencies] 14 | prometheus-parse = "0.2.4" 15 | reqwest.workspace = true 16 | scylla.workspace = true 17 | anyhow.workspace = true 18 | tokio.workspace = true 19 | tracing.workspace = true 20 | rstest = "0.25.0" 21 | rstest_reuse = "0.7.0" 22 | test-helpers = { path = "../test-helpers" } 23 | redis.workspace = true 24 | chacha20poly1305.workspace = true 25 | serde.workspace = true 26 | csv.workspace = true 27 | uuid.workspace = true 28 | itertools.workspace = true 29 | cdrs-tokio.workspace = true 30 | redis-protocol.workspace = true 31 | bincode.workspace = true 32 | futures.workspace = true 33 | hex.workspace = true 34 | cassandra-protocol.workspace = true 35 | bytes.workspace = true 36 | rand.workspace = true 37 | async-trait.workspace = true 38 | tracing-subscriber.workspace = true 39 | tracing-appender.workspace = true 40 | fred = { version = "10.0.0", features = ["enable-rustls"] } 41 | tokio-bin-process.workspace = true 42 | rustls-pemfile = "2.0.0" 43 | rustls-pki-types = "1.1.0" 44 | aws-throwaway.workspace = true 45 | windsock = "0.2.0" 46 | regex = "1.7.0" 47 | opensearch = { version = "2.1.0", default-features = false, features = [ 48 | "native-tls", 49 | ] } 50 | serde_json = "1.0.103" 51 | time = { version = "0.3.25" } 52 | shell-quote.workspace = true 53 | pretty_assertions.workspace = true 54 | 55 | [features] 56 | # Include WIP alpha transforms in the public API 57 | alpha-transforms = ["shotover/alpha-transforms"] 58 | cassandra = ["shotover/cassandra"] 59 | kafka = ["shotover/kafka"] 60 | valkey = ["shotover/valkey"] 61 | opensearch = ["shotover/opensearch"] 62 | cassandra-cpp-driver-tests = ["test-helpers/cassandra-cpp-driver-tests"] 63 | kafka-cpp-driver-tests = ["test-helpers/kafka-cpp-driver-tests"] 64 | default = ["cassandra", "kafka", "valkey", "opensearch"] 65 | 66 | [[bench]] 67 | name = "windsock" 68 | harness = false 69 | # windsock is dependent on the DebugForceEncode transform for the shotover=message-parsed benches. 
70 | # rather than manually adding #[cfg(feature = "alpha-transforms")] everywhere we just forbid compilation entirely without the alpha-transforms feature
71 | required-features = ["alpha-transforms"]
72 |
-------------------------------------------------------------------------------- /shotover-proxy/benches/windsock/common.rs: --------------------------------------------------------------------------------
1 | use shotover::{config::topology::Topology as ShotoverTopology, sources::SourceConfig};
2 |
3 | #[derive(Clone, Copy)]
4 | pub enum Shotover {
5 |     None,
6 |     Standard,
7 |     ForcedMessageParsed,
8 | }
9 |
10 | impl Shotover {
11 |     pub fn to_tag(self) -> (String, String) {
12 |         (
13 |             "shotover".to_owned(),
14 |             match self {
15 |                 Shotover::None => "none".to_owned(),
16 |                 Shotover::Standard => "standard".to_owned(),
17 |                 Shotover::ForcedMessageParsed => "message-parsed".to_owned(),
18 |             },
19 |         )
20 |     }
21 | }
22 |
23 | pub fn generate_topology(source: SourceConfig) -> String {
24 |     ShotoverTopology {
25 |         sources: vec![source],
26 |     }
27 |     .serialize()
28 |     .unwrap()
29 | }
30 |
-------------------------------------------------------------------------------- /shotover-proxy/benches/windsock/kafka/mod.rs: --------------------------------------------------------------------------------
1 | mod bench;
2 |
3 | use crate::ShotoverBench;
4 | use crate::common::*;
5 | use bench::*;
6 |
7 | pub fn benches() -> Vec<ShotoverBench> {
8 |     itertools::iproduct!(
9 |         [
10 |             Shotover::None,
11 |             Shotover::Standard,
12 |             Shotover::ForcedMessageParsed
13 |         ],
14 |         [
15 |             KafkaTopology::Single,
16 |             KafkaTopology::Cluster1,
17 |             KafkaTopology::Cluster3
18 |         ],
19 |         [Size::B12, Size::KB1, Size::KB100]
20 |     )
21 |     .map(|(shotover, topology, size)| {
22 |         Box::new(KafkaBench::new(shotover, topology, size)) as ShotoverBench
23 |     })
24 |     .collect()
25 | }
26 |
-------------------------------------------------------------------------------- /shotover-proxy/benches/windsock/main.rs: --------------------------------------------------------------------------------
1 | // Allow dead code if any of the protocol features are disabled
2 | #![cfg_attr(
3 |     any(
4 |         not(feature = "cassandra"),
5 |         not(feature = "valkey"),
6 |         not(all(feature = "kafka-cpp-driver-tests", feature = "kafka"))
7 |     ),
8 |     allow(dead_code, unused_imports, unused_variables, unused_mut)
9 | )]
10 |
11 | #[cfg(feature = "cassandra")]
12 | mod cassandra;
13 | mod cloud;
14 | mod common;
15 | #[cfg(all(feature = "kafka-cpp-driver-tests", feature = "kafka"))]
16 | mod kafka;
17 | mod profilers;
18 | mod shotover;
19 | #[cfg(feature = "valkey")]
20 | mod valkey;
21 |
22 | use cloud::CloudResources;
23 | use cloud::CloudResourcesRequired;
24 | use fred::rustls::crypto::ring::default_provider;
25 | use std::path::Path;
26 | use tracing_subscriber::EnvFilter;
27 | use windsock::{Bench, Windsock};
28 |
29 | pub type ShotoverBench = Box<
30 |     dyn Bench<CloudResourcesRequired = CloudResourcesRequired, CloudResources = CloudResources>,
31 | >;
32 |
33 | fn main() {
34 |     default_provider().install_default().ok();
35 |
36 |     let (non_blocking, _guard) = tracing_appender::non_blocking(std::io::stdout());
37 |     tracing_subscriber::fmt()
38 |         .with_env_filter(EnvFilter::from_default_env())
39 |         .with_writer(non_blocking)
40 |         .init();
41 |
42 |     // The benches and tests automatically set the working directory to CARGO_MANIFEST_DIR.
43 |     // We need to do the same as the DockerCompose + ShotoverProcess types rely on this.
44 | if Path::new(env!("CARGO_MANIFEST_DIR")).exists() { 45 | std::env::set_current_dir( 46 | Path::new(env!("CARGO_MANIFEST_DIR")) 47 | .parent() 48 | .unwrap() 49 | .join("shotover-proxy"), 50 | ) 51 | .unwrap(); 52 | } 53 | 54 | let mut benches = vec![]; 55 | 56 | #[cfg(feature = "cassandra")] 57 | benches.extend(cassandra::benches()); 58 | #[cfg(all(feature = "kafka-cpp-driver-tests", feature = "kafka"))] 59 | benches.extend(kafka::benches()); 60 | #[cfg(feature = "valkey")] 61 | benches.extend(valkey::benches()); 62 | 63 | Windsock::new(benches, cloud::AwsCloud::new_boxed(), &["release"]).run(); 64 | } 65 | -------------------------------------------------------------------------------- /shotover-proxy/benches/windsock/readme.md: -------------------------------------------------------------------------------- 1 | # Shotover windsock 2 | 3 | ## Running locally 4 | 5 | Just `cargo windsock` will run every bench. 6 | Refer to the windsock docs and `cargo windsock --help` for more flags. 7 | 8 | ## Running in AWS 9 | 10 | First ensure you have the [AWS CLI V2 installed locally](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html). 11 | 12 | ### AWS credentials 13 | 14 | Refer to the [aws-sdk docs](https://docs.aws.amazon.com/sdk-for-rust/latest/dg/credentials.html) for full information on credentials. 15 | 16 | The two easiest ways are: 17 | 18 | * Setting the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables 19 | * Logging in from the AWS CLI 20 | 21 | ### Script setup 22 | 23 | To run the shotover windsock benches in AWS using the environment variable approach, create a script like the following *OUTSIDE* of any repository. 24 | 25 | ```bash 26 | cd shotover-proxy 27 | AWS_ACCESS_KEY_ID=TODO AWS_SECRET_ACCESS_KEY=TODO cargo windsock "$@" 28 | ``` 29 | 30 | Replace `TODO` with your credentials.
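Fleshed out into a runnable file, such a script might look like the sketch below (the `cd` path is a placeholder for wherever your checkout lives; save it as e.g. `aws-windsock.sh` and `chmod +x` it so the invocation below works):

```bash
#!/usr/bin/env bash
set -e

# Placeholder path: point this at the shotover-proxy directory of your checkout.
cd /path/to/shotover/shotover-proxy

# Replace TODO with your credentials; keeping this file outside of any
# repository avoids accidentally committing them.
AWS_ACCESS_KEY_ID=TODO AWS_SECRET_ACCESS_KEY=TODO cargo windsock "$@"
```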
31 | 32 | Then invoke it like `aws-windsock.sh your-windsock-flags-here`. 33 | -------------------------------------------------------------------------------- /shotover-proxy/benches/windsock/shotover.rs: -------------------------------------------------------------------------------- 1 | use crate::profilers::ProfilerRunner; 2 | use test_helpers::shotover_process::ShotoverProcessBuilder; 3 | use tokio_bin_process::{BinProcess, bin_path}; 4 | use uuid::Uuid; 5 | 6 | pub async fn shotover_process_custom_topology( 7 | topology_contents: &str, 8 | profiler: &ProfilerRunner, 9 | ) -> BinProcess { 10 | let topology_path = std::env::temp_dir().join(Uuid::new_v4().to_string()); 11 | std::fs::write(&topology_path, topology_contents).unwrap(); 12 | ShotoverProcessBuilder::new_with_topology(topology_path.to_str().unwrap()) 13 | .with_config("config/config.yaml") 14 | .with_bin(bin_path!("shotover-proxy")) 15 | .with_profile(profiler.shotover_profile()) 16 | .start() 17 | .await 18 | } 19 | -------------------------------------------------------------------------------- /shotover-proxy/benches/windsock/valkey/mod.rs: -------------------------------------------------------------------------------- 1 | mod bench; 2 | 3 | use crate::ShotoverBench; 4 | use crate::common::*; 5 | use bench::*; 6 | 7 | pub fn benches() -> Vec<ShotoverBench> { 8 | itertools::iproduct!( 9 | [ValkeyTopology::Cluster3, ValkeyTopology::Single], 10 | [ 11 | Shotover::None, 12 | Shotover::Standard, 13 | Shotover::ForcedMessageParsed 14 | ], 15 | [ValkeyOperation::Get, ValkeyOperation::Set], 16 | [Encryption::None, Encryption::Tls] 17 | ) 18 | .map(|(topology, shotover, operation, encryption)| { 19 | Box::new(ValkeyBench::new(topology, shotover, operation, encryption)) as ShotoverBench 20 | }) 21 | .collect() 22 | } 23 | -------------------------------------------------------------------------------- /shotover-proxy/build/build_release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | SCRIPT_DIR="$(cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 6 | 7 | cd "$SCRIPT_DIR" 8 | 9 | cargo build --release 10 | 11 | mkdir -p shotover 12 | cp ../../target/release/shotover-proxy shotover 13 | cp -r ../config shotover 14 | 15 | # extract the crate version from Cargo.toml 16 | CRATE_VERSION="$(cargo metadata --format-version 1 --offline --no-deps | jq -c -M -r '.packages[] | select(.name == "shotover-proxy") | .version')" 17 | tar -cvzf out.tar.gz shotover 18 | mv out.tar.gz ../../shotover-proxy-linux_amd64-${CRATE_VERSION}.tar.gz 19 | 20 | rm -rf shotover 21 | -------------------------------------------------------------------------------- /shotover-proxy/build/cassandra-cpp-driver.control: -------------------------------------------------------------------------------- 1 | Package: cassandra-cpp-driver 2 | Version: VERSION 3 | Section: base 4 | Priority: optional 5 | Architecture: amd64 6 | Maintainer: Shotover team 7 | Description: cassandra cpp-driver installed by shotover 8 | -------------------------------------------------------------------------------- /shotover-proxy/build/install_ubuntu_deps.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | if [ "$EUID" -eq 0 ] 6 | then echo "This script should not be run as root. It will run sudo itself as required."
7 | exit 8 | fi 9 | cd "$(dirname "$0")" 10 | 11 | sudo apt-get update 12 | 13 | # Install dependencies for openssl, needed by `redis` crate 14 | sudo apt-get install -y pkg-config 15 | 16 | # Install docker 17 | curl -sSL https://get.docker.com/ | sudo sh 18 | # Do not use the rootless install of docker as many of our tests rely on the user created bridge networks 19 | # having their interface exposed to the host, which rootless install does not support. 20 | # Instead add your user to the docker group: 21 | sudo usermod -aG docker $USER 22 | 23 | # Install dependencies for kafka java driver tests 24 | sudo apt-get install -y default-jre-headless 25 | 26 | # Install dependencies for npm tests 27 | sudo apt-get install -y npm 28 | 29 | # The remaining dependencies are for tests behind optional features. 30 | # So feel free to skip them. 31 | 32 | # Install dependencies needed for `--features kafka-cpp-driver-tests` 33 | sudo apt-get install -y cmake g++ 34 | 35 | # Install dependencies for `--features cassandra-cpp-driver-tests` 36 | ./install_ubuntu_packages.sh 37 | -------------------------------------------------------------------------------- /shotover-proxy/build/install_ubuntu_packages.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # The cassandra integration tests also support other drivers that don't rely on external dependencies. 4 | # But if you want to test the cassandra-cpp driver, you will need to install it. 5 | # 6 | # Upstream installation information and dependencies for the Cassandra CPP driver can be found [here](https://docs.datastax.com/en/developer/cpp-driver/2.16/). 7 | # 8 | # However, that is likely unusable because datastax do not ship packages for modern ubuntu, so we have our own script that compiles, packages and installs the driver on a modern ubuntu. 9 | # So to install the driver on ubuntu, run this script. 10 | 11 | set -e 12 | 13 | cd "$(dirname "$0")" 14 | 15 | # Install dependencies of the cpp-driver even if they are already on CI so that we can run this locally 16 | sudo apt-get update 17 | sudo apt-get install -y libuv1 libuv1-dev cmake g++ libssl-dev zlib1g-dev 18 | 19 | # set VERSION to one of the tags here: https://github.com/datastax/cpp-driver/tags 20 | VERSION=2.16.2 21 | 22 | PACKAGE_NAME="cassandra-cpp-driver_${VERSION}-1_amd64" 23 | FILE_PATH="packages/${PACKAGE_NAME}.deb" 24 | 25 | # Create the package if it doesn't already exist 26 | if [ ! -f "$FILE_PATH" ]; then 27 | rm -rf cpp-driver # Clean just in case the script failed halfway through last time 28 | git clone --depth 1 --branch $VERSION https://github.com/datastax/cpp-driver 29 | pushd cpp-driver 30 | 31 | cmake -DCMAKE_POLICY_VERSION_MINIMUM=3.5 -DCMAKE_INSTALL_PREFIX:PATH=/usr -DCMAKE_INSTALL_LIBDIR:PATH=/usr/lib -Wno-error .
32 | make 33 | 34 | mkdir -p $PACKAGE_NAME/DEBIAN 35 | make DESTDIR="$PACKAGE_NAME/" install 36 | 37 | cp ../cassandra-cpp-driver.control $PACKAGE_NAME/DEBIAN/control 38 | sed -i "s/VERSION/${VERSION}/g" $PACKAGE_NAME/DEBIAN/control 39 | dpkg-deb --build $PACKAGE_NAME 40 | 41 | mkdir -p ../packages 42 | cp ${PACKAGE_NAME}.deb ../$FILE_PATH 43 | 44 | popd 45 | rm -rf cpp-driver 46 | fi 47 | 48 | sudo dpkg -i $FILE_PATH 49 | -------------------------------------------------------------------------------- /shotover-proxy/build/is_releasable.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | set -u 5 | 6 | TAG=$(git tag --points-at HEAD) 7 | 8 | if [ -z "$TAG" ]; 9 | then 10 | echo "Failed: The current commit has no git tags" 11 | exit 1 12 | fi 13 | 14 | if [[ $TAG == *$'\n'* ]]; 15 | then 16 | echo "Failed: multiple git tags are on the latest commit, but only one tag is allowed" 17 | echo "$TAG" 18 | exit 1 19 | fi 20 | 21 | TAG_VERSION=$(echo $TAG | sed -e "s/^v//") 22 | 23 | if [ -z "$TAG_VERSION" ]; 24 | then 25 | echo "Failed: git tag not valid: '$TAG'" 26 | exit 1 27 | fi 28 | 29 | BIN_VERSION="$(cargo metadata --format-version 1 --offline --no-deps | jq -c -M -r '.packages[] | select(.name == "shotover-proxy") | .version')" 30 | if [ "$TAG_VERSION" != "$BIN_VERSION" ]; 31 | then 32 | echo "Failed: git tag '$TAG_VERSION' did not match shotover-proxy version '$BIN_VERSION'" 33 | exit 1 34 | fi 35 | 36 | LIB_VERSION="$(cargo metadata --format-version 1 --offline --no-deps | jq -c -M -r '.packages[] | select(.name == "shotover") | .version')" 37 | if [ "$TAG_VERSION" != "$LIB_VERSION" ]; 38 | then 39 | echo "Failed: git tag '$TAG_VERSION' did not match shotover version '$LIB_VERSION'" 40 | exit 1 41 | fi 42 | 43 | cd shotover 44 | cargo publish --dry-run 45 | 46 | echo "Shotover repository is ready for publishing" 47 | -------------------------------------------------------------------------------- /shotover-proxy/config/config.yaml: -------------------------------------------------------------------------------- 1 | # configure the first `info` to set the log level for dependencies 2 | # configure `shotover=info` to set the log level for shotover itself 3 | # change `shotover::connection_span=info` to `shotover::connection_span=debug` to attach connection info to most log events; this is disabled by default due to a minor performance hit. 4 | main_log_level: "info, shotover=info, shotover::connection_span=info" 5 | observability_interface: "0.0.0.0:9001" 6 | -------------------------------------------------------------------------------- /shotover-proxy/config/topology.yaml: -------------------------------------------------------------------------------- 1 | # For an overview of topology configuration: https://shotover.io/docs/latest/user-guide/configuration/#topologyyaml 2 | --- 3 | # The list of sources. 4 | sources: 5 | # The source; change Valkey to the source type of the database protocol you are receiving messages in. 6 | # For a list of possible sources: https://shotover.io/docs/latest/sources 7 | - Valkey: 8 | name: "valkey" 9 | listen_addr: "127.0.0.1:6379" 10 | chain: 11 | # A DebugPrinter transform reports an INFO log for every message that passes through it. 12 | # You should delete this transform and add as many other transforms in this chain as you need.
13 | # For a list of possible transforms: https://shotover.io/docs/latest/transforms/#transforms-1 14 | - DebugPrinter 15 | 16 | # A NullSink transform drops all messages it receives. 17 | # You will want to replace this with a sink transform to send the messages to a database. 18 | # For a list of possible transforms: https://shotover.io/docs/latest/transforms/#transforms-1 19 | - NullSink 20 | -------------------------------------------------------------------------------- /shotover-proxy/src/main.rs: -------------------------------------------------------------------------------- 1 | use shotover::runner::Shotover; 2 | 3 | fn main() { 4 | Shotover::new().run_block(); 5 | } 6 | -------------------------------------------------------------------------------- /shotover-proxy/tests/cassandra_int_tests/collections/vector.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | // We don't use the collection abstractions used by list/map/set since vectors only support a small subset of data types. 4 | 5 | async fn create(connection: &CassandraConnection) { 6 | run_query( 7 | connection, 8 | "CREATE TABLE collections.vector (id int PRIMARY KEY, col0 vector<int, 1>, col1 vector<bigint, 2>, col2 vector<float, 2>, col3 vector<double, 2>);", 9 | ) 10 | .await; 11 | } 12 | 13 | async fn insert(connection: &CassandraConnection) { 14 | run_query( 15 | connection, 16 | "INSERT INTO collections.vector (id, col0, col1, col2, col3) VALUES (1, [1], [2, 3], [4.1, 5.2], [6.1, 7.2]);", 17 | ) 18 | .await; 19 | } 20 | 21 | async fn select(connection: &CassandraConnection) { 22 | let results = vec![ 23 | ResultValue::Vector(vec![ResultValue::Int(1)]), 24 | ResultValue::Vector(vec![ResultValue::BigInt(2), ResultValue::BigInt(3)]), 25 | ResultValue::Vector(vec![ 26 | ResultValue::Float(4.1.into()), 27 | ResultValue::Float(5.2.into()), 28 | ]), 29 | ResultValue::Vector(vec![ 30 | ResultValue::Double(6.1.into()), 31 | ResultValue::Double(7.2.into()), 32 | ]), 33 | ]; 34 | 35 | assert_query_result( 36 | connection, 37 | "SELECT col0, col1, col2, col3 FROM collections.vector;", 38 | &[&results], 39 | ) 40 | .await; 41 | } 42 | 43 | pub async fn test(connection: &CassandraConnection) { 44 | create(connection).await; 45 | insert(connection).await; 46 | select(connection).await; 47 | } 48 | -------------------------------------------------------------------------------- /shotover-proxy/tests/cassandra_int_tests/functions.rs: -------------------------------------------------------------------------------- 1 | use test_helpers::connection::cassandra::{ 2 | CassandraConnection, ResultValue, assert_query_result, run_query, 3 | }; 4 | 5 | async fn drop_function(session: &CassandraConnection) { 6 | assert_query_result( 7 | session, 8 | "SELECT test_function_keyspace.my_function(x, y) FROM test_function_keyspace.test_function_table WHERE id=1;", 9 | &[&[ResultValue::Int(4)]] 10 | ).await; 11 | 12 | run_query(session, "DROP FUNCTION test_function_keyspace.my_function").await; 13 | } 14 | 15 | async fn create_function(session: &CassandraConnection) { 16 | run_query( 17 | session, 18 | "CREATE FUNCTION test_function_keyspace.my_function (a int, b int) 19 | RETURNS NULL ON NULL INPUT 20 | RETURNS int 21 | LANGUAGE java 22 | AS $$ return a * b; $$;", 23 | ) 24 | .await; 25 | 26 | assert_query_result( 27 | session, 28 | "SELECT test_function_keyspace.my_function(x, y) FROM test_function_keyspace.test_function_table;", 29 | &[&[ResultValue::Int(4)], &[ResultValue::Int(9)], &[ResultValue::Int(16)]] 30 | ).await; 31 | }
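// Test flow: create the keyspace and table, batch-insert three rows for the UDF to operate on, then exercise CREATE FUNCTION followed by DROP FUNCTION.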
32 | 33 | pub async fn test(session: &CassandraConnection) { 34 | run_query( 35 | session, 36 | "CREATE KEYSPACE test_function_keyspace WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };" 37 | ).await; 38 | 39 | run_query( 40 | session, 41 | "CREATE TABLE test_function_keyspace.test_function_table (id int PRIMARY KEY, x int, y int);", 42 | ).await; 43 | 44 | run_query( 45 | session, 46 | r#"BEGIN BATCH 47 | INSERT INTO test_function_keyspace.test_function_table (id, x, y) VALUES (1, 2, 2); 48 | INSERT INTO test_function_keyspace.test_function_table (id, x, y) VALUES (2, 3, 3); 49 | INSERT INTO test_function_keyspace.test_function_table (id, x, y) VALUES (3, 4, 4); 50 | APPLY BATCH;"#, 51 | ) 52 | .await; 53 | 54 | create_function(session).await; 55 | drop_function(session).await; 56 | } 57 | -------------------------------------------------------------------------------- /shotover-proxy/tests/cassandra_int_tests/timestamp.rs: -------------------------------------------------------------------------------- 1 | use std::time::{SystemTime, UNIX_EPOCH}; 2 | use test_helpers::connection::cassandra::{ 3 | CassandraConnection, ResultValue, assert_query_result, run_query, 4 | }; 5 | 6 | async fn flag(connection: &CassandraConnection) { 7 | let timestamp = get_timestamp(); 8 | 9 | connection 10 | .execute_with_timestamp( 11 | "UPDATE test_timestamps.test_table SET a = 'a1-modified-1' WHERE id = 0;", 12 | timestamp, 13 | ) 14 | .await 15 | .unwrap(); 16 | 17 | assert_query_result( 18 | connection, 19 | "SELECT WRITETIME(a) FROM test_timestamps.test_table WHERE id = 0;", 20 | &[&[ResultValue::BigInt(timestamp)]], 21 | ) 22 | .await; 23 | } 24 | 25 | async fn query(connection: &CassandraConnection) { 26 | let timestamp = get_timestamp(); 27 | 28 | run_query( 29 | connection, 30 | &format!( 31 | "UPDATE test_timestamps.test_table USING TIMESTAMP {} SET a = 'a1-modified-1' WHERE id = 0;", 32 | timestamp 33 | ), 34 | ) 35 | .await; 36 | 37 | assert_query_result( 38 | connection, 39 | "SELECT WRITETIME(a) FROM test_timestamps.test_table WHERE id = 0;", 40 | &[&[ResultValue::BigInt(timestamp)]], 41 | ) 42 | .await; 43 | } 44 | 45 | // use the current system timestamp because if we send one with an 46 | // earlier timestamp than Cassandra has as the last write 47 | // (the insert in `fn test`) it will be ignored. 
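// For example, an UPDATE sent with USING TIMESTAMP 100 after a write recorded at timestamp 200 is silently discarded: Cassandra resolves conflicting writes to a cell by last-write-wins on the write timestamp.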
48 | fn get_timestamp() -> i64 { 49 | SystemTime::now() 50 | .duration_since(UNIX_EPOCH) 51 | .unwrap() 52 | .as_micros() 53 | .try_into() 54 | .unwrap() 55 | } 56 | 57 | pub async fn test(connection: &CassandraConnection) { 58 | run_query(connection, "CREATE KEYSPACE test_timestamps WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor': 1 };").await; 59 | run_query( 60 | connection, 61 | "CREATE TABLE test_timestamps.test_table (id int PRIMARY KEY, a text);", 62 | ) 63 | .await; 64 | 65 | assert_query_result( 66 | connection, 67 | "INSERT INTO test_timestamps.test_table (id, a) VALUES (0, 'a1');", 68 | &[], 69 | ) 70 | .await; 71 | assert_query_result( 72 | connection, 73 | "INSERT INTO test_timestamps.test_table (id, a) VALUES (1, 'a2');", 74 | &[], 75 | ) 76 | .await; 77 | 78 | query(connection).await; 79 | flag(connection).await; 80 | } 81 | -------------------------------------------------------------------------------- /shotover-proxy/tests/cassandra_int_tests/udt.rs: -------------------------------------------------------------------------------- 1 | use test_helpers::connection::cassandra::{CassandraConnection, run_query}; 2 | 3 | async fn test_create_udt(session: &CassandraConnection) { 4 | run_query( 5 | session, 6 | "CREATE TYPE test_udt_keyspace.test_type_name (foo text, bar int)", 7 | ) 8 | .await; 9 | run_query( 10 | session, 11 | "CREATE TABLE test_udt_keyspace.test_table (id int PRIMARY KEY, foo test_type_name);", 12 | ) 13 | .await; 14 | run_query( 15 | session, 16 | "INSERT INTO test_udt_keyspace.test_table (id, foo) VALUES (1, {foo: 'yes', bar: 1})", 17 | ) 18 | .await; 19 | } 20 | 21 | async fn test_drop_udt(session: &CassandraConnection) { 22 | run_query( 23 | session, 24 | "CREATE TYPE test_udt_keyspace.test_type_drop_me (foo text, bar int)", 25 | ) 26 | .await; 27 | run_query(session, "DROP TYPE test_udt_keyspace.test_type_drop_me;").await; 28 | // TODO: This exposes a bug in at least cassandra 3 29 | // The error is supposed to be a 0x2200 syntax error but rarely cassandra will return a 0x0000 server error instead. 30 | // The cpp driver interprets 0x0000 as different to 0x2200 and considers the node as down and will no longer talk to it. 
31 | // We should eventually reenable this test when we upgrade to cassandra 4 (the bug may also need to be reported and fixed in cassandra 3 and/or 4) 32 | // session.execute_expect_err_contains( 33 | // "CREATE TABLE test_udt_keyspace.test_delete_table (id int PRIMARY KEY, foo test_type_drop_me);", 34 | // "Unknown type test_udt_keyspace.test_type_drop_me", 35 | // ); 36 | } 37 | 38 | pub async fn test(session: &CassandraConnection) { 39 | run_query(session, "CREATE KEYSPACE test_udt_keyspace WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };").await; 40 | test_create_udt(session).await; 41 | test_drop_udt(session).await; 42 | } 43 | -------------------------------------------------------------------------------- /shotover-proxy/tests/lib.rs: -------------------------------------------------------------------------------- 1 | #[allow(clippy::single_component_path_imports, unused_imports)] 2 | use rstest_reuse; 3 | 4 | use test_helpers::shotover_process::ShotoverProcessBuilder; 5 | use tokio_bin_process::bin_path; 6 | 7 | #[cfg(feature = "cassandra")] 8 | mod cassandra_int_tests; 9 | #[cfg(feature = "kafka")] 10 | mod kafka_int_tests; 11 | #[cfg(all(feature = "alpha-transforms", feature = "opensearch"))] 12 | mod opensearch_int_tests; 13 | #[cfg(feature = "valkey")] 14 | mod runner; 15 | #[cfg(feature = "valkey")] 16 | mod transforms; 17 | #[cfg(feature = "valkey")] 18 | mod valkey_int_tests; 19 | 20 | pub fn shotover_process(topology_path: &str) -> ShotoverProcessBuilder { 21 | ShotoverProcessBuilder::new_with_topology(topology_path) 22 | .with_bin(bin_path!("shotover-proxy")) 23 | .with_config("tests/test-configs/shotover-config/config1.yaml") 24 | } 25 | 26 | #[cfg(target_os = "macos")] 27 | #[cfg(any(feature = "cassandra", feature = "valkey"))] 28 | const CONNECTION_REFUSED_OS_ERROR: i32 = 61; 29 | 30 | #[cfg(not(target_os = "macos"))] 31 | #[cfg(any(feature = "cassandra", feature = "valkey"))] 32 | const CONNECTION_REFUSED_OS_ERROR: i32 = 111; 33 | -------------------------------------------------------------------------------- /shotover-proxy/tests/runner/mod.rs: -------------------------------------------------------------------------------- 1 | mod observability_int_tests; 2 | mod runner_int_tests; 3 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cassandra-5/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | cassandra-one: 3 | image: shotover/cassandra-test:5.0-rc1-r3 4 | ports: 5 | - "9043:9042" 6 | environment: 7 | MAX_HEAP_SIZE: "400M" 8 | MIN_HEAP_SIZE: "400M" 9 | HEAP_NEWSIZE: "48M" 10 | USER_DEFINED_FUNCTIONS_ENABLED: "true" 11 | volumes: 12 | - type: tmpfs 13 | target: /var/lib/cassandra 14 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cassandra-5/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - CassandraSinkSingle: 8 | remote_address: "127.0.0.1:9043" 9 | connect_timeout_ms: 3000 10 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cluster-multi-rack-2-per-rack/topology_rack1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - 
Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - CassandraSinkCluster: 8 | first_contact_points: ["172.16.1.2:9042", "172.16.1.3:9042"] 9 | local_shotover_host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 10 | shotover_nodes: 11 | - address: "127.0.0.1:9042" 12 | data_center: "datacenter1" 13 | rack: "rack1" 14 | host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 15 | - address: "127.0.0.2:9042" 16 | data_center: "datacenter1" 17 | rack: "rack2" 18 | host_id: "3c3c4e2d-ba74-4f76-b52e-fb5bcee6a9f4" 19 | - address: "127.0.0.3:9042" 20 | data_center: "datacenter1" 21 | rack: "rack3" 22 | host_id: "fa74d7ec-1223-472b-97de-04a32ccdb70b" 23 | connect_timeout_ms: 3000 24 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cluster-multi-rack-2-per-rack/topology_rack2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.2:9042" 6 | chain: 7 | - CassandraSinkCluster: 8 | first_contact_points: ["172.16.1.2:9042", "172.16.1.3:9042"] 9 | local_shotover_host_id: "3c3c4e2d-ba74-4f76-b52e-fb5bcee6a9f4" 10 | shotover_nodes: 11 | - address: "127.0.0.1:9042" 12 | data_center: "datacenter1" 13 | rack: "rack1" 14 | host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 15 | - address: "127.0.0.2:9042" 16 | data_center: "datacenter1" 17 | rack: "rack2" 18 | host_id: "3c3c4e2d-ba74-4f76-b52e-fb5bcee6a9f4" 19 | - address: "127.0.0.3:9042" 20 | data_center: "datacenter1" 21 | rack: "rack3" 22 | host_id: "fa74d7ec-1223-472b-97de-04a32ccdb70b" 23 | connect_timeout_ms: 3000 24 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cluster-multi-rack-2-per-rack/topology_rack3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.3:9042" 6 | chain: 7 | - CassandraSinkCluster: 8 | first_contact_points: ["172.16.1.2:9042", "172.16.1.3:9042"] 9 | local_shotover_host_id: "fa74d7ec-1223-472b-97de-04a32ccdb70b" 10 | shotover_nodes: 11 | - address: "127.0.0.1:9042" 12 | data_center: "datacenter1" 13 | rack: "rack1" 14 | host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 15 | - address: "127.0.0.2:9042" 16 | data_center: "datacenter1" 17 | rack: "rack2" 18 | host_id: "3c3c4e2d-ba74-4f76-b52e-fb5bcee6a9f4" 19 | - address: "127.0.0.3:9042" 20 | data_center: "datacenter1" 21 | rack: "rack3" 22 | host_id: "fa74d7ec-1223-472b-97de-04a32ccdb70b" 23 | connect_timeout_ms: 3000 24 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cluster-multi-rack/topology_rack1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - CassandraSinkCluster: 8 | first_contact_points: ["172.16.1.2:9042", "172.16.1.3:9042"] 9 | local_shotover_host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 10 | shotover_nodes: 11 | - address: "127.0.0.1:9042" 12 | data_center: "datacenter1" 13 | rack: "rack1" 14 | host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 15 | - address: "127.0.0.2:9042" 16 | data_center: "datacenter1" 17 | rack: "rack2" 18 | host_id: "3c3c4e2d-ba74-4f76-b52e-fb5bcee6a9f4" 19 | - address: "127.0.0.3:9042" 20 | data_center: 
"datacenter1" 21 | rack: "rack3" 22 | host_id: "fa74d7ec-1223-472b-97de-04a32ccdb70b" 23 | connect_timeout_ms: 3000 24 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cluster-multi-rack/topology_rack2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.2:9042" 6 | chain: 7 | - CassandraSinkCluster: 8 | first_contact_points: ["172.16.1.2:9042", "172.16.1.3:9042"] 9 | local_shotover_host_id: "3c3c4e2d-ba74-4f76-b52e-fb5bcee6a9f4" 10 | shotover_nodes: 11 | - address: "127.0.0.1:9042" 12 | data_center: "datacenter1" 13 | rack: "rack1" 14 | host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 15 | - address: "127.0.0.2:9042" 16 | data_center: "datacenter1" 17 | rack: "rack2" 18 | host_id: "3c3c4e2d-ba74-4f76-b52e-fb5bcee6a9f4" 19 | - address: "127.0.0.3:9042" 20 | data_center: "datacenter1" 21 | rack: "rack3" 22 | host_id: "fa74d7ec-1223-472b-97de-04a32ccdb70b" 23 | connect_timeout_ms: 3000 24 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cluster-multi-rack/topology_rack3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.3:9042" 6 | chain: 7 | - CassandraSinkCluster: 8 | first_contact_points: ["172.16.1.2:9042", "172.16.1.3:9042"] 9 | local_shotover_host_id: "fa74d7ec-1223-472b-97de-04a32ccdb70b" 10 | shotover_nodes: 11 | - address: "127.0.0.1:9042" 12 | data_center: "datacenter1" 13 | rack: "rack1" 14 | host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 15 | - address: "127.0.0.2:9042" 16 | data_center: "datacenter1" 17 | rack: "rack2" 18 | host_id: "3c3c4e2d-ba74-4f76-b52e-fb5bcee6a9f4" 19 | - address: "127.0.0.3:9042" 20 | data_center: "datacenter1" 21 | rack: "rack3" 22 | host_id: "fa74d7ec-1223-472b-97de-04a32ccdb70b" 23 | connect_timeout_ms: 3000 24 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cluster-tls/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | tls: 7 | certificate_path: "tests/test-configs/cassandra/tls/certs/localhost.crt" 8 | private_key_path: "tests/test-configs/cassandra/tls/certs/localhost.key" 9 | chain: 10 | - CassandraSinkCluster: 11 | first_contact_points: ["172.16.1.2:9042", "172.16.1.3:9042"] 12 | local_shotover_host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 13 | shotover_nodes: 14 | - address: "127.0.0.1:9042" 15 | data_center: "datacenter1" 16 | rack: "rack1" 17 | host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 18 | connect_timeout_ms: 3000 19 | tls: 20 | certificate_authority_path: "tests/test-configs/cassandra/tls/certs/localhost_CA.crt" 21 | certificate_path: "tests/test-configs/cassandra/tls/certs/localhost.crt" 22 | private_key_path: "tests/test-configs/cassandra/tls/certs/localhost.key" 23 | verify_hostname: false 24 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cluster-v3/topology-dummy-peers.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: 
"127.0.0.1:9042" 6 | chain: 7 | - CassandraSinkCluster: 8 | first_contact_points: ["172.16.1.2:9042", "172.16.1.3:9042"] 9 | local_shotover_host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 10 | connect_timeout_ms: 3000 11 | shotover_nodes: 12 | - address: "127.0.0.1:9042" 13 | data_center: "datacenter1" 14 | rack: "rack1" 15 | host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 16 | # These extra nodes dont really make sense, its pointing at the same address as the local shotover node. 17 | # It is however useful for testing the functionality of the system.peers rewriting. 18 | # We can make stronger assertions against the values returned by system.peers with this config because 19 | # more system.peers fields are static due to always being queried against this one shotover instance. 20 | - address: "127.0.0.1:9042" 21 | data_center: "datacenter1" 22 | rack: "rack1" 23 | host_id: "3c3c4e2d-ba74-4f76-b52e-fb5bcee6a9f4" 24 | - address: "127.0.0.1:9042" 25 | data_center: "datacenter1" 26 | rack: "rack1" 27 | host_id: "fa74d7ec-1223-472b-97de-04a32ccdb70b" 28 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cluster-v3/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - CassandraSinkCluster: 8 | first_contact_points: ["172.16.1.2:9042", "172.16.1.3:9042"] 9 | local_shotover_host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 10 | shotover_nodes: 11 | - address: "127.0.0.1:9042" 12 | data_center: "datacenter1" 13 | rack: "rack1" 14 | host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 15 | connect_timeout_ms: 3000 16 | 17 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cluster-v4/topology-dummy-peers.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - CassandraSinkCluster: 8 | first_contact_points: ["172.16.1.2:9044", "172.16.1.3:9044"] 9 | local_shotover_host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 10 | shotover_nodes: 11 | - address: "127.0.0.1:9042" 12 | data_center: "datacenter1" 13 | rack: "rack1" 14 | host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 15 | # These extra nodes dont really make sense, its pointing at the same address as the local shotover node. 16 | # It is however useful for testing the functionality of the system.peers rewriting. 17 | # We can make stronger assertions against the values returned by system.peers with this config because 18 | # more system.peers fields are static due to always being queried against this one shotover instance. 
19 | - address: "127.0.0.1:9042" 20 | data_center: "datacenter1" 21 | rack: "rack1" 22 | host_id: "3c3c4e2d-ba74-4f76-b52e-fb5bcee6a9f4" 23 | - address: "127.0.0.1:9042" 24 | data_center: "datacenter1" 25 | rack: "rack1" 26 | host_id: "fa74d7ec-1223-472b-97de-04a32ccdb70b" 27 | connect_timeout_ms: 3000 28 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cluster-v4/topology-encode.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - DebugForceEncode: 8 | encode_requests: true 9 | encode_responses: true 10 | - CassandraSinkCluster: 11 | first_contact_points: ["172.16.1.2:9044", "172.16.1.3:9044"] 12 | local_shotover_host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 13 | shotover_nodes: 14 | - address: "127.0.0.1:9042" 15 | data_center: "datacenter1" 16 | rack: "rack1" 17 | host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 18 | connect_timeout_ms: 3000 19 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cluster-v4/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - CassandraSinkCluster: 8 | first_contact_points: ["172.16.1.2:9044", "172.16.1.3:9044"] 9 | local_shotover_host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 10 | shotover_nodes: 11 | - address: "127.0.0.1:9042" 12 | data_center: "datacenter1" 13 | rack: "rack1" 14 | host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 15 | connect_timeout_ms: 3000 16 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/cluster-v5/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - CassandraSinkCluster: 8 | first_contact_points: ["172.16.1.2:9044", "172.16.1.3:9044"] 9 | local_shotover_host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 10 | shotover_nodes: 11 | - address: "127.0.0.1:9042" 12 | data_center: "datacenter1" 13 | rack: "rack1" 14 | host_id: "2dd022d6-2937-4754-89d6-02d2933a8f7a" 15 | connect_timeout_ms: 3000 16 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/passthrough-parse-request/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | cassandra-one: 3 | image: shotover/cassandra-test:4.0.6-r1 4 | ports: 5 | - "9043:9042" 6 | environment: 7 | MAX_HEAP_SIZE: "400M" 8 | MIN_HEAP_SIZE: "400M" 9 | HEAP_NEWSIZE: "48M" 10 | volumes: 11 | - type: tmpfs 12 | target: /var/lib/cassandra 13 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/passthrough-parse-request/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - DebugForceParse: 8 | parse_requests: true 9 | parse_responses: false 10 | - CassandraSinkSingle: 11 | remote_address: "127.0.0.1:9043" 12 | connect_timeout_ms: 3000 13 | 
-------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/passthrough-parse-response/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | cassandra-one: 3 | image: shotover/cassandra-test:4.0.6-r1 4 | ports: 5 | - "9043:9042" 6 | environment: 7 | MAX_HEAP_SIZE: "400M" 8 | MIN_HEAP_SIZE: "400M" 9 | HEAP_NEWSIZE: "48M" 10 | volumes: 11 | - type: tmpfs 12 | target: /var/lib/cassandra 13 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/passthrough-parse-response/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - DebugForceParse: 8 | parse_requests: false 9 | parse_responses: true 10 | - CassandraSinkSingle: 11 | remote_address: "127.0.0.1:9043" 12 | connect_timeout_ms: 3000 13 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/passthrough-websocket-tls/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | cassandra-one: 3 | image: shotover/cassandra-test:4.0.6-r1 4 | ports: 5 | - "9043:9042" 6 | environment: 7 | MAX_HEAP_SIZE: "400M" 8 | MIN_HEAP_SIZE: "400M" 9 | HEAP_NEWSIZE: "48M" 10 | volumes: 11 | - type: tmpfs 12 | target: /var/lib/cassandra 13 | - type: bind 14 | source: "../tls/certs/keystore.p12" 15 | target: "/etc/cassandra/certs/keystore.p12" 16 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/passthrough-websocket-tls/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | transport: WebSocket 7 | tls: 8 | certificate_path: "tests/test-configs/cassandra/tls/certs/localhost.crt" 9 | private_key_path: "tests/test-configs/cassandra/tls/certs/localhost.key" 10 | chain: 11 | - CassandraSinkSingle: 12 | remote_address: "127.0.0.1:9043" 13 | connect_timeout_ms: 3000 14 | tls: 15 | certificate_authority_path: "tests/test-configs/cassandra/tls/certs/localhost_CA.crt" 16 | certificate_path: "tests/test-configs/cassandra/tls/certs/localhost.crt" 17 | private_key_path: "tests/test-configs/cassandra/tls/certs/localhost.key" 18 | verify_hostname: false 19 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/passthrough-websocket/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | cassandra-one: 3 | image: shotover/cassandra-test:4.0.6-r1 4 | ports: 5 | - "9043:9042" 6 | environment: 7 | MAX_HEAP_SIZE: "400M" 8 | MIN_HEAP_SIZE: "400M" 9 | HEAP_NEWSIZE: "48M" 10 | volumes: 11 | - type: tmpfs 12 | target: /var/lib/cassandra 13 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/passthrough-websocket/topology-encode.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | transport: WebSocket 7 | chain: 8 | - DebugForceEncode: 9 | 
encode_requests: true 10 | encode_responses: true 11 | - CassandraSinkSingle: 12 | remote_address: "127.0.0.1:9043" 13 | connect_timeout_ms: 3000 14 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/passthrough-websocket/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | transport: WebSocket 7 | chain: 8 | - CassandraSinkSingle: 9 | remote_address: "127.0.0.1:9043" 10 | connect_timeout_ms: 3000 11 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/passthrough/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | cassandra-one: 3 | image: shotover/cassandra-test:4.0.6-r1 4 | ports: 5 | - "9043:9042" 6 | environment: 7 | MAX_HEAP_SIZE: "400M" 8 | MIN_HEAP_SIZE: "400M" 9 | HEAP_NEWSIZE: "48M" 10 | volumes: 11 | - type: tmpfs 12 | target: /var/lib/cassandra 13 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/passthrough/topology-encode.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - DebugForceEncode: 8 | encode_requests: true 9 | encode_responses: true 10 | - CassandraSinkSingle: 11 | remote_address: "127.0.0.1:9043" 12 | connect_timeout_ms: 3000 13 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/passthrough/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - CassandraSinkSingle: 8 | remote_address: "127.0.0.1:9043" 9 | connect_timeout_ms: 3000 10 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/peers-rewrite/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra1" 5 | listen_addr: "127.0.0.1:9043" 6 | chain: 7 | - CassandraSinkSingle: 8 | remote_address: "172.16.1.2:9042" 9 | connect_timeout_ms: 3000 10 | 11 | - Cassandra: 12 | name: "cassandra2" 13 | listen_addr: "127.0.0.1:9044" 14 | chain: 15 | - CassandraPeersRewrite: 16 | port: 9044 17 | - CassandraSinkSingle: 18 | remote_address: "172.16.1.2:9042" 19 | connect_timeout_ms: 3000 20 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/protect-aws/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | cassandra-one: 3 | image: shotover/cassandra-test:4.0.6-r1 4 | ports: 5 | - "9043:9042" 6 | environment: 7 | MAX_HEAP_SIZE: "400M" 8 | MIN_HEAP_SIZE: "400M" 9 | HEAP_NEWSIZE: "48M" 10 | volumes: 11 | - type: tmpfs 12 | target: /var/lib/cassandra 13 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/protect-aws/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 
| name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - Protect: 8 | key_manager: 9 | AWSKms: 10 | endpoint: "http://localhost:5000" 11 | region: "us-east-1" 12 | cmk_id: "alias/aws/secretsmanager" 13 | number_of_bytes: 32 14 | keyspace_table_columns: 15 | test_protect_keyspace: 16 | test_table: 17 | - col1 18 | - CassandraSinkSingle: 19 | remote_address: "127.0.0.1:9043" 20 | connect_timeout_ms: 3000 21 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/protect-local/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | cassandra-one: 3 | image: shotover/cassandra-test:4.0.6-r1 4 | ports: 5 | - "9043:9042" 6 | environment: 7 | MAX_HEAP_SIZE: "400M" 8 | MIN_HEAP_SIZE: "400M" 9 | HEAP_NEWSIZE: "48M" 10 | volumes: 11 | - type: tmpfs 12 | target: /var/lib/cassandra 13 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/protect-local/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - Protect: 8 | key_manager: 9 | Local: 10 | kek: Ht8M1nDO/7fay+cft71M2Xy7j30EnLAsA84hSUMCm1k= 11 | kek_id: "" 12 | keyspace_table_columns: 13 | test_protect_keyspace: 14 | test_table: 15 | - col1 16 | - CassandraSinkSingle: 17 | remote_address: "127.0.0.1:9043" 18 | connect_timeout_ms: 3000 19 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/request-throttling.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - RequestThrottling: 8 | max_requests_per_second: 50 9 | - CassandraSinkSingle: 10 | remote_address: "127.0.0.1:9043" 11 | connect_timeout_ms: 3000 12 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/request-throttling/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | cassandra-one: 3 | image: shotover/cassandra-test:4.0.6-r1 4 | ports: 5 | - "9043:9042" 6 | environment: 7 | MAX_HEAP_SIZE: "400M" 8 | MIN_HEAP_SIZE: "400M" 9 | HEAP_NEWSIZE: "48M" 10 | volumes: 11 | - type: tmpfs 12 | target: /var/lib/cassandra 13 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/request-throttling/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - RequestThrottling: 8 | max_requests_per_second: 100000 9 | - CassandraSinkSingle: 10 | remote_address: "127.0.0.1:9043" 11 | connect_timeout_ms: 3000 12 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/tls/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | cassandra-one: 3 | image: shotover/cassandra-test:4.0.6-r1 4 | ports: 5 | - "9042:9042" 6 | environment: 7 | MAX_HEAP_SIZE: "400M" 8 | MIN_HEAP_SIZE: "400M" 9 | HEAP_NEWSIZE: "48M" 10 | volumes: 11 | - type: 
tmpfs 12 | target: /var/lib/cassandra 13 | - type: bind 14 | source: "./certs/keystore.p12" 15 | target: "/etc/cassandra/certs/keystore.p12" 16 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/tls/topology-with-key.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9043" 6 | tls: 7 | certificate_path: "tests/test-configs/cassandra/tls/certs/localhost.crt" 8 | private_key_path: "tests/test-configs/cassandra/tls/certs/localhost.key" 9 | chain: 10 | - CassandraSinkSingle: 11 | remote_address: "localhost:9042" 12 | connect_timeout_ms: 3000 13 | tls: 14 | certificate_authority_path: "tests/test-configs/cassandra/tls/certs/localhost_CA.crt" 15 | certificate_path: "tests/test-configs/cassandra/tls/certs/localhost.crt" 16 | private_key_path: "tests/test-configs/cassandra/tls/certs/localhost.key" 17 | verify_hostname: true 18 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/tls/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9043" 6 | tls: 7 | certificate_path: "tests/test-configs/cassandra/tls/certs/localhost.crt" 8 | private_key_path: "tests/test-configs/cassandra/tls/certs/localhost.key" 9 | chain: 10 | - CassandraSinkSingle: 11 | remote_address: "localhost:9042" 12 | connect_timeout_ms: 3000 13 | tls: 14 | certificate_authority_path: "tests/test-configs/cassandra/tls/certs/localhost_CA.crt" 15 | verify_hostname: true 16 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/valkey-cache/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | valkey-one: 3 | image: bitnami/valkey:7.2.5-debian-12-r9 4 | ports: 5 | - "6379:6379" 6 | environment: 7 | ALLOW_EMPTY_PASSWORD: "yes" 8 | VALKEY_TLS_ENABLED: "no" 9 | 10 | cassandra-one: 11 | image: shotover/cassandra-test:4.0.6-r1 12 | ports: 13 | - "9043:9042" 14 | environment: 15 | MAX_HEAP_SIZE: "400M" 16 | MIN_HEAP_SIZE: "400M" 17 | HEAP_NEWSIZE: "48M" 18 | volumes: 19 | - type: tmpfs 20 | target: /var/lib/cassandra 21 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/cassandra/valkey-cache/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - ValkeyCache: 8 | caching_schema: 9 | test_cache_keyspace_batch_insert.test_table: 10 | partition_key: [id] 11 | range_key: [] 12 | test_cache_keyspace_simple.test_table: 13 | partition_key: [id] 14 | range_key: [] 15 | chain: 16 | - ValkeySinkSingle: 17 | remote_address: "127.0.0.1:6379" 18 | connect_timeout_ms: 3000 19 | - CassandraSinkSingle: 20 | remote_address: "127.0.0.1:9043" 21 | connect_timeout_ms: 3000 22 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/invalid_non_terminating_last.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | chain: 7 | - 
DebugPrinter 8 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/invalid_protocol_mismatch.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:6379" 6 | chain: 7 | - QueryCounter: 8 | name: cassandra 9 | - ValkeySinkSingle: 10 | remote_address: "127.0.0.1:1111" 11 | connect_timeout_ms: 3000 12 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/invalid_subchains.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey1" 5 | listen_addr: "127.0.0.1:6379" 6 | chain: 7 | - NullSink 8 | - NullSink 9 | - DebugPrinter 10 | - Valkey: 11 | name: "valkey2" 12 | listen_addr: "127.0.0.1:6379" 13 | chain: 14 | - DebugPrinter 15 | - ParallelMap: 16 | chain: 17 | - NullSink 18 | - DebugPrinter 19 | parallelism: 1 20 | ordered_results: false 21 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/invalid_terminating_not_last.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | chain: 7 | - NullSink 8 | - DebugPrinter 9 | - NullSink 10 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/bench/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | kafka0: 3 | image: 'bitnami/kafka:3.9.0-debian-12-r6' 4 | ports: 5 | - '9192:9192' 6 | environment: 7 | - KAFKA_CFG_LISTENERS=BROKER://:9192,CONTROLLER://:9093 8 | - KAFKA_CFG_ADVERTISED_LISTENERS=BROKER://127.0.0.1:9192 9 | - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,BROKER:PLAINTEXT 10 | - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=BROKER 11 | - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER 12 | - KAFKA_CFG_PROCESS_ROLES=controller,broker 13 | - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka0:9093 14 | - KAFKA_CFG_NODE_ID=0 15 | - ALLOW_PLAINTEXT_LISTENER=yes 16 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-1-rack/topology-single.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "127.0.0.1:9192" 10 | address_for_clients: "127.0.0.1:9192" 11 | rack: "rack0" 12 | broker_id: 0 13 | local_shotover_broker_id: 0 14 | first_contact_points: ["172.16.1.2:9092"] 15 | connect_timeout_ms: 3000 16 | check_shotover_peers_delay_ms: 3000 17 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-1-rack/topology1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9191" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "127.0.0.1:9191" 10 | address_for_clients: "127.0.0.1:9191" 11 | rack: "rack0" 12 | broker_id: 0 13 | - address_for_peers: "127.0.0.1:9192" 14 | address_for_clients: "127.0.0.1:9192" 15 | rack: 
"rack0" 16 | broker_id: 1 17 | - address_for_peers: "127.0.0.1:9193" 18 | address_for_clients: "127.0.0.1:9193" 19 | rack: "rack0" 20 | broker_id: 2 21 | local_shotover_broker_id: 0 22 | first_contact_points: ["172.16.1.2:9092"] 23 | connect_timeout_ms: 3000 24 | check_shotover_peers_delay_ms: 3000 25 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-1-rack/topology2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "127.0.0.1:9191" 10 | address_for_clients: "127.0.0.1:9191" 11 | rack: "rack0" 12 | broker_id: 0 13 | - address_for_peers: "127.0.0.1:9192" 14 | address_for_clients: "127.0.0.1:9192" 15 | rack: "rack0" 16 | broker_id: 1 17 | - address_for_peers: "127.0.0.1:9193" 18 | address_for_clients: "127.0.0.1:9193" 19 | rack: "rack0" 20 | broker_id: 2 21 | local_shotover_broker_id: 1 22 | first_contact_points: ["172.16.1.2:9092"] 23 | connect_timeout_ms: 3000 24 | check_shotover_peers_delay_ms: 3000 25 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-1-rack/topology3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9193" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "127.0.0.1:9191" 10 | address_for_clients: "127.0.0.1:9191" 11 | rack: "rack0" 12 | broker_id: 0 13 | - address_for_peers: "127.0.0.1:9192" 14 | address_for_clients: "127.0.0.1:9192" 15 | rack: "rack0" 16 | broker_id: 1 17 | - address_for_peers: "127.0.0.1:9193" 18 | address_for_clients: "127.0.0.1:9193" 19 | rack: "rack0" 20 | broker_id: 2 21 | local_shotover_broker_id: 2 22 | first_contact_points: ["172.16.1.2:9092"] 23 | connect_timeout_ms: 3000 24 | check_shotover_peers_delay_ms: 3000 25 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-2-racks/topology-rack1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9191" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "localhost:9191" 10 | address_for_clients: "127.0.0.1:9191" 11 | rack: "rack1" 12 | broker_id: 0 13 | - address_for_peers: "localhost:9192" 14 | address_for_clients: "127.0.0.1:9192" 15 | rack: "rack2" 16 | broker_id: 1 17 | local_shotover_broker_id: 0 18 | first_contact_points: ["172.16.1.2:9092"] 19 | connect_timeout_ms: 3000 20 | check_shotover_peers_delay_ms: 3000 21 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-2-racks/topology-rack2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "localhost:9191" 10 | address_for_clients: "127.0.0.1:9191" 11 | rack: "rack1" 12 | broker_id: 0 13 | - address_for_peers: "localhost:9192" 14 | address_for_clients: "127.0.0.1:9192" 15 | rack: "rack2" 16 | broker_id: 1 17 | local_shotover_broker_id: 1 18 | 
first_contact_points: ["172.16.1.5:9092"] 19 | connect_timeout_ms: 3000 20 | check_shotover_peers_delay_ms: 3000 21 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-3-racks/topology-rack1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9191" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "localhost:9191" 10 | address_for_clients: "localhost:9191" 11 | rack: "rack1" 12 | broker_id: 0 13 | - address_for_peers: "localhost:9192" 14 | address_for_clients: "localhost:9192" 15 | rack: "rack2" 16 | broker_id: 1 17 | - address_for_peers: "localhost:9193" 18 | address_for_clients: "localhost:9193" 19 | rack: "rack3" 20 | broker_id: 2 21 | local_shotover_broker_id: 0 22 | first_contact_points: ["172.16.1.2:9092"] 23 | connect_timeout_ms: 3000 24 | check_shotover_peers_delay_ms: 3000 25 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-3-racks/topology-rack2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "localhost:9191" 10 | address_for_clients: "localhost:9191" 11 | rack: "rack1" 12 | broker_id: 0 13 | - address_for_peers: "localhost:9192" 14 | address_for_clients: "localhost:9192" 15 | rack: "rack2" 16 | broker_id: 1 17 | - address_for_peers: "localhost:9193" 18 | address_for_clients: "localhost:9193" 19 | rack: "rack3" 20 | broker_id: 2 21 | local_shotover_broker_id: 1 22 | first_contact_points: ["172.16.1.3:9092"] 23 | connect_timeout_ms: 3000 24 | check_shotover_peers_delay_ms: 3000 25 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-3-racks/topology-rack3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9193" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "localhost:9191" 10 | address_for_clients: "localhost:9191" 11 | rack: "rack1" 12 | broker_id: 0 13 | - address_for_peers: "localhost:9192" 14 | address_for_clients: "localhost:9192" 15 | rack: "rack2" 16 | broker_id: 1 17 | - address_for_peers: "localhost:9193" 18 | address_for_clients: "localhost:9193" 19 | rack: "rack3" 20 | broker_id: 2 21 | local_shotover_broker_id: 2 22 | first_contact_points: ["172.16.1.4:9092"] 23 | connect_timeout_ms: 3000 24 | check_shotover_peers_delay_ms: 3000 25 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-mtls/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | tls: 7 | certificate_path: "tests/test-configs/kafka/tls/certs/localhost.crt" 8 | private_key_path: "tests/test-configs/kafka/tls/certs/localhost.key" 9 | chain: 10 | - KafkaSinkCluster: 11 | shotover_nodes: 12 | - address_for_peers: "localhost:9192" 13 | address_for_clients: "localhost:9192" 14 | rack: "rack0" 15 | broker_id: 0 16 | local_shotover_broker_id: 0 17 | first_contact_points: 
["172.16.1.2:9092"] 18 | connect_timeout_ms: 3000 19 | check_shotover_peers_delay_ms: 3000 20 | tls: 21 | certificate_authority_path: "tests/test-configs/kafka/tls/certs/localhost_CA.crt" 22 | certificate_path: "tests/test-configs/kafka/tls/certs/localhost.crt" 23 | private_key_path: "tests/test-configs/kafka/tls/certs/localhost.key" 24 | verify_hostname: true 25 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-sasl-plain/topology-single.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "127.0.0.1:9192" 10 | address_for_clients: "127.0.0.1:9192" 11 | rack: "rack0" 12 | broker_id: 0 13 | local_shotover_broker_id: 0 14 | first_contact_points: ["172.16.1.2:9092"] 15 | connect_timeout_ms: 3000 16 | check_shotover_peers_delay_ms: 3000 17 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-sasl-plain/topology1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9191" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "127.0.0.1:9191" 10 | address_for_clients: "127.0.0.1:9191" 11 | rack: "rack0" 12 | broker_id: 0 13 | - address_for_peers: "127.0.0.1:9192" 14 | address_for_clients: "127.0.0.1:9192" 15 | rack: "rack0" 16 | broker_id: 1 17 | - address_for_peers: "127.0.0.1:9193" 18 | address_for_clients: "127.0.0.1:9193" 19 | rack: "rack0" 20 | broker_id: 2 21 | local_shotover_broker_id: 0 22 | first_contact_points: ["172.16.1.2:9092"] 23 | connect_timeout_ms: 3000 24 | check_shotover_peers_delay_ms: 3000 25 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-sasl-plain/topology2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "127.0.0.1:9191" 10 | address_for_clients: "127.0.0.1:9191" 11 | rack: "rack0" 12 | broker_id: 0 13 | - address_for_peers: "127.0.0.1:9192" 14 | address_for_clients: "127.0.0.1:9192" 15 | rack: "rack0" 16 | broker_id: 1 17 | - address_for_peers: "127.0.0.1:9193" 18 | address_for_clients: "127.0.0.1:9193" 19 | rack: "rack0" 20 | broker_id: 2 21 | local_shotover_broker_id: 1 22 | first_contact_points: ["172.16.1.2:9092"] 23 | connect_timeout_ms: 3000 24 | check_shotover_peers_delay_ms: 3000 25 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-sasl-plain/topology3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9193" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "127.0.0.1:9191" 10 | address_for_clients: "127.0.0.1:9191" 11 | rack: "rack0" 12 | broker_id: 0 13 | - address_for_peers: "127.0.0.1:9192" 14 | address_for_clients: "127.0.0.1:9192" 15 | rack: "rack0" 16 | broker_id: 1 17 | - address_for_peers: "127.0.0.1:9193" 18 | address_for_clients: "127.0.0.1:9193" 19 | 
rack: "rack0" 20 | broker_id: 2 21 | local_shotover_broker_id: 2 22 | first_contact_points: ["172.16.1.2:9092"] 23 | connect_timeout_ms: 3000 24 | check_shotover_peers_delay_ms: 3000 25 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-sasl-scram-over-mtls/topology-single.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "127.0.0.1:9192" 10 | address_for_clients: "127.0.0.1:9192" 11 | rack: "rack0" 12 | broker_id: 0 13 | local_shotover_broker_id: 0 14 | first_contact_points: ["172.16.1.2:9092"] 15 | authorize_scram_over_mtls: 16 | mtls_port_contact_points: ["172.16.1.2:9094"] 17 | tls: 18 | certificate_authority_path: "tests/test-configs/kafka/tls/certs/localhost_CA.crt" 19 | certificate_path: "tests/test-configs/kafka/tls/certs/localhost.crt" 20 | private_key_path: "tests/test-configs/kafka/tls/certs/localhost.key" 21 | verify_hostname: true 22 | delegation_token_lifetime_seconds: 86400 # 1 day 23 | connect_timeout_ms: 3000 24 | check_shotover_peers_delay_ms: 3000 25 | tls: 26 | certificate_authority_path: "tests/test-configs/kafka/tls/certs/localhost_CA.crt" 27 | certificate_path: "tests/test-configs/kafka/tls/certs/localhost.crt" 28 | private_key_path: "tests/test-configs/kafka/tls/certs/localhost.key" 29 | verify_hostname: true 30 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-sasl-scram-over-mtls/topology1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9191" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "127.0.0.1:9191" 10 | address_for_clients: "127.0.0.1:9191" 11 | rack: "rack0" 12 | broker_id: 0 13 | - address_for_peers: "127.0.0.1:9192" 14 | address_for_clients: "127.0.0.1:9192" 15 | rack: "rack0" 16 | broker_id: 1 17 | - address_for_peers: "127.0.0.1:9193" 18 | address_for_clients: "127.0.0.1:9193" 19 | rack: "rack0" 20 | broker_id: 2 21 | local_shotover_broker_id: 0 22 | first_contact_points: ["172.16.1.2:9092"] 23 | authorize_scram_over_mtls: 24 | # every shotover node purposefully tests a different number of contact points 25 | mtls_port_contact_points: ["172.16.1.2:9094"] 26 | tls: 27 | certificate_authority_path: "tests/test-configs/kafka/tls/certs/localhost_CA.crt" 28 | certificate_path: "tests/test-configs/kafka/tls/certs/localhost.crt" 29 | private_key_path: "tests/test-configs/kafka/tls/certs/localhost.key" 30 | verify_hostname: true 31 | delegation_token_lifetime_seconds: 15 32 | connect_timeout_ms: 3000 33 | check_shotover_peers_delay_ms: 3000 34 | tls: 35 | certificate_authority_path: "tests/test-configs/kafka/tls/certs/localhost_CA.crt" 36 | certificate_path: "tests/test-configs/kafka/tls/certs/localhost.crt" 37 | private_key_path: "tests/test-configs/kafka/tls/certs/localhost.key" 38 | verify_hostname: true 39 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-sasl-scram-over-mtls/topology2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 
| - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "127.0.0.1:9191" 10 | address_for_clients: "127.0.0.1:9191" 11 | rack: "rack0" 12 | broker_id: 0 13 | - address_for_peers: "127.0.0.1:9192" 14 | address_for_clients: "127.0.0.1:9192" 15 | rack: "rack0" 16 | broker_id: 1 17 | - address_for_peers: "127.0.0.1:9193" 18 | address_for_clients: "127.0.0.1:9193" 19 | rack: "rack0" 20 | broker_id: 2 21 | local_shotover_broker_id: 1 22 | first_contact_points: ["172.16.1.2:9092"] 23 | authorize_scram_over_mtls: 24 | # every shotover node purposefully tests a different number of contact points 25 | mtls_port_contact_points: ["172.16.1.2:9094", "172.16.1.3:9094"] 26 | tls: 27 | certificate_authority_path: "tests/test-configs/kafka/tls/certs/localhost_CA.crt" 28 | certificate_path: "tests/test-configs/kafka/tls/certs/localhost.crt" 29 | private_key_path: "tests/test-configs/kafka/tls/certs/localhost.key" 30 | verify_hostname: true 31 | delegation_token_lifetime_seconds: 15 32 | connect_timeout_ms: 3000 33 | check_shotover_peers_delay_ms: 3000 34 | tls: 35 | certificate_authority_path: "tests/test-configs/kafka/tls/certs/localhost_CA.crt" 36 | certificate_path: "tests/test-configs/kafka/tls/certs/localhost.crt" 37 | private_key_path: "tests/test-configs/kafka/tls/certs/localhost.key" 38 | verify_hostname: true 39 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-sasl-scram-over-mtls/topology3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9193" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "127.0.0.1:9191" 10 | address_for_clients: "127.0.0.1:9191" 11 | rack: "rack0" 12 | broker_id: 0 13 | - address_for_peers: "127.0.0.1:9192" 14 | address_for_clients: "127.0.0.1:9192" 15 | rack: "rack0" 16 | broker_id: 1 17 | - address_for_peers: "127.0.0.1:9193" 18 | address_for_clients: "127.0.0.1:9193" 19 | rack: "rack0" 20 | broker_id: 2 21 | local_shotover_broker_id: 2 22 | first_contact_points: ["172.16.1.2:9092"] 23 | authorize_scram_over_mtls: 24 | # every shotover node purposefully tests a different number of contact points 25 | mtls_port_contact_points: ["172.16.1.2:9094", "172.16.1.3:9094", "172.16.1.4:9094"] 26 | tls: 27 | certificate_authority_path: "tests/test-configs/kafka/tls/certs/localhost_CA.crt" 28 | certificate_path: "tests/test-configs/kafka/tls/certs/localhost.crt" 29 | private_key_path: "tests/test-configs/kafka/tls/certs/localhost.key" 30 | verify_hostname: true 31 | delegation_token_lifetime_seconds: 15 32 | connect_timeout_ms: 3000 33 | check_shotover_peers_delay_ms: 3000 34 | tls: 35 | certificate_authority_path: "tests/test-configs/kafka/tls/certs/localhost_CA.crt" 36 | certificate_path: "tests/test-configs/kafka/tls/certs/localhost.crt" 37 | private_key_path: "tests/test-configs/kafka/tls/certs/localhost.key" 38 | verify_hostname: true 39 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-sasl-scram/topology-single.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 | - KafkaSinkCluster: 8 | shotover_nodes: 9 | - address_for_peers: "127.0.0.1:9192" 10 | address_for_clients: "127.0.0.1:9192" 11 | rack: "rack0" 12 | broker_id: 0 13 | 
local_shotover_broker_id: 0 14 | first_contact_points: ["172.16.1.2:9092"] 15 | connect_timeout_ms: 3000 16 | check_shotover_peers_delay_ms: 3000 17 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/cluster-tls/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | tls: 7 | certificate_path: "tests/test-configs/kafka/tls/certs/localhost.crt" 8 | private_key_path: "tests/test-configs/kafka/tls/certs/localhost.key" 9 | chain: 10 | - KafkaSinkCluster: 11 | shotover_nodes: 12 | - address_for_peers: "127.0.0.1:9192" 13 | address_for_clients: "127.0.0.1:9192" 14 | rack: "rack0" 15 | broker_id: 0 16 | local_shotover_broker_id: 0 17 | first_contact_points: ["172.16.1.2:9092"] 18 | connect_timeout_ms: 3000 19 | check_shotover_peers_delay_ms: 3000 20 | tls: 21 | certificate_authority_path: "tests/test-configs/kafka/tls/certs/localhost_CA.crt" 22 | verify_hostname: true 23 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/passthrough-mtls/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | kafka0: 3 | image: 'bitnami/kafka:3.9.0-debian-12-r6' 4 | ports: 5 | - '9092:9092' 6 | environment: 7 | - KAFKA_CFG_LISTENERS=BROKER://:9092,CONTROLLER://:9093 8 | - KAFKA_CFG_ADVERTISED_LISTENERS=BROKER://127.0.0.1:9092 9 | - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:SSL,BROKER:SSL 10 | - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=BROKER 11 | - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER 12 | - KAFKA_CFG_PROCESS_ROLES=controller,broker 13 | - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka0:9093 14 | - KAFKA_CFG_NODE_ID=0 15 | - KAFKA_CERTIFICATE_PASSWORD=password 16 | - KAFKA_TLS_CLIENT_AUTH=required 17 | - KAFKA_CFG_AUTHORIZER_CLASS_NAME=org.apache.kafka.metadata.authorizer.StandardAuthorizer 18 | - KAFKA_CFG_SUPER_USERS=User:O=ShotoverTestCertificate,CN=Generic-Cert 19 | volumes: 20 | - type: tmpfs 21 | target: /bitnami/kafka 22 | - type: bind 23 | source: "../tls/certs" 24 | target: "/opt/bitnami/kafka/config/certs" 25 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/passthrough-mtls/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | tls: 7 | certificate_path: "tests/test-configs/kafka/tls/certs/localhost.crt" 8 | private_key_path: "tests/test-configs/kafka/tls/certs/localhost.key" 9 | chain: 10 | - KafkaSinkSingle: 11 | destination_port: 9092 12 | connect_timeout_ms: 3000 13 | tls: 14 | certificate_authority_path: "tests/test-configs/kafka/tls/certs/localhost_CA.crt" 15 | certificate_path: "tests/test-configs/kafka/tls/certs/localhost.crt" 16 | private_key_path: "tests/test-configs/kafka/tls/certs/localhost.key" 17 | verify_hostname: true 18 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/passthrough-sasl-plain/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | kafka: 3 | image: 'bitnami/kafka:3.9.0-debian-12-r6' 4 | ports: 5 | - '9092:9092' 6 | - '9093:9093' 7 | environment: 8 | - KAFKA_CFG_NODE_ID=0 9 | - 
KAFKA_CFG_PROCESS_ROLES=controller,broker 10 | - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka:9093 11 | - KAFKA_CFG_LISTENERS=BROKER://:9092,CONTROLLER://:9093 12 | - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:SASL_PLAINTEXT,BROKER:SASL_PLAINTEXT 13 | - KAFKA_CFG_ADVERTISED_LISTENERS=BROKER://127.0.0.1:9092 14 | - KAFKA_CLIENT_USERS=user 15 | - KAFKA_CLIENT_PASSWORDS=password 16 | - KAFKA_INTER_BROKER_PASSWORD=password 17 | - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER 18 | - KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL=PLAIN 19 | - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=BROKER 20 | - KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL=PLAIN 21 | volumes: 22 | - type: tmpfs 23 | target: /bitnami/kafka 24 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/passthrough-sasl-plain/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 | - DebugForceEncode: 8 | encode_requests: true 9 | encode_responses: true 10 | - KafkaSinkSingle: 11 | destination_port: 9092 12 | connect_timeout_ms: 3000 13 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/passthrough-sasl-scram/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | kafka: 3 | image: 'bitnami/kafka:3.9.0-debian-12-r6' 4 | ports: 5 | - '9092:9092' 6 | environment: 7 | - KAFKA_CFG_NODE_ID=0 8 | - KAFKA_CFG_PROCESS_ROLES=controller,broker 9 | - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka:9093 10 | - KAFKA_CFG_LISTENERS=BROKER://:9092,CONTROLLER://:9093 11 | - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:SASL_PLAINTEXT,BROKER:SASL_PLAINTEXT 12 | - KAFKA_CFG_ADVERTISED_LISTENERS=BROKER://127.0.0.1:9092 13 | - KAFKA_CLIENT_USERS=user 14 | - KAFKA_CLIENT_PASSWORDS=password 15 | - KAFKA_INTER_BROKER_PASSWORD=password 16 | - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER 17 | - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=BROKER 18 | - KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL=PLAIN 19 | - KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL=SCRAM-SHA-256 20 | volumes: 21 | - type: tmpfs 22 | target: /bitnami/kafka 23 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/passthrough-sasl-scram/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 | - KafkaSinkSingle: 8 | destination_port: 9092 9 | connect_timeout_ms: 3000 10 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/passthrough-tls/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | kafka0: 3 | image: 'bitnami/kafka:3.9.0-debian-12-r6' 4 | ports: 5 | - '9092:9092' 6 | environment: 7 | - KAFKA_CFG_LISTENERS=BROKER://:9092,CONTROLLER://:9093 8 | - KAFKA_CFG_ADVERTISED_LISTENERS=BROKER://127.0.0.1:9092 9 | - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,BROKER:SSL 10 | - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=BROKER 11 | - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER 12 | - KAFKA_CFG_PROCESS_ROLES=controller,broker 13 | - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka0:9093 14 | - 
KAFKA_CFG_NODE_ID=0 15 | - KAFKA_CERTIFICATE_PASSWORD=password 16 | - KAFKA_TLS_CLIENT_AUTH=none 17 | volumes: 18 | - type: tmpfs 19 | target: /bitnami/kafka 20 | - type: bind 21 | source: "../tls/certs" 22 | target: "/opt/bitnami/kafka/config/certs" 23 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/passthrough-tls/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 | - KafkaSinkSingle: 8 | destination_port: 9092 9 | connect_timeout_ms: 3000 10 | tls: 11 | certificate_authority_path: "tests/test-configs/kafka/tls/certs/localhost_CA.crt" 12 | verify_hostname: true 13 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/passthrough/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | kafka0: 3 | image: 'bitnami/kafka:3.9.0-debian-12-r6' 4 | ports: 5 | - '9092:9092' 6 | environment: 7 | - KAFKA_CFG_LISTENERS=BROKER://:9092,CONTROLLER://:9093 8 | - KAFKA_CFG_ADVERTISED_LISTENERS=BROKER://127.0.0.1:9092 9 | - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,BROKER:PLAINTEXT 10 | - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=BROKER 11 | - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER 12 | - KAFKA_CFG_PROCESS_ROLES=controller,broker 13 | - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka0:9093 14 | - KAFKA_CFG_NODE_ID=0 15 | - ALLOW_PLAINTEXT_LISTENER=yes 16 | volumes: 17 | - type: tmpfs 18 | target: /bitnami/kafka 19 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/passthrough/topology-encode.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 | - DebugForceEncode: 8 | encode_requests: true 9 | encode_responses: true 10 | - KafkaSinkSingle: 11 | destination_port: 9092 12 | connect_timeout_ms: 3000 13 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/passthrough/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 | - KafkaSinkSingle: 8 | destination_port: 9092 9 | connect_timeout_ms: 3000 10 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/single-sasl-scram-plaintext-source-tls-sink/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | kafka: 3 | image: 'bitnami/kafka:3.9.0-debian-12-r6' 4 | ports: 5 | - '9092:9092' 6 | environment: 7 | - KAFKA_CFG_NODE_ID=0 8 | - KAFKA_CFG_PROCESS_ROLES=controller,broker 9 | - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka:9093 10 | - KAFKA_CFG_LISTENERS=BROKER://:9092,CONTROLLER://:9093 11 | - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:SASL_PLAINTEXT,BROKER:SASL_SSL 12 | - KAFKA_CFG_ADVERTISED_LISTENERS=BROKER://127.0.0.1:9092 13 | - KAFKA_CLIENT_USERS=user 14 | - KAFKA_CLIENT_PASSWORDS=password 15 | - KAFKA_INTER_BROKER_PASSWORD=password 16 | - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER 17 | - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=BROKER 18 | - 
KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL=PLAIN 19 | - KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL=SCRAM-SHA-256 20 | - KAFKA_CERTIFICATE_PASSWORD=password 21 | - KAFKA_TLS_CLIENT_AUTH=none 22 | volumes: 23 | - type: tmpfs 24 | target: /bitnami/kafka 25 | - type: bind 26 | source: "../tls/certs" 27 | target: "/opt/bitnami/kafka/config/certs" 28 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/kafka/single-sasl-scram-plaintext-source-tls-sink/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Kafka: 4 | name: "kafka" 5 | listen_addr: "127.0.0.1:9192" 6 | chain: 7 | - KafkaSinkSingle: 8 | destination_port: 9092 9 | connect_timeout_ms: 3000 10 | tls: 11 | certificate_authority_path: "tests/test-configs/kafka/tls/certs/localhost_CA.crt" 12 | verify_hostname: true 13 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/log-to-file/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | valkey-one: 3 | image: bitnami/valkey:7.2.5-debian-12-r9 4 | ports: 5 | - "1111:6379" 6 | environment: 7 | ALLOW_EMPTY_PASSWORD: "yes" 8 | VALKEY_TLS_ENABLED: "no" 9 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/log-to-file/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | chain: 7 | - DebugLogToFile 8 | - ValkeySinkSingle: 9 | remote_address: "127.0.0.1:1111" 10 | connect_timeout_ms: 3000 11 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/null-cassandra/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Cassandra: 4 | name: "cassandra" 5 | listen_addr: "127.0.0.1:9042" 6 | chain: 7 | - NullSink 8 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/null-valkey/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | chain: 7 | - QueryCounter: 8 | name: valkey-chain 9 | - NullSink 10 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/opensearch-passthrough/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | opensearch-node1: 3 | image: opensearchproject/opensearch:2.9.0 4 | container_name: opensearch-node1 5 | environment: 6 | - cluster.name=opensearch-cluster 7 | - node.name=opensearch-node1 8 | - bootstrap.memory_lock=true 9 | - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" 10 | - discovery.type=single-node 11 | - plugins.security.disabled=true 12 | ulimits: 13 | memlock: 14 | soft: -1 15 | hard: -1 16 | nofile: 17 | soft: 65536 18 | hard: 65536 19 | volumes: 20 | - type: volume 21 | target: /usr/share/opensearch/data 22 | ports: 23 | - 9200:9200 24 | - 9600:9600 25 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/opensearch-passthrough/topology.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - OpenSearch: 4 | name: "OpenSearch" 5 | listen_addr: "127.0.0.1:9201" 6 | chain: 7 | - OpenSearchSinkSingle: 8 | remote_address: "127.0.0.1:9200" 9 | connect_timeout_ms: 3000 10 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/query_type_filter/simple.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey1" 5 | listen_addr: "127.0.0.1:6379" 6 | connection_limit: 3000000 7 | chain: 8 | - QueryTypeFilter: 9 | DenyList: [Write] 10 | - DebugReturner: 11 | Valkey: "42" 12 | - Valkey: 13 | name: "valkey2" 14 | listen_addr: "127.0.0.1:6380" 15 | connection_limit: 3000000 16 | chain: 17 | - QueryTypeFilter: 18 | AllowList: [Read] 19 | - DebugReturner: 20 | Valkey: "42" 21 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/shotover-config/config1.yaml: -------------------------------------------------------------------------------- 1 | main_log_level: "info, shotover::connection_span=debug" 2 | observability_interface: "0.0.0.0:9001" 3 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/shotover-config/config2.yaml: -------------------------------------------------------------------------------- 1 | main_log_level: "info, shotover::connection_span=debug" 2 | observability_interface: "0.0.0.0:9002" 3 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/shotover-config/config3.yaml: -------------------------------------------------------------------------------- 1 | main_log_level: "info, shotover::connection_span=debug" 2 | observability_interface: "0.0.0.0:9003" 3 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/shotover-config/config_metrics_disabled.yaml: -------------------------------------------------------------------------------- 1 | main_log_level: "info, shotover::connection_span=debug" 2 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/tee/fail.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | connection_limit: 3000000 7 | chain: 8 | - Tee: 9 | behavior: FailOnMismatch 10 | buffer_size: 10000 11 | chain: 12 | - QueryTypeFilter: 13 | DenyList: [Read] 14 | - DebugReturner: 15 | Valkey: "42" 16 | - DebugReturner: 17 | Valkey: "42" 18 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/tee/fail_with_mismatch.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | connection_limit: 3000000 7 | chain: 8 | - Tee: 9 | behavior: FailOnMismatch 10 | buffer_size: 10000 11 | chain: 12 | - QueryTypeFilter: 13 | DenyList: [Read] 14 | - DebugReturner: 15 | Valkey: "41" 16 | - DebugReturner: 17 | Valkey: "42" 18 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/tee/ignore.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | connection_limit: 3000000 7 | chain: 8 | - Tee: 9 | behavior: Ignore 10 | buffer_size: 10000 11 | chain: 12 | - QueryTypeFilter: 13 | DenyList: [Read] 14 | - DebugReturner: 15 | Valkey: "42" 16 | - DebugReturner: 17 | Valkey: "42" 18 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/tee/ignore_with_mismatch.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | connection_limit: 3000000 7 | chain: 8 | - Tee: 9 | behavior: Ignore 10 | buffer_size: 10000 11 | chain: 12 | - QueryTypeFilter: 13 | DenyList: [Read] 14 | - DebugReturner: 15 | Valkey: "41" 16 | - DebugReturner: 17 | Valkey: "42" 18 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/tee/log.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | connection_limit: 3000000 7 | chain: 8 | - Tee: 9 | behavior: LogWarningOnMismatch 10 | buffer_size: 10000 11 | chain: 12 | - QueryTypeFilter: 13 | DenyList: [Read] 14 | - DebugReturner: 15 | Valkey: "42" 16 | - DebugReturner: 17 | Valkey: "42" 18 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/tee/log_with_mismatch.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | connection_limit: 3000000 7 | chain: 8 | - Tee: 9 | behavior: LogWarningOnMismatch 10 | buffer_size: 10000 11 | chain: 12 | - QueryTypeFilter: 13 | DenyList: [Read] 14 | - DebugReturner: 15 | Valkey: "41" 16 | - DebugReturner: 17 | Valkey: "42" 18 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/tee/subchain.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | connection_limit: 3000000 7 | chain: 8 | - Tee: 9 | behavior: 10 | SubchainOnMismatch: 11 | - QueryTypeFilter: 12 | DenyList: [Read] 13 | - ValkeySinkSingle: 14 | remote_address: "127.0.0.1:1111" 15 | connect_timeout_ms: 3000 16 | buffer_size: 10000 17 | chain: 18 | - DebugReturner: 19 | Valkey: "42" 20 | - DebugReturner: 21 | Valkey: "42" 22 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/tee/subchain_with_mismatch.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | connection_limit: 3000000 7 | chain: 8 | - Tee: 9 | buffer_size: 10000 10 | behavior: 11 | SubchainOnMismatch: 12 | - QueryTypeFilter: 13 | DenyList: [Read] 14 | - ValkeySinkSingle: 15 | remote_address: "127.0.0.1:1111" 16 | connect_timeout_ms: 3000 17 | chain: 18 | - QueryTypeFilter: 19 | DenyList: [Read] 20 | - DebugReturner: 21 | Valkey: "41" 22 | - DebugReturner: 23 | Valkey: "42" 24 | -------------------------------------------------------------------------------- 
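The tee configs above pair each mismatch behavior (FailOnMismatch, Ignore, LogWarningOnMismatch, SubchainOnMismatch) with a matching ("42"/"42") and a mismatching ("41"/"42") DebugReturner subchain. As a rough sketch of how one of these topologies might be driven end to end, in the style of the integration test in tests/transforms/log_to_file.rs further below — the test name, the GET command, and the expectation that the client sees the main chain's "42" are illustrative assumptions, not code from the repository:

use crate::shotover_process;
use test_helpers::connection::valkey_connection::ValkeyConnectionCreator;

// Hypothetical test driving tee/ignore_with_mismatch.yaml: with behavior
// Ignore, the mismatch between the tee'd subchain ("41") and the main
// chain ("42") is discarded, so the client still gets the main response.
#[tokio::test(flavor = "multi_thread")]
async fn tee_ignore_with_mismatch() {
    // No docker-compose backend is needed: both chains end in a DebugReturner.
    let shotover = shotover_process("tests/test-configs/tee/ignore_with_mismatch.yaml")
        .start()
        .await;

    let mut connection = ValkeyConnectionCreator {
        address: "127.0.0.1".into(),
        port: 6379,
        tls: false,
    }
    .new_async()
    .await;

    // Any command should come back with the main chain's canned "42".
    let result: String = redis::cmd("GET")
        .arg("foo")
        .query_async(&mut connection)
        .await
        .unwrap();
    assert_eq!(result, "42");

    shotover.shutdown_and_then_consume_events(&[]).await;
}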
/shotover-proxy/tests/test-configs/tee/switch_chain.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey-1" 5 | listen_addr: "127.0.0.1:6371" 6 | connection_limit: 7 | chain: 8 | - Tee: 9 | behavior: Ignore 10 | buffer_size: 10000 11 | switch_port: 1231 12 | chain: 13 | - DebugReturner: 14 | Valkey: "b" 15 | - DebugReturner: 16 | Valkey: "a" 17 | - Valkey: 18 | name: "valkey-2" 19 | listen_addr: "127.0.0.1:6372" 20 | connection_limit: 21 | chain: 22 | - Tee: 23 | behavior: 24 | SubchainOnMismatch: 25 | - NullSink 26 | buffer_size: 10000 27 | switch_port: 1232 28 | chain: 29 | - DebugReturner: 30 | Valkey: "b" 31 | - DebugReturner: 32 | Valkey: "a" 33 | - Valkey: 34 | name: "valkey-3" 35 | listen_addr: "127.0.0.1:6373" 36 | connection_limit: 37 | chain: 38 | - Tee: 39 | behavior: LogWarningOnMismatch 40 | buffer_size: 10000 41 | switch_port: 1233 42 | chain: 43 | - DebugReturner: 44 | Valkey: "b" 45 | - DebugReturner: 46 | Valkey: "a" 47 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/cluster-auth/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | valkey-node-0: 3 | image: &image bitnami/valkey-cluster:7.2.5-debian-12-r4 4 | ports: 5 | - "2230:6379" 6 | environment: &environment 7 | - 'VALKEY_PASSWORD=shotover' 8 | - 'VALKEY_NODES=valkey-node-0 valkey-node-1 valkey-node-2 valkey-node-3 valkey-node-4 valkey-node-5' 9 | 10 | valkey-node-1: 11 | image: *image 12 | ports: 13 | - "2231:6379" 14 | environment: *environment 15 | 16 | valkey-node-2: 17 | image: *image 18 | ports: 19 | - "2232:6379" 20 | environment: *environment 21 | 22 | valkey-node-3: 23 | image: *image 24 | ports: 25 | - "2233:6379" 26 | environment: *environment 27 | 28 | valkey-node-4: 29 | image: *image 30 | ports: 31 | - "2234:6379" 32 | environment: *environment 33 | 34 | valkey-node-5: 35 | image: *image 36 | ports: 37 | - "2235:6379" 38 | environment: *environment 39 | 40 | valkey-cluster-init: 41 | image: *image 42 | depends_on: 43 | - valkey-node-0 44 | - valkey-node-1 45 | - valkey-node-2 46 | - valkey-node-3 47 | - valkey-node-4 48 | - valkey-node-5 49 | environment: 50 | - 'VALKEY_PASSWORD=shotover' 51 | - 'REDISCLI_AUTH=shotover' 52 | - 'VALKEY_CLUSTER_REPLICAS=1' 53 | - 'VALKEY_NODES=valkey-node-0 valkey-node-1 valkey-node-2 valkey-node-3 valkey-node-4 valkey-node-5' 54 | - 'VALKEY_CLUSTER_CREATOR=yes' 55 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/cluster-auth/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | chain: 7 | # This transform is only here to ensure that the transform correctly handles the case where 8 | # valkey returns an error due to being unauthenticated 9 | - ValkeyClusterPortsRewrite: 10 | new_port: 6379 11 | - ValkeySinkCluster: 12 | first_contact_points: 13 | [ 14 | "127.0.0.1:2230", 15 | "127.0.0.1:2231", 16 | "127.0.0.1:2232", 17 | "127.0.0.1:2233", 18 | "127.0.0.1:2234", 19 | "127.0.0.1:2235", 20 | ] 21 | connect_timeout_ms: 3000 22 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/cluster-dr/topology.yaml: 
-------------------------------------------------------------------------------- 1 | # This example will replicate all commands to the DR datacenter on a best-effort basis 2 | --- 3 | sources: 4 | - Valkey: 5 | name: "valkey" 6 | listen_addr: "127.0.0.1:6379" 7 | connection_limit: 3000000 8 | chain: 9 | - Tee: 10 | behavior: Ignore 11 | buffer_size: 10000 12 | chain: 13 | - QueryTypeFilter: 14 | DenyList: [Read] 15 | - Coalesce: 16 | flush_when_buffered_message_count: 2000 17 | # Use an unreasonably large timeout here so that integration tests don't break on slow hardware or a performance regression 18 | flush_when_millis_since_last_flush: 1000000000 19 | - QueryCounter: 20 | name: "DR chain" 21 | - ValkeySinkCluster: 22 | first_contact_points: [ "127.0.0.1:2120", "127.0.0.1:2121", "127.0.0.1:2122", "127.0.0.1:2123", "127.0.0.1:2124", "127.0.0.1:2125" ] 23 | connect_timeout_ms: 3000 24 | - QueryCounter: 25 | name: "Main chain" 26 | - ValkeySinkCluster: 27 | first_contact_points: [ "127.0.0.1:2220", "127.0.0.1:2221", "127.0.0.1:2222", "127.0.0.1:2223", "127.0.0.1:2224", "127.0.0.1:2225" ] 28 | connect_timeout_ms: 3000 29 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/cluster-handling/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | networks: 2 | cluster_subnet: 3 | name: cluster_subnet 4 | driver: bridge 5 | ipam: 6 | driver: default 7 | config: 8 | - subnet: 172.16.1.0/24 9 | gateway: 172.16.1.1 10 | services: 11 | valkey-node-0: 12 | networks: 13 | cluster_subnet: 14 | ipv4_address: 172.16.1.2 15 | image: &image bitnami/valkey-cluster:7.2.5-debian-12-r4 16 | environment: &environment 17 | - 'ALLOW_EMPTY_PASSWORD=yes' 18 | - 'VALKEY_NODES=valkey-node-0 valkey-node-1 valkey-node-2 valkey-node-3 valkey-node-4 valkey-node-5' 19 | 20 | valkey-node-1: 21 | networks: 22 | cluster_subnet: 23 | ipv4_address: 172.16.1.3 24 | image: *image 25 | environment: *environment 26 | 27 | valkey-node-2: 28 | networks: 29 | cluster_subnet: 30 | ipv4_address: 172.16.1.4 31 | image: *image 32 | environment: *environment 33 | 34 | valkey-node-3: 35 | networks: 36 | cluster_subnet: 37 | ipv4_address: 172.16.1.5 38 | image: *image 39 | environment: *environment 40 | 41 | valkey-node-4: 42 | networks: 43 | cluster_subnet: 44 | ipv4_address: 172.16.1.6 45 | image: *image 46 | environment: *environment 47 | 48 | valkey-node-5: 49 | networks: 50 | cluster_subnet: 51 | ipv4_address: 172.16.1.7 52 | image: *image 53 | environment: *environment 54 | 55 | valkey-cluster-init: 56 | networks: 57 | cluster_subnet: 58 | ipv4_address: 172.16.1.8 59 | image: *image 60 | depends_on: 61 | - valkey-node-0 62 | - valkey-node-1 63 | - valkey-node-2 64 | - valkey-node-3 65 | - valkey-node-4 66 | - valkey-node-5 67 | environment: 68 | - 'ALLOW_EMPTY_PASSWORD=yes' 69 | - 'VALKEY_CLUSTER_REPLICAS=1' 70 | - 'VALKEY_NODES=valkey-node-0 valkey-node-1 valkey-node-2 valkey-node-3 valkey-node-4 valkey-node-5' 71 | - 'VALKEY_CLUSTER_CREATOR=yes' 72 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/cluster-handling/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | chain: 7 | - ValkeySinkCluster: 8 | first_contact_points: 9 | - "172.16.1.2:6379" 10 | - "172.16.1.3:6379" 11 | - "172.16.1.4:6379" 12 | - 
"172.16.1.5:6379" 13 | - "172.16.1.6:6379" 14 | - "172.16.1.7:6379" 15 | direct_destination: "172.16.1.2:6379" 16 | connect_timeout_ms: 3000 17 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/cluster-hiding/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | networks: 2 | cluster_subnet: 3 | name: cluster_subnet 4 | driver: bridge 5 | ipam: 6 | driver: default 7 | config: 8 | - subnet: 172.16.1.0/24 9 | gateway: 172.16.1.1 10 | services: 11 | valkey-node-0: 12 | networks: 13 | cluster_subnet: 14 | ipv4_address: 172.16.1.2 15 | image: &image bitnami/valkey-cluster:7.2.5-debian-12-r4 16 | environment: &environment 17 | - 'ALLOW_EMPTY_PASSWORD=yes' 18 | - 'VALKEY_NODES=valkey-node-0 valkey-node-1 valkey-node-2 valkey-node-3 valkey-node-4 valkey-node-5' 19 | 20 | valkey-node-1: 21 | networks: 22 | cluster_subnet: 23 | ipv4_address: 172.16.1.3 24 | image: *image 25 | environment: *environment 26 | 27 | valkey-node-2: 28 | networks: 29 | cluster_subnet: 30 | ipv4_address: 172.16.1.4 31 | image: *image 32 | environment: *environment 33 | 34 | valkey-node-3: 35 | networks: 36 | cluster_subnet: 37 | ipv4_address: 172.16.1.5 38 | image: *image 39 | environment: *environment 40 | 41 | valkey-node-4: 42 | networks: 43 | cluster_subnet: 44 | ipv4_address: 172.16.1.6 45 | image: *image 46 | environment: *environment 47 | 48 | valkey-node-5: 49 | networks: 50 | cluster_subnet: 51 | ipv4_address: 172.16.1.7 52 | image: *image 53 | environment: *environment 54 | 55 | valkey-cluster-init: 56 | networks: 57 | cluster_subnet: 58 | ipv4_address: 172.16.1.8 59 | image: *image 60 | depends_on: 61 | - valkey-node-0 62 | - valkey-node-1 63 | - valkey-node-2 64 | - valkey-node-3 65 | - valkey-node-4 66 | - valkey-node-5 67 | environment: 68 | - 'ALLOW_EMPTY_PASSWORD=yes' 69 | - 'VALKEY_CLUSTER_REPLICAS=1' 70 | - 'VALKEY_NODES=valkey-node-0 valkey-node-1 valkey-node-2 valkey-node-3 valkey-node-4 valkey-node-5' 71 | - 'VALKEY_CLUSTER_CREATOR=yes' 72 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/cluster-hiding/topology-encode.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | chain: 7 | - DebugForceEncode: 8 | encode_requests: true 9 | encode_responses: true 10 | - ValkeySinkCluster: 11 | first_contact_points: 12 | - "172.16.1.2:6379" 13 | - "172.16.1.3:6379" 14 | - "172.16.1.4:6379" 15 | - "172.16.1.5:6379" 16 | - "172.16.1.6:6379" 17 | - "172.16.1.7:6379" 18 | connect_timeout_ms: 3000 -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/cluster-hiding/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | chain: 7 | - ValkeySinkCluster: 8 | first_contact_points: 9 | - "172.16.1.2:6379" 10 | - "172.16.1.3:6379" 11 | - "172.16.1.4:6379" 12 | - "172.16.1.5:6379" 13 | - "172.16.1.6:6379" 14 | - "172.16.1.7:6379" 15 | connect_timeout_ms: 3000 -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/cluster-ports-rewrite/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | 
services: 2 | valkey-node-0: 3 | image: &image bitnami/valkey-cluster:7.2.5-debian-12-r4 4 | ports: 5 | - "2220:6379" 6 | environment: &environment 7 | - 'ALLOW_EMPTY_PASSWORD=yes' 8 | - 'VALKEY_NODES=valkey-node-0 valkey-node-1 valkey-node-2 valkey-node-3 valkey-node-4 valkey-node-5' 9 | 10 | valkey-node-1: 11 | image: *image 12 | ports: 13 | - "2221:6379" 14 | environment: *environment 15 | 16 | valkey-node-2: 17 | image: *image 18 | ports: 19 | - "2222:6379" 20 | environment: *environment 21 | 22 | valkey-node-3: 23 | image: *image 24 | ports: 25 | - "2223:6379" 26 | environment: *environment 27 | 28 | valkey-node-4: 29 | image: *image 30 | ports: 31 | - "2224:6379" 32 | environment: *environment 33 | 34 | valkey-node-5: 35 | image: *image 36 | ports: 37 | - "2225:6379" 38 | environment: *environment 39 | 40 | valkey-cluster-init: 41 | image: *image 42 | depends_on: 43 | - valkey-node-0 44 | - valkey-node-1 45 | - valkey-node-2 46 | - valkey-node-3 47 | - valkey-node-4 48 | - valkey-node-5 49 | environment: 50 | - 'ALLOW_EMPTY_PASSWORD=yes' 51 | - 'VALKEY_CLUSTER_REPLICAS=1' 52 | - 'VALKEY_NODES=valkey-node-0 valkey-node-1 valkey-node-2 valkey-node-3 valkey-node-4 valkey-node-5' 53 | - 'VALKEY_CLUSTER_CREATOR=yes' 54 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/cluster-ports-rewrite/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6380" 6 | chain: 7 | - ValkeyClusterPortsRewrite: 8 | new_port: 6380 9 | - ValkeySinkCluster: 10 | first_contact_points: ["127.0.0.1:2220", "127.0.0.1:2221", "127.0.0.1:2222", "127.0.0.1:2223", "127.0.0.1:2224", "127.0.0.1:2225"] 11 | connect_timeout_ms: 3000 12 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/cluster-tls/topology-encode.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | tls: 7 | certificate_path: "tests/test-configs/valkey/tls/certs/localhost.crt" 8 | private_key_path: "tests/test-configs/valkey/tls/certs/localhost.key" 9 | chain: 10 | - DebugForceEncode: 11 | encode_requests: true 12 | encode_responses: true 13 | - ValkeySinkCluster: 14 | first_contact_points: 15 | - "172.16.1.2:6379" 16 | - "172.16.1.3:6379" 17 | - "172.16.1.4:6379" 18 | - "172.16.1.5:6379" 19 | - "172.16.1.6:6379" 20 | - "172.16.1.7:6379" 21 | connect_timeout_ms: 3000 22 | tls: 23 | certificate_authority_path: "tests/test-configs/valkey/tls/certs/localhost_CA.crt" 24 | certificate_path: "tests/test-configs/valkey/tls/certs/localhost.crt" 25 | private_key_path: "tests/test-configs/valkey/tls/certs/localhost.key" 26 | verify_hostname: true 27 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/cluster-tls/topology-no-source-encryption.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | chain: 7 | - ValkeySinkCluster: 8 | first_contact_points: 9 | - "172.16.1.2:6379" 10 | - "172.16.1.3:6379" 11 | - "172.16.1.4:6379" 12 | - "172.16.1.5:6379" 13 | - "172.16.1.6:6379" 14 | - "172.16.1.7:6379" 15 | connect_timeout_ms: 3000 16 | tls: 17 | certificate_authority_path: 
"tests/test-configs/valkey/tls/certs/localhost_CA.crt" 18 | certificate_path: "tests/test-configs/valkey/tls/certs/localhost.crt" 19 | private_key_path: "tests/test-configs/valkey/tls/certs/localhost.key" 20 | verify_hostname: true 21 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/cluster-tls/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | tls: 7 | certificate_path: "tests/test-configs/valkey/tls/certs/localhost.crt" 8 | private_key_path: "tests/test-configs/valkey/tls/certs/localhost.key" 9 | chain: 10 | - ValkeySinkCluster: 11 | first_contact_points: 12 | - "172.16.1.2:6379" 13 | - "172.16.1.3:6379" 14 | - "172.16.1.4:6379" 15 | - "172.16.1.5:6379" 16 | - "172.16.1.6:6379" 17 | - "172.16.1.7:6379" 18 | connect_timeout_ms: 3000 19 | tls: 20 | certificate_authority_path: "tests/test-configs/valkey/tls/certs/localhost_CA.crt" 21 | certificate_path: "tests/test-configs/valkey/tls/certs/localhost.crt" 22 | private_key_path: "tests/test-configs/valkey/tls/certs/localhost.key" 23 | verify_hostname: true 24 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/passthrough/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | redis-one: 3 | image: library/redis:5.0.9 4 | ports: 5 | - "1111:6379" 6 | volumes: 7 | - ./redis.conf:/usr/local/etc/redis/redis.conf 8 | command: [ "redis-server", "/usr/local/etc/redis/redis.conf" ] 9 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/passthrough/topology-encode.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | chain: 7 | - DebugForceEncode: 8 | encode_requests: true 9 | encode_responses: true 10 | - ValkeySinkSingle: 11 | remote_address: "127.0.0.1:1111" 12 | connect_timeout_ms: 3000 13 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/passthrough/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | chain: 7 | - ValkeySinkSingle: 8 | remote_address: "127.0.0.1:1111" 9 | connect_timeout_ms: 3000 10 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/tls-no-client-auth/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | valkey-one: 3 | image: bitnami/valkey:7.2.5-debian-12-r9 4 | ports: 5 | - "1111:6379" 6 | volumes: 7 | - ../tls/certs:/certs 8 | environment: 9 | ALLOW_EMPTY_PASSWORD: "yes" 10 | VALKEY_TLS_ENABLED: "yes" 11 | VALKEY_TLS_CERT_FILE: "/certs/localhost.crt" 12 | VALKEY_TLS_KEY_FILE: "/certs/localhost.key" 13 | VALKEY_TLS_CA_FILE: "/certs/localhost_CA.crt" 14 | VALKEY_TLS_AUTH_CLIENTS: "no" 15 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/tls-no-client-auth/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | 
- Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | tls: 7 | certificate_path: "tests/test-configs/valkey/tls/certs/localhost.crt" 8 | private_key_path: "tests/test-configs/valkey/tls/certs/localhost.key" 9 | chain: 10 | - ValkeySinkSingle: 11 | remote_address: "localhost:1111" 12 | connect_timeout_ms: 3000 13 | tls: 14 | certificate_authority_path: "tests/test-configs/valkey/tls/certs/localhost_CA.crt" 15 | verify_hostname: true 16 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/tls-no-verify-hostname/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | valkey-one: 3 | image: bitnami/valkey:7.2.5-debian-12-r9 4 | ports: 5 | - "1111:6379" 6 | volumes: 7 | - ../tls/certs:/certs 8 | environment: 9 | ALLOW_EMPTY_PASSWORD: "yes" 10 | VALKEY_TLS_ENABLED: "yes" 11 | VALKEY_TLS_CERT_FILE: "/certs/localhost.crt" 12 | VALKEY_TLS_KEY_FILE: "/certs/localhost.key" 13 | VALKEY_TLS_CA_FILE: "/certs/localhost_CA.crt" 14 | VALKEY_TLS_AUTH_CLIENTS: "yes" 15 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/tls-no-verify-hostname/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | tls: 7 | certificate_path: "tests/test-configs/valkey/tls/certs/localhost.crt" 8 | private_key_path: "tests/test-configs/valkey/tls/certs/localhost.key" 9 | chain: 10 | - ValkeySinkSingle: 11 | remote_address: "localhost:1111" 12 | connect_timeout_ms: 3000 13 | tls: 14 | certificate_authority_path: "tests/test-configs/valkey/tls/certs/localhost_CA.crt" 15 | certificate_path: "tests/test-configs/valkey/tls/certs/localhost.crt" 16 | private_key_path: "tests/test-configs/valkey/tls/certs/localhost.key" 17 | verify_hostname: false 18 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/tls/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | valkey-one: 3 | image: bitnami/valkey:7.2.5-debian-12-r9 4 | ports: 5 | - "1111:6379" 6 | volumes: 7 | - ./certs:/certs 8 | environment: 9 | ALLOW_EMPTY_PASSWORD: "yes" 10 | VALKEY_TLS_ENABLED: "yes" 11 | VALKEY_TLS_CERT_FILE: "/certs/localhost.crt" 12 | VALKEY_TLS_KEY_FILE: "/certs/localhost.key" 13 | VALKEY_TLS_CA_FILE: "/certs/localhost_CA.crt" 14 | VALKEY_TLS_AUTH_CLIENTS: "yes" 15 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/tls/redis-cli.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | redis-cli --tls --cert certs/localhost.crt --key certs/redis.key --cacert certs/localhost_CA.crt "$@" 4 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/tls/topology-encode.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | tls: 7 | certificate_path: "tests/test-configs/valkey/tls/certs/localhost.crt" 8 | private_key_path: "tests/test-configs/valkey/tls/certs/localhost.key" 9 | chain: 10 | - DebugForceEncode: 11 | encode_requests: true 12 | encode_responses: true 13 | - 
ValkeySinkSingle: 14 | remote_address: "localhost:1111" 15 | connect_timeout_ms: 3000 16 | tls: 17 | certificate_authority_path: "tests/test-configs/valkey/tls/certs/localhost_CA.crt" 18 | certificate_path: "tests/test-configs/valkey/tls/certs/localhost.crt" 19 | private_key_path: "tests/test-configs/valkey/tls/certs/localhost.key" 20 | verify_hostname: true 21 | -------------------------------------------------------------------------------- /shotover-proxy/tests/test-configs/valkey/tls/topology.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | - Valkey: 4 | name: "valkey" 5 | listen_addr: "127.0.0.1:6379" 6 | tls: 7 | certificate_path: "tests/test-configs/valkey/tls/certs/localhost.crt" 8 | private_key_path: "tests/test-configs/valkey/tls/certs/localhost.key" 9 | chain: 10 | - ValkeySinkSingle: 11 | remote_address: "localhost:1111" 12 | connect_timeout_ms: 3000 13 | tls: 14 | certificate_authority_path: "tests/test-configs/valkey/tls/certs/localhost_CA.crt" 15 | certificate_path: "tests/test-configs/valkey/tls/certs/localhost.crt" 16 | private_key_path: "tests/test-configs/valkey/tls/certs/localhost.key" 17 | verify_hostname: true 18 | -------------------------------------------------------------------------------- /shotover-proxy/tests/transforms/docker-compose-moto.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | moto: 3 | image: "motoserver/moto" 4 | ports: 5 | - "5000:5000" 6 | -------------------------------------------------------------------------------- /shotover-proxy/tests/transforms/log_to_file.rs: -------------------------------------------------------------------------------- 1 | use crate::shotover_process; 2 | use crate::valkey_int_tests::assert::assert_ok; 3 | use pretty_assertions::assert_eq; 4 | use test_helpers::connection::valkey_connection::ValkeyConnectionCreator; 5 | use test_helpers::docker_compose::docker_compose; 6 | 7 | #[cfg(feature = "alpha-transforms")] 8 | #[tokio::test(flavor = "multi_thread")] 9 | async fn log_to_file() { 10 | let _compose = docker_compose("tests/test-configs/log-to-file/docker-compose.yaml"); 11 | let shotover = shotover_process("tests/test-configs/log-to-file/topology.yaml") 12 | .start() 13 | .await; 14 | 15 | // CLIENT SETINFO requests sent by the driver during the connection handshake 16 | let mut connection = ValkeyConnectionCreator { 17 | address: "127.0.0.1".into(), 18 | port: 6379, 19 | tls: false, 20 | } 21 | .new_async() 22 | .await; 23 | let request = std::fs::read("message-log/1/requests/message1.bin").unwrap(); 24 | assert_eq_string( 25 | &request, 26 | "*4\r\n$6\r\nCLIENT\r\n$7\r\nSETINFO\r\n$8\r\nLIB-NAME\r\n$8\r\nredis-rs\r\n", 27 | ); 28 | let response = std::fs::read("message-log/1/responses/message1.bin").unwrap(); 29 | assert_eq_string(&response, "+OK\r\n"); 30 | let request = std::fs::read("message-log/1/requests/message2.bin").unwrap(); 31 | assert_eq_string( 32 | &request, 33 | "*4\r\n$6\r\nCLIENT\r\n$7\r\nSETINFO\r\n$7\r\nLIB-VER\r\n$6\r\n0.29.5\r\n", 34 | ); 35 | let response = std::fs::read("message-log/1/responses/message2.bin").unwrap(); 36 | assert_eq_string(&response, "+OK\r\n"); 37 | 38 | // SET request sent explicitly by the test 39 | assert_ok(redis::cmd("SET").arg("foo").arg(42), &mut connection).await; 40 | let request = std::fs::read("message-log/1/requests/message3.bin").unwrap(); 41 | assert_eq_string(&request, "*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$2\r\n42\r\n"); 42 | let response = 
std::fs::read("message-log/1/responses/message3.bin").unwrap(); 43 | assert_eq_string(&response, "+OK\r\n"); 44 | 45 | shotover.shutdown_and_then_consume_events(&[]).await; 46 | 47 | std::fs::remove_dir_all("message-log").unwrap(); 48 | } 49 | 50 | /// Gives useful error message when both expected and actual data are valid utf8 strings 51 | fn assert_eq_string(actual_bytes: &[u8], expected_str: &str) { 52 | match std::str::from_utf8(actual_bytes) { 53 | Ok(actual) => assert_eq!(actual, expected_str), 54 | Err(_) => assert_eq!(actual_bytes, expected_str.as_bytes()), 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /shotover-proxy/tests/transforms/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "alpha-transforms")] 2 | pub mod log_to_file; 3 | pub mod query_type_filter; 4 | pub mod tee; 5 | -------------------------------------------------------------------------------- /shotover-proxy/tests/valkey_int_tests/assert.rs: -------------------------------------------------------------------------------- 1 | use pretty_assertions::assert_eq; 2 | use redis::Cmd; 3 | use redis::aio::MultiplexedConnection; 4 | 5 | pub async fn assert_nil(cmd: &mut Cmd, connection: &mut MultiplexedConnection) { 6 | assert_eq!( 7 | cmd.query_async::>(connection).await, 8 | Ok(None) 9 | ); 10 | } 11 | 12 | pub async fn assert_ok(cmd: &mut Cmd, connection: &mut MultiplexedConnection) { 13 | assert_eq!(cmd.query_async(connection).await, Ok("OK".to_string())); 14 | } 15 | 16 | pub async fn assert_int(cmd: &mut Cmd, connection: &mut MultiplexedConnection, value: i64) { 17 | assert_eq!(cmd.query_async(connection).await, Ok(value)); 18 | } 19 | 20 | pub async fn assert_bytes(cmd: &mut Cmd, connection: &mut MultiplexedConnection, value: &[u8]) { 21 | assert_eq!(cmd.query_async(connection).await, Ok(value.to_vec())); 22 | } 23 | -------------------------------------------------------------------------------- /shotover/benches/benches/codec/kafka_requests/fetch.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shotover/shotover-proxy/e6587b7bffc190ba1e034727a73bc674bf2e9ca0/shotover/benches/benches/codec/kafka_requests/fetch.bin -------------------------------------------------------------------------------- /shotover/benches/benches/codec/kafka_requests/list_offsets.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shotover/shotover-proxy/e6587b7bffc190ba1e034727a73bc674bf2e9ca0/shotover/benches/benches/codec/kafka_requests/list_offsets.bin -------------------------------------------------------------------------------- /shotover/benches/benches/codec/kafka_requests/metadata.bin: -------------------------------------------------------------------------------- 1 |  rdkafka -------------------------------------------------------------------------------- /shotover/benches/benches/codec/kafka_requests/produce.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shotover/shotover-proxy/e6587b7bffc190ba1e034727a73bc674bf2e9ca0/shotover/benches/benches/codec/kafka_requests/produce.bin -------------------------------------------------------------------------------- /shotover/benches/benches/codec/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cassandra; 2 | pub mod 
kafka; 3 | -------------------------------------------------------------------------------- /shotover/benches/benches/main.rs: -------------------------------------------------------------------------------- 1 | use criterion::criterion_main; 2 | use metrics_exporter_prometheus::PrometheusBuilder; 3 | 4 | mod chain; 5 | mod codec; 6 | 7 | fn init() { 8 | std::env::set_var("RUST_BACKTRACE", "1"); 9 | std::env::set_var("RUST_LIB_BACKTRACE", "0"); 10 | 11 | let recorder = PrometheusBuilder::new().build_recorder(); 12 | metrics::set_global_recorder(recorder).ok(); 13 | } 14 | 15 | criterion_main!( 16 | chain::benches, 17 | codec::kafka::benches, 18 | codec::cassandra::benches 19 | ); 20 | -------------------------------------------------------------------------------- /shotover/src/config/mod.rs: -------------------------------------------------------------------------------- 1 | //! Config types, used for serializing/deserializing shotover configuration files 2 | 3 | use anyhow::{Context, Result}; 4 | use serde::Deserialize; 5 | 6 | pub mod chain; 7 | pub mod topology; 8 | 9 | #[derive(Deserialize, Debug, Clone)] 10 | #[serde(deny_unknown_fields)] 11 | pub struct Config { 12 | pub main_log_level: String, 13 | pub observability_interface: Option<String>, 14 | } 15 | 16 | impl Config { 17 | pub fn from_file(filepath: String) -> Result<Config> { 18 | let file = std::fs::File::open(&filepath) 19 | .with_context(|| format!("Couldn't open the config file {}", &filepath))?; 20 | serde_yaml::from_reader(file) 21 | .with_context(|| format!("Failed to parse config file {}", &filepath)) 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /shotover/src/connection_span.rs: -------------------------------------------------------------------------------- 1 | //! This module purposefully only contains this one function to create a span. 2 | //! This allows us to enable/disable just this one span via the tracing filter: `shotover::connection_span=debug` 3 | //! 4 | //! Do not add more code here! 
5 | 6 | use tracing::Span; 7 | 8 | pub fn span(connection_count: u64, source: &str) -> Span { 9 | tracing::debug_span!("connection", id = connection_count, source = source) 10 | } 11 | -------------------------------------------------------------------------------- /shotover/src/frame/opensearch.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use bytes::Bytes; 3 | use http::{HeaderMap, Method, StatusCode, Uri, Version}; 4 | 5 | #[derive(Debug, Clone, PartialEq)] 6 | pub struct ResponseParts { 7 | pub status: StatusCode, 8 | pub version: Version, 9 | pub headers: HeaderMap, 10 | } 11 | 12 | #[derive(Debug, Clone, PartialEq)] 13 | pub struct RequestParts { 14 | pub method: Method, 15 | pub uri: Uri, 16 | pub version: Version, 17 | pub headers: HeaderMap, 18 | } 19 | 20 | #[derive(Debug, Clone, PartialEq)] 21 | pub enum HttpHead { 22 | Response(ResponseParts), 23 | Request(RequestParts), 24 | } 25 | 26 | #[derive(Debug, Clone, PartialEq)] 27 | pub struct OpenSearchFrame { 28 | pub headers: HttpHead, 29 | pub body: Bytes, 30 | } 31 | 32 | impl OpenSearchFrame { 33 | pub fn new(headers: HttpHead, body: Bytes) -> Self { 34 | Self { headers, body } 35 | } 36 | 37 | pub fn from_bytes(_bytes: &Bytes) -> Result<Self> { 38 | todo!(); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /shotover/src/frame/valkey.rs: -------------------------------------------------------------------------------- 1 | use crate::frame::ValkeyFrame; 2 | use crate::message::QueryType; 3 | 4 | #[inline] 5 | pub fn valkey_query_type(frame: &ValkeyFrame) -> QueryType { 6 | if let ValkeyFrame::Array(frames) = frame { 7 | if let Some(ValkeyFrame::BulkString(bytes)) = frames.first() { 8 | return match bytes.to_ascii_uppercase().as_slice() { 9 | b"APPEND" | b"BITCOUNT" | b"STRLEN" | b"GET" | b"GETRANGE" | b"MGET" 10 | | b"LRANGE" | b"LINDEX" | b"LLEN" | b"SCARD" | b"SISMEMBER" | b"SMEMBERS" 11 | | b"SUNION" | b"SINTER" | b"ZCARD" | b"ZCOUNT" | b"ZRANGE" | b"ZRANK" 12 | | b"ZSCORE" | b"ZRANGEBYSCORE" | b"HGET" | b"HGETALL" | b"HEXISTS" | b"HKEYS" 13 | | b"HLEN" | b"HSTRLEN" | b"HVALS" | b"PFCOUNT" => QueryType::Read, 14 | _ => QueryType::Write, 15 | }; 16 | } 17 | } 18 | QueryType::Write 19 | } 20 | 21 | pub fn valkey_query_name(frame: &ValkeyFrame) -> Option<String> { 22 | if let ValkeyFrame::Array(array) = frame { 23 | if let Some(ValkeyFrame::BulkString(v)) = array.first() { 24 | let upper_bytes = v.to_ascii_uppercase(); 25 | match String::from_utf8(upper_bytes) { 26 | Ok(query_type) => { 27 | return Some(query_type); 28 | } 29 | Err(err) => { 30 | tracing::error!("Failed to convert valkey bulkstring to string, err: {err:?}") 31 | } 32 | } 33 | } 34 | } 35 | None 36 | } 37 | -------------------------------------------------------------------------------- /shotover/src/frame/value.rs: -------------------------------------------------------------------------------- 1 | //! 
Generic representations of data types that appear in messages 2 | #[cfg(feature = "cassandra")] 3 | pub mod cassandra; 4 | #[cfg(feature = "valkey")] 5 | mod valkey; 6 | 7 | use bigdecimal::BigDecimal; 8 | use bytes::Bytes; 9 | use num_bigint::BigInt; 10 | use ordered_float::OrderedFloat; 11 | use serde::{Deserialize, Serialize}; 12 | use std::collections::{BTreeMap, BTreeSet}; 13 | use std::net::IpAddr; 14 | use uuid::Uuid; 15 | 16 | #[derive(PartialEq, Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialOrd, Ord)] 17 | pub enum GenericValue { 18 | Null, 19 | Bytes(Bytes), 20 | Ascii(String), 21 | Strings(String), 22 | Integer(i64, IntSize), 23 | Double(OrderedFloat<f64>), 24 | Float(OrderedFloat<f32>), 25 | Boolean(bool), 26 | Inet(IpAddr), 27 | List(Vec<GenericValue>), 28 | Set(BTreeSet<GenericValue>), 29 | Map(BTreeMap<GenericValue, GenericValue>), 30 | Varint(BigInt), 31 | Decimal(BigDecimal), 32 | Date(i32), 33 | Timestamp(i64), 34 | Duration(Duration), 35 | Timeuuid(Uuid), 36 | Varchar(String), 37 | Uuid(Uuid), 38 | Time(i64), 39 | Counter(i64), 40 | Tuple(Vec<GenericValue>), 41 | Udt(BTreeMap<String, GenericValue>), 42 | Custom(Bytes), 43 | } 44 | 45 | #[derive(PartialEq, Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialOrd, Ord)] 46 | pub enum IntSize { 47 | I64, // BigInt 48 | I32, // Int 49 | I16, // Smallint 50 | I8, // Tinyint 51 | } 52 | 53 | // TODO: This is tailored directly to Cassandra's Duration and will need to be adjusted once we add another protocol that uses it 54 | #[derive(PartialEq, Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialOrd, Ord)] 55 | pub struct Duration { 56 | pub months: i32, 57 | pub days: i32, 58 | pub nanoseconds: i64, 59 | } 60 | -------------------------------------------------------------------------------- /shotover/src/http.rs: -------------------------------------------------------------------------------- 1 | use axum::http::StatusCode; 2 | use axum::response::{IntoResponse, Response}; 3 | 4 | // Make our own error that wraps `anyhow::Error`. 5 | pub(crate) struct HttpServerError(pub anyhow::Error); 6 | 7 | // Tell axum how to convert `HttpServerError` into a response. 8 | impl IntoResponse for HttpServerError { 9 | fn into_response(self) -> Response { 10 | ( 11 | StatusCode::INTERNAL_SERVER_ERROR, 12 | format!("HTTP 500 error: {}", self.0), 13 | ) 14 | .into_response() 15 | } 16 | } 17 | 18 | // This enables using `?` on functions that return `Result<_, anyhow::Error>` to turn them into 19 | // `Result<_, HttpServerError>`. That way you don't need to do that manually. 20 | impl<E> From<E> for HttpServerError 21 | where 22 | E: Into<anyhow::Error>, 23 | { 24 | fn from(err: E) -> Self { 25 | Self(err.into()) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /shotover/src/tcp.rs: -------------------------------------------------------------------------------- 1 | //! Used to establish a TCP connection to a DB in a sink transform 2 | 3 | use anyhow::{Context, Result, anyhow}; 4 | use std::time::Duration; 5 | use tokio::{ 6 | net::{TcpStream, ToSocketAddrs}, 7 | time::timeout, 8 | }; 9 | 10 | pub async fn tcp_stream<A: ToSocketAddrs + std::fmt::Debug>( 11 | connect_timeout: Duration, 12 | destination: A, 13 | ) -> Result<TcpStream> { 14 | timeout(connect_timeout, TcpStream::connect(&destination)) 15 | .await 16 | .map_err(|_| { 17 | anyhow!( 18 | "destination {destination:?} did not respond to connection attempt within {connect_timeout:?}" 19 | ) 20 | })? 
21 | .with_context(|| format!("Failed to connect to destination {destination:?}")) 22 | } 23 | -------------------------------------------------------------------------------- /shotover/src/transforms/cassandra/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod peers_rewrite; 2 | pub mod sink_cluster; 3 | pub mod sink_single; 4 | -------------------------------------------------------------------------------- /shotover/src/transforms/debug/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "alpha-transforms")] 2 | pub mod force_parse; 3 | #[cfg(feature = "alpha-transforms")] 4 | pub mod log_to_file; 5 | pub mod printer; 6 | pub mod returner; 7 | -------------------------------------------------------------------------------- /shotover/src/transforms/debug/printer.rs: -------------------------------------------------------------------------------- 1 | use crate::message::Messages; 2 | use crate::transforms::{ 3 | ChainState, DownChainProtocol, Transform, TransformBuilder, TransformConfig, 4 | TransformContextBuilder, TransformContextConfig, UpChainProtocol, 5 | }; 6 | use anyhow::Result; 7 | use async_trait::async_trait; 8 | use serde::{Deserialize, Serialize}; 9 | use tracing::info; 10 | 11 | #[derive(Serialize, Deserialize, Debug)] 12 | #[serde(deny_unknown_fields)] 13 | pub struct DebugPrinterConfig; 14 | 15 | const NAME: &str = "DebugPrinter"; 16 | #[typetag::serde(name = "DebugPrinter")] 17 | #[async_trait(?Send)] 18 | impl TransformConfig for DebugPrinterConfig { 19 | async fn get_builder( 20 | &self, 21 | _transform_context: TransformContextConfig, 22 | ) -> Result<Box<dyn TransformBuilder>> { 23 | Ok(Box::new(DebugPrinter::new())) 24 | } 25 | 26 | fn up_chain_protocol(&self) -> UpChainProtocol { 27 | UpChainProtocol::Any 28 | } 29 | 30 | fn down_chain_protocol(&self) -> DownChainProtocol { 31 | DownChainProtocol::SameAsUpChain 32 | } 33 | } 34 | 35 | #[derive(Clone)] 36 | pub(crate) struct DebugPrinter { 37 | counter: i32, 38 | } 39 | 40 | impl Default for DebugPrinter { 41 | fn default() -> Self { 42 | Self::new() 43 | } 44 | } 45 | 46 | impl DebugPrinter { 47 | pub(crate) fn new() -> DebugPrinter { 48 | DebugPrinter { counter: 0 } 49 | } 50 | } 51 | 52 | impl TransformBuilder for DebugPrinter { 53 | fn build(&self, _transform_context: TransformContextBuilder) -> Box<dyn Transform> { 54 | Box::new(self.clone()) 55 | } 56 | 57 | fn get_name(&self) -> &'static str { 58 | NAME 59 | } 60 | } 61 | 62 | #[async_trait] 63 | impl Transform for DebugPrinter { 64 | fn get_name(&self) -> &'static str { 65 | NAME 66 | } 67 | 68 | async fn transform<'shorter, 'longer: 'shorter>( 69 | &mut self, 70 | chain_state: &'shorter mut ChainState<'longer>, 71 | ) -> Result<Messages> { 72 | for request in &mut chain_state.requests { 73 | info!("Request: {}", request.to_high_level_string()); 74 | } 75 | 76 | self.counter += 1; 77 | let mut responses = chain_state.call_next_transform().await?; 78 | 79 | for response in &mut responses { 80 | info!("Response: {}", response.to_high_level_string()); 81 | } 82 | Ok(responses) 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /shotover/src/transforms/kafka/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod sink_cluster; 2 | pub mod sink_single; 3 | -------------------------------------------------------------------------------- 
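A minimal sketch of calling the `tcp_stream` helper above from sink-transform code; the "localhost:1111" address and the 3000ms timeout mirror the remote_address/connect_timeout_ms values in the valkey topology files earlier, while the wrapper function and the `shotover::tcp` import path are illustrative assumptions rather than code from the repo:

    use std::time::Duration;
    use tokio::net::TcpStream;
    // assumption: the helper is reachable at this path in the public API
    use shotover::tcp::tcp_stream;

    async fn connect_to_valkey() -> anyhow::Result<TcpStream> {
        // `connect_timeout_ms: 3000` from the topology arrives here as a Duration;
        // a &str address satisfies the `ToSocketAddrs + Debug` bounds
        let stream = tcp_stream(Duration::from_millis(3000), "localhost:1111").await?;
        Ok(stream)
    }

Note the two failure modes visible in the helper itself: on timeout it returns the anyhow! message built above, while a refused connection falls through to the `.with_context` message, so callers get a distinct error for each case.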
/shotover/src/transforms/kafka/sink_cluster/scram_over_mtls/recreate_token_queue.rs: -------------------------------------------------------------------------------- 1 | use std::collections::VecDeque; 2 | use std::time::{Duration, Instant}; 3 | 4 | /// Keeps track of when tokens need to be recreated. 5 | pub(crate) struct RecreateTokenQueue { 6 | queue: VecDeque<TokenToRecreate>, 7 | token_lifetime: Duration, 8 | } 9 | 10 | impl RecreateTokenQueue { 11 | /// token_lifetime must be the lifetime that all tokens are created with. 12 | pub(crate) fn new(token_lifetime: Duration) -> Self { 13 | RecreateTokenQueue { 14 | queue: VecDeque::new(), 15 | token_lifetime, 16 | } 17 | } 18 | 19 | /// Returns the username of a token that needs to be recreated now. 20 | /// It will wait asynchronously until there is a token ready for recreation. 21 | /// If there are no pending token recreations this method will never return. 22 | /// 23 | /// # Cancel safety 24 | /// 25 | /// This method is cancel safe. 26 | /// If it is cancelled, it is guaranteed that no element was removed from the queue. 27 | pub(crate) async fn next(&mut self) -> String { 28 | if let Some(token) = self.queue.front() { 29 | tokio::time::sleep_until(token.recreate_at.into()).await; 30 | self.queue.pop_front().unwrap().username 31 | } else { 32 | futures::future::pending::<String>().await 33 | } 34 | } 35 | 36 | /// Adds a token to the queue with the provided username. 37 | /// The token is assumed to have been created with the `token_lifetime` that was passed to [`RecreateTokenQueue::new`]. 38 | pub(crate) fn push(&mut self, username: String) { 39 | self.queue.push_back(TokenToRecreate { 40 | // recreate the token when it is halfway through its lifetime 41 | recreate_at: Instant::now() + self.token_lifetime / 2, 42 | username, 43 | }) 44 | } 45 | } 46 | 47 | struct TokenToRecreate { 48 | recreate_at: Instant, 49 | username: String, 50 | } 51 | -------------------------------------------------------------------------------- /shotover/src/transforms/loopback.rs: -------------------------------------------------------------------------------- 1 | use super::TransformContextBuilder; 2 | use crate::message::Messages; 3 | use crate::transforms::{ChainState, Transform, TransformBuilder}; 4 | use anyhow::Result; 5 | use async_trait::async_trait; 6 | 7 | const NAME: &str = "Loopback"; 8 | 9 | #[derive(Clone, Default)] 10 | pub struct Loopback {} 11 | 12 | impl TransformBuilder for Loopback { 13 | fn build(&self, _transform_context: TransformContextBuilder) -> Box<dyn Transform> { 14 | Box::new(self.clone()) 15 | } 16 | 17 | fn get_name(&self) -> &'static str { 18 | NAME 19 | } 20 | 21 | fn is_terminating(&self) -> bool { 22 | true 23 | } 24 | } 25 | 26 | #[async_trait] 27 | impl Transform for Loopback { 28 | fn get_name(&self) -> &'static str { 29 | NAME 30 | } 31 | 32 | async fn transform<'shorter, 'longer: 'shorter>( 33 | &mut self, 34 | chain_state: &'shorter mut ChainState<'longer>, 35 | ) -> Result<Messages> { 36 | // This transform ultimately doesn't make a lot of sense semantically, 37 | // but it makes a vague attempt to follow transform invariants anyway. 
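// Each request is marked as the response to itself via set_request_id below, so the echoed requests can be correlated as responses by the caller.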
38 | for request in &mut chain_state.requests { 39 | request.set_request_id(request.id()); 40 | } 41 | Ok(std::mem::take(&mut chain_state.requests)) 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /shotover/src/transforms/null.rs: -------------------------------------------------------------------------------- 1 | use super::{DownChainProtocol, TransformContextBuilder, TransformContextConfig, UpChainProtocol}; 2 | use crate::message::Messages; 3 | use crate::transforms::{ChainState, Transform, TransformBuilder, TransformConfig}; 4 | use anyhow::Result; 5 | use async_trait::async_trait; 6 | use serde::{Deserialize, Serialize}; 7 | 8 | #[derive(Serialize, Deserialize, Debug)] 9 | #[serde(deny_unknown_fields)] 10 | pub struct NullSinkConfig; 11 | 12 | const NAME: &str = "NullSink"; 13 | #[typetag::serde(name = "NullSink")] 14 | #[async_trait(?Send)] 15 | impl TransformConfig for NullSinkConfig { 16 | async fn get_builder( 17 | &self, 18 | _transform_context: TransformContextConfig, 19 | ) -> Result<Box<dyn TransformBuilder>> { 20 | Ok(Box::new(NullSink {})) 21 | } 22 | 23 | fn up_chain_protocol(&self) -> UpChainProtocol { 24 | UpChainProtocol::Any 25 | } 26 | 27 | fn down_chain_protocol(&self) -> DownChainProtocol { 28 | DownChainProtocol::Terminating 29 | } 30 | } 31 | 32 | #[derive(Default)] 33 | pub struct NullSink {} 34 | 35 | impl TransformBuilder for NullSink { 36 | fn build(&self, _transform_context: TransformContextBuilder) -> Box<dyn Transform> { 37 | Box::new(NullSink {}) 38 | } 39 | 40 | fn get_name(&self) -> &'static str { 41 | NAME 42 | } 43 | 44 | fn is_terminating(&self) -> bool { 45 | true 46 | } 47 | } 48 | 49 | #[async_trait] 50 | impl Transform for NullSink { 51 | fn get_name(&self) -> &'static str { 52 | NAME 53 | } 54 | 55 | async fn transform<'shorter, 'longer: 'shorter>( 56 | &mut self, 57 | chain_state: &'shorter mut ChainState<'longer>, 58 | ) -> Result<Messages> { 59 | for request in &mut chain_state.requests { 60 | // reuse the requests to hold the responses to avoid an allocation 61 | *request = request 62 | .from_request_to_error_response("Handled by shotover null transform".to_string())?; 63 | } 64 | Ok(std::mem::take(&mut chain_state.requests)) 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /shotover/src/transforms/protect/local_kek.rs: -------------------------------------------------------------------------------- 1 | use crate::transforms::protect::crypto::{gen_key, gen_nonce}; 2 | use crate::transforms::protect::key_management::KeyMaterial; 3 | use anyhow::{Result, anyhow}; 4 | use bytes::Bytes; 5 | use chacha20poly1305::aead::Aead; 6 | use chacha20poly1305::{ChaCha20Poly1305, Key, KeyInit, Nonce}; 7 | use serde::{Deserialize, Serialize}; 8 | 9 | #[derive(Clone, Debug)] 10 | pub struct LocalKeyManagement { 11 | pub kek: Key, 12 | pub kek_id: String, 13 | } 14 | 15 | #[derive(Clone, Serialize, Deserialize)] 16 | #[serde(deny_unknown_fields)] 17 | pub struct DEKStructure { 18 | pub nonce: Nonce, 19 | pub key: Vec<u8>, 20 | } 21 | 22 | impl LocalKeyManagement { 23 | pub fn get_key(&self, dek: Option<Vec<u8>>) -> Result<KeyMaterial> { 24 | match dek { 25 | None => { 26 | let plaintext_dek = gen_key(); 27 | let nonce = gen_nonce(); 28 | 29 | let cipher = ChaCha20Poly1305::new(Key::from_slice(&self.kek)); // wrap the DEK under the KEK, mirroring the decrypt branch below 30 | 31 | let encrypted_dek = cipher 32 | .encrypt(&nonce, &*plaintext_dek) 33 | .map_err(|_| anyhow!("couldn't encrypt value"))?; 34 | 35 | let dek_struct = DEKStructure { 36 | nonce, 37 | key: encrypted_dek, 38 | }; 39 | let 
cipher_blob = serde_json::to_string(&dek_struct)?; 40 | Ok(KeyMaterial { 41 | ciphertext_blob: Bytes::from(cipher_blob), 42 | key_id: self.kek_id.clone(), 43 | plaintext: plaintext_dek, 44 | }) 45 | } 46 | Some(dek) => { 47 | let dek_struct: DEKStructure = serde_json::from_slice(dek.as_slice())?; 48 | 49 | let cipher = ChaCha20Poly1305::new(Key::from_slice(&self.kek)); 50 | 51 | let plaintext_dek = cipher 52 | .decrypt(Nonce::from_slice(&dek_struct.nonce), &*dek_struct.key) 53 | .map_err(|_| anyhow!("couldn't decrypt DEK"))?; 54 | 55 | Ok(KeyMaterial { 56 | ciphertext_blob: Bytes::from(dek), 57 | key_id: self.kek_id.clone(), 58 | plaintext: *Key::from_slice(plaintext_dek.as_slice()), 59 | }) 60 | } 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /shotover/src/transforms/protect/pkcs_11.rs: -------------------------------------------------------------------------------- 1 | // TODO -> https://github.com/mheese/rust-pkcs11 2 | -------------------------------------------------------------------------------- /shotover/src/transforms/util/mod.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{Error, Result}; 2 | use std::fmt; 3 | 4 | use crate::message::Message; 5 | 6 | pub mod cluster_connection_pool; 7 | 8 | /// Represents a `Request` to a connection within Shotover 9 | #[derive(Debug)] 10 | pub struct Request { 11 | // Message to send upstream to connection 12 | pub message: Message, 13 | // Channel to return the response to 14 | pub return_chan: Option<tokio::sync::oneshot::Sender<Response>>, 15 | } 16 | 17 | /// Represents a `Response` to a `Request` 18 | #[derive(Debug)] 19 | pub struct Response { 20 | // Response to the original `Message` 21 | pub response: Result<Message>, 22 | } 23 | 24 | #[derive(thiserror::Error, Debug)] 25 | pub enum ConnectionError<E: fmt::Debug + fmt::Display> { 26 | #[error("authenticator error: {0}")] 27 | Authenticator(E), 28 | 29 | #[error(transparent)] 30 | Other(#[from] Error), 31 | } 32 | -------------------------------------------------------------------------------- /shotover/src/transforms/valkey/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::transforms::util::ConnectionError; 2 | 3 | #[cfg(all(feature = "valkey", feature = "cassandra"))] 4 | pub mod cache; 5 | pub mod cluster_ports_rewrite; 6 | pub mod sink_cluster; 7 | pub mod sink_single; 8 | pub mod timestamp_tagging; 9 | 10 | #[derive(thiserror::Error, Clone, Debug)] 11 | pub enum ValkeyError { 12 | #[error("authentication is required")] 13 | NotAuthenticated, 14 | 15 | #[error("user not authorized to perform action")] 16 | NotAuthorized, 17 | 18 | #[error("username or password is incorrect")] 19 | BadCredentials, 20 | 21 | #[error("{0}")] 22 | Other(String), 23 | } 24 | 25 | impl ValkeyError { 26 | fn from_message(error: &str) -> ValkeyError { 27 | match error.split_once(' ').map(|x| x.0) { 28 | Some("NOAUTH") => ValkeyError::NotAuthenticated, 29 | Some("NOPERM") => ValkeyError::NotAuthorized, 30 | Some("WRONGPASS") => ValkeyError::BadCredentials, 31 | _ => ValkeyError::Other(error.to_string()), 32 | } 33 | } 34 | } 35 | 36 | #[derive(thiserror::Error, Debug)] 37 | pub enum TransformError { 38 | #[error(transparent)] 39 | Upstream(#[from] ValkeyError), 40 | 41 | #[error("protocol error: {0}")] 42 | Protocol(String), 43 | 44 | #[error(transparent)] 45 | Other(#[from] anyhow::Error), 46 | } 47 | 48 | impl TransformError { 49 | fn choose_upstream_or_first(errors: Vec<TransformError>) -> Option<TransformError> { // prefer the first upstream error; otherwise fall back to the first error of any kind 50 | errors 51 | .iter() 52 | 
.find_map(|e| match e { 53 | TransformError::Upstream(e) => Some(TransformError::Upstream(e.clone())), 54 | _ => None, 55 | }) 56 | .or_else(|| errors.into_iter().next()) 57 | } 58 | } 59 | 60 | impl From<ConnectionError<TransformError>> for TransformError { 61 | fn from(error: ConnectionError<TransformError>) -> Self { 62 | match error { 63 | ConnectionError::Other(e) => TransformError::Other(e), 64 | ConnectionError::Authenticator(e) => e, 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /shotover/src/transforms/valkey/timestamp_tagging.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /test-helpers/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "test-helpers" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "Apache-2.0" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [features] 10 | cassandra-cpp-driver-tests = ["cassandra-cpp"] 11 | kafka-cpp-driver-tests = ["rdkafka"] 12 | 13 | [dependencies] 14 | tracing.workspace = true 15 | subprocess.workspace = true 16 | tokio-bin-process.workspace = true 17 | cdrs-tokio.workspace = true 18 | cassandra-protocol.workspace = true 19 | cassandra-cpp = { version = "3.0.0", default-features = false, features = [ 20 | "log", 21 | ], optional = true } 22 | scylla.workspace = true 23 | openssl.workspace = true 24 | bytes.workspace = true 25 | ordered-float.workspace = true 26 | tokio.workspace = true 27 | uuid.workspace = true 28 | redis.workspace = true 29 | itertools.workspace = true 30 | reqwest.workspace = true 31 | tracing-subscriber.workspace = true 32 | anyhow.workspace = true 33 | rcgen.workspace = true 34 | rdkafka = { version = "0.37", features = ["cmake-build"], optional = true } 35 | docker-compose-runner = "0.3.0" 36 | j4rs = "0.22.0" 37 | futures-util = "0.3.28" 38 | http = "1.1.0" 39 | rustls = { version = "0.23.18", default-features = false, features = ["aws_lc_rs"] } 40 | rustls-pki-types = "1.0.1" 41 | rustls-pemfile = "2.0.0" 42 | tokio-tungstenite = { version = "0.26", features = ["rustls-tls-native-roots"] } 43 | pretty_assertions.workspace = true 44 | serde.workspace = true 45 | -------------------------------------------------------------------------------- /test-helpers/src/connection/cassandra/go.rs: -------------------------------------------------------------------------------- 1 | use crate::run_command_async; 2 | use std::{path::Path, time::Duration}; 3 | 4 | pub async fn run_go_smoke_test() { 5 | let project_dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("src/connection/cassandra/go"); 6 | tokio::time::timeout( 7 | Duration::from_secs(60), 8 | run_command_async(&project_dir, "go", &["run", "basic.go"]), 9 | ) 10 | .await 11 | .unwrap(); 12 | } 13 | -------------------------------------------------------------------------------- /test-helpers/src/connection/cassandra/go/basic.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/gocql/gocql" 7 | ) 8 | 9 | func main() { 10 | fmt.Println("Starting go smoke test") 11 | 12 | dc := "datacenter1" 13 | cluster := gocql.NewCluster("127.0.0.1:9042") 14 | cluster.Keyspace = "system" 15 | cluster.RetryPolicy = &gocql.ExponentialBackoffRetryPolicy{NumRetries: 5} 16 | cluster.Consistency = gocql.LocalQuorum 17 | 
cluster.SerialConsistency = gocql.LocalSerial 18 | cluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(gocql.DCAwareRoundRobinPolicy(dc)) 19 | cluster.HostFilter = gocql.DataCentreHostFilter(dc) 20 | cluster.WriteCoalesceWaitTime = 0 21 | cluster.NumConns = 4 22 | cluster.ProtoVersion = 4 23 | 24 | session, err := cluster.CreateSession() 25 | if err != nil { 26 | panic(err) 27 | } 28 | 29 | session.Close() 30 | 31 | fmt.Println("Success!") 32 | } 33 | -------------------------------------------------------------------------------- /test-helpers/src/connection/cassandra/go/go.mod: -------------------------------------------------------------------------------- 1 | module example/basic 2 | 3 | go 1.20 4 | 5 | require ( 6 | github.com/gocql/gocql v1.7.0 // indirect 7 | github.com/golang/snappy v0.0.3 // indirect 8 | github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect 9 | gopkg.in/inf.v0 v0.9.1 // indirect 10 | ) 11 | -------------------------------------------------------------------------------- /test-helpers/src/connection/cassandra/go/go.sum: -------------------------------------------------------------------------------- 1 | github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= 2 | github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= 3 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/gocql/gocql v1.7.0 h1:O+7U7/1gSN7QTEAaMEsJc1Oq2QHXvCWoF3DFK9HDHus= 5 | github.com/gocql/gocql v1.7.0/go.mod h1:vnlvXyFZeLBF0Wy+RS8hrOdbn0UWsWtdg07XJnFxZ+4= 6 | github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= 7 | github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 8 | github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= 9 | github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= 10 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 11 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 12 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 13 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 14 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 15 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 16 | gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= 17 | gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 18 | -------------------------------------------------------------------------------- /test-helpers/src/connection/kafka/node.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | 3 | use crate::run_command_async; 4 | 5 | pub async fn run_node_smoke_test(address: &str) { 6 | let dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("src/connection/kafka/node"); 7 | let config = format!( 8 | r#"({{ 9 | clientId: 'nodejs-client', 10 | brokers: ["{address}"], 11 | }})"# 12 | ); 13 | run_command_async(&dir, "npm", &["install"]).await; 14 | run_command_async(&dir, "npm", &["start", &config]).await; 15 | } 16 | 17 | pub async fn run_node_smoke_test_scram(address: &str, user: &str, 
password: &str) { 18 | let dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("src/connection/kafka/node"); 19 | let config = format!( 20 | r#"({{ 21 | clientId: 'nodejs-client', 22 | brokers: ["{address}"], 23 | sasl: {{ 24 | mechanism: 'scram-sha-256', 25 | username: '{user}', 26 | password: '{password}' 27 | }} 28 | }})"# 29 | ); 30 | run_command_async(&dir, "npm", &["install"]).await; 31 | run_command_async(&dir, "npm", &["start", &config]).await; 32 | } 33 | -------------------------------------------------------------------------------- /test-helpers/src/connection/kafka/node/index.js: -------------------------------------------------------------------------------- 1 | const { Kafka } = require('kafkajs') 2 | const fs = require('fs') 3 | const assert = require('assert') 4 | 5 | function delay(time) { 6 | return new Promise(resolve => setTimeout(resolve, time)); 7 | } 8 | 9 | const run = async () => { 10 | const args = process.argv.slice(2); 11 | const config = args[0]; 12 | 13 | const kafka = new Kafka(eval(config)) 14 | 15 | // Producing 16 | const producer = kafka.producer() 17 | await producer.connect() 18 | await producer.send({ 19 | topic: 'test', 20 | messages: [ 21 | { value: 'foo' }, 22 | ], 23 | }) 24 | await producer.send({ 25 | topic: 'test', 26 | messages: [ 27 | { value: 'a longer string' }, 28 | ], 29 | }) 30 | await producer.disconnect() 31 | 32 | // Consuming 33 | const consumer = kafka.consumer({ groupId: 'test-group' }) 34 | await consumer.connect() 35 | await consumer.subscribe({ topic: 'test', fromBeginning: true }) 36 | 37 | let messages = [] 38 | await consumer.run({ 39 | eachMessage: async ({ topic, partition, message }) => { 40 | messages.push({ 41 | topic, 42 | partition, 43 | offset: message.offset, 44 | value: message.value.toString(), 45 | }) 46 | }, 47 | }) 48 | 49 | // Use a very primitive sleep loop since nodejs doesn't seem to have any kind of mpsc or channel functionality :/ 50 | while (messages.length < 2) { 51 | await delay(10); 52 | } 53 | assert.deepStrictEqual(messages, [ 54 | { 55 | topic: 'test', 56 | partition: 0, 57 | offset: '0', 58 | value: 'foo', 59 | }, 60 | { 61 | topic: 'test', 62 | partition: 0, 63 | offset: '1', 64 | value: 'a longer string', 65 | } 66 | ]) 67 | await consumer.disconnect() 68 | } 69 | 70 | run() -------------------------------------------------------------------------------- /test-helpers/src/connection/kafka/node/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "kafkajs_wrapper", 3 | "version": "1.0.0", 4 | "lockfileVersion": 3, 5 | "requires": true, 6 | "packages": { 7 | "": { 8 | "name": "kafkajs_wrapper", 9 | "version": "1.0.0", 10 | "license": "Apache-2.0", 11 | "dependencies": { 12 | "kafkajs": "^2.2.4" 13 | } 14 | }, 15 | "node_modules/kafkajs": { 16 | "version": "2.2.4", 17 | "resolved": "https://registry.npmjs.org/kafkajs/-/kafkajs-2.2.4.tgz", 18 | "integrity": "sha512-j/YeapB1vfPT2iOIUn/vxdyKEuhuY2PxMBvf5JWux6iSaukAccrMtXEY/Lb7OvavDhOWME589bpLrEdnVHjfjA==", 19 | "license": "MIT", 20 | "engines": { 21 | "node": ">=14.0.0" 22 | } 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /test-helpers/src/connection/kafka/node/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "kafkajs_wrapper", 3 | "version": "1.0.0", 4 | "main": "index.js", 5 | "scripts": { 6 | "start": "node index.js" 7 | }, 8 | "author": "", 9 | "license": "Apache-2.0", 
10 | "description": "", 11 | "dependencies": { 12 | "kafkajs": "^2.2.4" 13 | } 14 | } -------------------------------------------------------------------------------- /test-helpers/src/connection/kafka/python/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /test-helpers/src/connection/kafka/python/auth_fail.py: -------------------------------------------------------------------------------- 1 | from kafka import KafkaConsumer 2 | from kafka import KafkaProducer 3 | from kafka.errors import KafkaError 4 | import sys 5 | 6 | def main(): 7 | config = eval(sys.argv[1]) 8 | print("Running kafka-python script with config:") 9 | print(config) 10 | 11 | try: 12 | KafkaProducer(**config) 13 | raise Exception("KafkaProducer was succesfully created but expected to fail due to using incorrect username/password") 14 | except KafkaError: 15 | print("kafka-python auth_fail script passed all test cases") 16 | 17 | if __name__ == "__main__": 18 | main() 19 | -------------------------------------------------------------------------------- /test-helpers/src/connection/kafka/python/main.py: -------------------------------------------------------------------------------- 1 | from kafka import KafkaConsumer 2 | from kafka import KafkaAdminClient 3 | from kafka import KafkaProducer 4 | from kafka.admin import NewTopic 5 | from kafka.errors import UnknownTopicOrPartitionError 6 | from time import sleep 7 | import sys 8 | 9 | def main(): 10 | config = eval(sys.argv[1]) 11 | print("Running kafka-python script with config:") 12 | print(config) 13 | 14 | admin = KafkaAdminClient(**config) 15 | admin.create_topics([ 16 | NewTopic( 17 | name='python_test_topic', 18 | num_partitions=1, 19 | replication_factor=1 20 | ) 21 | ]) 22 | producer = KafkaProducer(**config) 23 | 24 | # send first message with retry since the topic may not be created yet. 25 | retry_if_not_ready(lambda : producer.send('python_test_topic', b'some_message_bytes').get(timeout=30)) 26 | 27 | # send second message without retry, it has no reason to fail. 
28 | producer.send('python_test_topic', b'another_message').get(timeout=30) 29 | 30 | consumer = KafkaConsumer('python_test_topic', auto_offset_reset='earliest', **config) 31 | 32 | msg = next(consumer) 33 | assert(msg.topic == "python_test_topic") 34 | assert(msg.value == b"some_message_bytes") 35 | assert(msg.offset == 0) 36 | 37 | msg = next(consumer) 38 | assert(msg.topic == "python_test_topic") 39 | assert(msg.value == b"another_message") 40 | assert(msg.offset == 1) 41 | 42 | print("kafka-python script passed all test cases") 43 | 44 | def retry_if_not_ready(attempt): 45 | tries = 0 46 | while True: 47 | try: 48 | attempt() 49 | return 50 | except UnknownTopicOrPartitionError: 51 | tries += 1 52 | sleep(0.1) 53 | # fail after 10s worth of attempts 54 | if tries > 100: 55 | raise Exception("Timed out, hit UnknownTopicOrPartitionError 100 times in a row") 56 | 57 | 58 | if __name__ == "__main__": 59 | main() 60 | -------------------------------------------------------------------------------- /test-helpers/src/connection/kafka/python/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "kafka-smoke-test" 3 | version = "0.1.0" 4 | requires-python = ">=3.12" 5 | dependencies = [ 6 | "kafka-python-ng>=2.2.3", 7 | ] 8 | -------------------------------------------------------------------------------- /test-helpers/src/connection/kafka/python/uv.lock: -------------------------------------------------------------------------------- 1 | version = 1 2 | requires-python = ">=3.12" 3 | 4 | [[package]] 5 | name = "kafka-python-ng" 6 | version = "2.2.3" 7 | source = { registry = "https://pypi.org/simple" } 8 | sdist = { url = "https://files.pythonhosted.org/packages/ce/04/1d65bdf3f0103a08710e226b851de4b357ac702f1cadabf6128bab7518a7/kafka_python_ng-2.2.3.tar.gz", hash = "sha256:f79f28e10ade9b5a9860b2ec15b7cc8dc510d5702f5a399430478cff5f93a05a", size = 330644 } 9 | wheels = [ 10 | { url = "https://files.pythonhosted.org/packages/0f/61/22e778f642465a157c449782300d8817ebbc106794a8a7ebe88cbb846b05/kafka_python_ng-2.2.3-py2.py3-none-any.whl", hash = "sha256:adc6e82147c441ca4ae1f22e291fc08efab0d10971cbd4aa1481d2ffa38e9480", size = 232824 }, 11 | ] 12 | 13 | [[package]] 14 | name = "kafka-smoke-test" 15 | version = "0.1.0" 16 | source = { virtual = "." 
} 17 | dependencies = [ 18 | { name = "kafka-python-ng" }, 19 | ] 20 | 21 | [package.metadata] 22 | requires-dist = [{ name = "kafka-python-ng", specifier = ">=2.2.3" }] 23 | -------------------------------------------------------------------------------- /test-helpers/src/connection/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cassandra; 2 | 3 | pub(crate) mod java; 4 | pub mod kafka; 5 | // valkey_connection is named differently to the cassandra module because it contains raw functions instead of a struct with methods 6 | pub mod valkey_connection; 7 | -------------------------------------------------------------------------------- /test-helpers/src/metrics.rs: -------------------------------------------------------------------------------- 1 | use itertools::Itertools; 2 | 3 | async fn http_request_metrics() -> String { 4 | let url = "http://localhost:9001/metrics"; 5 | reqwest::get(url).await.unwrap().text().await.unwrap() 6 | } 7 | 8 | pub async fn get_metrics_value(key: &str) -> String { 9 | let actual = http_request_metrics().await; 10 | 11 | for actual_line in actual.lines() { 12 | if let Some(actual_value) = actual_line.strip_prefix(key) { 13 | return actual_value.trim().to_owned(); 14 | } 15 | } 16 | panic!("key {key:?} was not found in metrics output:\n{actual}"); 17 | } 18 | 19 | /// Asserts that the `expected` lines of keys are included in the metrics. 20 | /// The `previous` lines are excluded from the assertion, allowing for better error messages when checking for added lines. 21 | /// The keys are removed to keep the output deterministic. 22 | pub async fn assert_metrics_has_keys(previous: &str, expected: &str) { 23 | let actual = http_request_metrics().await; 24 | 25 | let previous: Vec<&str> = previous.lines().filter(|x| !x.is_empty()).collect(); 26 | let expected_sorted: Vec<&str> = expected 27 | .lines() 28 | .filter(|line| !line.is_empty()) 29 | .sorted() 30 | .collect(); 31 | let actual_sorted: Vec<&str> = actual 32 | .lines() 33 | .map(|x| { 34 | // Strip numbers from the end 35 | x.trim_end_matches(|c: char| { 36 | ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ' ', '.'].contains(&c) 37 | }) 38 | }) 39 | .filter(|line| { 40 | !line.is_empty() && previous.iter().all(|previous| !line.starts_with(previous)) 41 | }) 42 | .sorted() 43 | .collect(); 44 | 45 | let expected_string = expected_sorted.join("\n"); 46 | let actual_string = actual_sorted.join("\n"); 47 | 48 | // Manually recreate assert_eq because it formats the strings poorly 49 | assert!( 50 | expected_string == actual_string, 51 | "expected:\n{expected_string}\nbut was:\n{actual_string}" 52 | ); 53 | } 54 | 55 | /// Asserts that the metrics contains a key with the corresponding value 56 | /// Use this to make assertions on specific keys that you know are deterministic 57 | pub async fn assert_metrics_key_value(key: &str, value: &str) { 58 | let actual_value = get_metrics_value(key).await; 59 | assert!( 60 | value == actual_value, 61 | "Expected metrics key {key:?} to have value {value:?} but it was instead {actual_value:?}" 62 | ); 63 | } 64 | -------------------------------------------------------------------------------- /test-helpers/src/test_tracing.rs: -------------------------------------------------------------------------------- 1 | use tracing::level_filters::LevelFilter; 2 | use tracing_subscriber::EnvFilter; 3 | 4 | pub fn setup_tracing_subscriber_for_test() { 5 | tracing_subscriber::fmt() 6 | .with_env_filter( 7 | EnvFilter::builder() 8 | 
.with_default_directive(LevelFilter::INFO.into()) // default to INFO; directives from RUST_LOG (read by from_env below) are layered on top 9 | .from_env() 10 | .unwrap(), 11 | ) 12 | .try_init() 13 | .ok(); 14 | } 15 | -------------------------------------------------------------------------------- /website/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "website" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "Apache-2.0" 6 | publish = false 7 | 8 | [dependencies] 9 | subprocess.workspace = true 10 | anyhow.workspace = true 11 | askama = { version = "0.14", default-features = false, features = ["std", "derive"] } 12 | semver = "1.0.23" 13 | devserver_lib = { version = "0.4.2", default-features = false } 14 | clap.workspace = true -------------------------------------------------------------------------------- /website/assets/arrow_right.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shotover/shotover-proxy/e6587b7bffc190ba1e034727a73bc674bf2e9ca0/website/assets/arrow_right.png -------------------------------------------------------------------------------- /website/assets/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shotover/shotover-proxy/e6587b7bffc190ba1e034727a73bc674bf2e9ca0/website/assets/favicon.ico -------------------------------------------------------------------------------- /website/assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shotover/shotover-proxy/e6587b7bffc190ba1e034727a73bc674bf2e9ca0/website/assets/logo.png -------------------------------------------------------------------------------- /website/assets/title_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shotover/shotover-proxy/e6587b7bffc190ba1e034727a73bc674bf2e9ca0/website/assets/title_image.png -------------------------------------------------------------------------------- /website/readme.md: -------------------------------------------------------------------------------- 1 | # Shotover website 2 | 3 | This tool generates the shotover website. 4 | To test the website locally, run the command: `cargo run -p website -- --serve` 5 | 6 | The website is hosted via github actions at the domain: 7 | -------------------------------------------------------------------------------- /website/src/cli.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | 3 | /// Generates the shotover website. 4 | #[derive(Parser, Clone)] 5 | #[clap()] 6 | pub struct Args { 7 | /// As well as generating the site, serve the contents of the site over http. 8 | #[clap(long)] 9 | pub serve: bool, 10 | } 11 | -------------------------------------------------------------------------------- /website/src/version_tags.rs: -------------------------------------------------------------------------------- 1 | use crate::run_command; 2 | use semver::Version; 3 | use std::path::Path; 4 | 5 | pub struct VersionTag { 6 | /// e.g. "v0.1.1" 7 | pub tag: String, 8 | /// e.g. "0.1.Z" 9 | pub semver_range: String, 10 | /// e.g. 
0.1.1 11 | pub version: Version, 12 | } 13 | 14 | impl VersionTag { 15 | fn new(tag: &str) -> Option<VersionTag> { 16 | let version = Version::parse(tag.strip_prefix("v")?).ok()?; 17 | 18 | // ignore any prerelease or otherwise unusual tags 19 | if !version.pre.is_empty() || !version.build.is_empty() { 20 | return None; 21 | } 22 | 23 | let semver_range = if version.major != 0 { 24 | format!("{}.Y.Z", version.major) 25 | } else { 26 | format!("0.{}.Z", version.minor) 27 | }; 28 | Some(VersionTag { 29 | tag: tag.to_owned(), 30 | version, 31 | semver_range, 32 | }) 33 | } 34 | } 35 | 36 | pub fn get_versions_of_repo(repo_path: &Path) -> Vec<VersionTag> { 37 | let mut versions: Vec<VersionTag> = run_command(repo_path, "git", &["tag"]) 38 | .unwrap() 39 | .lines() 40 | .filter_map(VersionTag::new) 41 | .filter(|x| x.version >= Version::new(0, 1, 0)) 42 | .collect(); 43 | 44 | // reverse sort the list of versions 45 | versions.sort_by_key(|x| x.version.clone()); 46 | versions.reverse(); 47 | 48 | // Filter out any versions with duplicate semver range, keeping the first item. 49 | // Keeping the first items leaves us with the most recent item due to the previous reverse sort. 50 | let mut known = vec![]; 51 | versions.retain(|version| { 52 | let any_known = known 53 | .iter() 54 | .any(|known_range| &version.semver_range == known_range); 55 | known.push(version.semver_range.clone()); 56 | !any_known 57 | }); 58 | 59 | versions 60 | } 61 | -------------------------------------------------------------------------------- /website/templates/docs.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block title %} 4 | Shotover Documentation Index 5 | {% endblock %} 6 | 7 | {% block content %}
[template markup stripped in extraction: lines 8-21 wrapped the only recoverable text, "Documentation for current and historical versions of shotover is hosted below:", in container markup followed by what is evidently a list of per-version documentation links whose tags and targets were lost]
22 | {% endblock %} -------------------------------------------------------------------------------- /windsock-cloud-docker/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "windsock-cloud-docker" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "Apache-2.0" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | shell-quote.workspace = true 11 | tokio.workspace = true 12 | -------------------------------------------------------------------------------- /windsock-cloud-docker/src/main.rs: -------------------------------------------------------------------------------- 1 | // A helper to run `windsock --cloud` within docker to work around libc issues 2 | // It is not possible to use this helper to run windsock locally as that would involve running docker within docker 3 | 4 | mod container; 5 | 6 | use container::{Container, cleanup}; 7 | use tokio::signal::unix::{SignalKind, signal}; 8 | 9 | #[tokio::main] 10 | async fn main() { 11 | let mut interrupt = signal(SignalKind::interrupt()).unwrap(); 12 | let mut terminate = signal(SignalKind::terminate()).unwrap(); 13 | 14 | let container = Container::new().await; 15 | 16 | tokio::select!( 17 | _ = container.run_windsock() => {}, 18 | _ = interrupt.recv() => cleanup().await, 19 | _ = terminate.recv() => cleanup().await, 20 | ); 21 | } 22 | --------------------------------------------------------------------------------
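The `main` above races `container.run_windsock()` against SIGINT and SIGTERM so that `cleanup()` still runs when a benchmark run is interrupted: whichever `tokio::select!` branch completes first wins, and the losing futures are dropped. A standalone sketch of the same idiom, with `do_work`/`do_cleanup` as placeholder stand-ins for the container calls (not from the repo):

    use tokio::signal::unix::{SignalKind, signal};

    async fn do_work() { /* stand-in for container.run_windsock() */ }
    async fn do_cleanup() { /* stand-in for the container teardown done by cleanup() */ }

    #[tokio::main]
    async fn main() {
        let mut interrupt = signal(SignalKind::interrupt()).unwrap();
        let mut terminate = signal(SignalKind::terminate()).unwrap();
        tokio::select!(
            // the first branch to finish cancels the others by dropping their futures
            _ = do_work() => {},
            _ = interrupt.recv() => do_cleanup().await,
            _ = terminate.recv() => do_cleanup().await,
        );
    }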