├── .circleci ├── config.yml ├── generate_coverage.sh ├── pgcat.toml ├── run_tests.sh ├── server.cert └── server.key ├── .dockerignore ├── .editorconfig ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── dependabot.yml └── workflows │ ├── build-and-push.yaml │ ├── chart-lint-test.yaml │ ├── chart-release.yaml │ ├── generate-chart-readme.yaml │ ├── publish-ci-docker-image.yml │ └── publish-deb-package.yml ├── .gitignore ├── .rustfmt.toml ├── CONFIG.md ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── Dockerfile.ci ├── Dockerfile.dev ├── LICENSE ├── README.md ├── charts └── pgcat │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ ├── NOTES.txt │ ├── _config.tpl │ ├── _helpers.tpl │ ├── deployment.yaml │ ├── ingress.yaml │ ├── secret.yaml │ ├── service.yaml │ └── serviceaccount.yaml │ └── values.yaml ├── control ├── cov-style.css ├── cr.yaml ├── ct.yaml ├── dev ├── Dockerfile ├── dev_bashrc ├── docker-compose.yaml └── script │ └── console ├── docker-compose.yml ├── examples └── docker │ └── pgcat.toml ├── grafana_dashboard.json ├── images ├── instacart.webp ├── one_signal.webp └── postgresml.webp ├── pgcat.minimal.toml ├── pgcat.service ├── pgcat.toml ├── postinst ├── postrm ├── prerm ├── src ├── admin.rs ├── auth_passthrough.rs ├── client.rs ├── cmd_args.rs ├── config.rs ├── constants.rs ├── dns_cache.rs ├── errors.rs ├── lib.rs ├── logger.rs ├── main.rs ├── messages.rs ├── mirrors.rs ├── plugins │ ├── intercept.rs │ ├── mod.rs │ ├── prewarmer.rs │ ├── query_logger.rs │ └── table_access.rs ├── pool.rs ├── prometheus.rs ├── query_router.rs ├── scram.rs ├── server.rs ├── sharding.rs ├── stats.rs ├── stats │ ├── address.rs │ ├── client.rs │ ├── pool.rs │ └── server.rs └── tls.rs ├── start_test_env.sh ├── tests ├── docker │ ├── Dockerfile │ ├── docker-compose.yml │ └── run.sh ├── go │ ├── go.mod │ ├── go.sum │ ├── pgcat.toml │ ├── prepared_test.go │ └── setup.go ├── pgbench │ └── simple.sql ├── python │ ├── .gitignore │ ├── requirements.txt │ ├── test_auth.py │ ├── test_pgcat.py │ └── utils.py ├── ruby │ ├── .ruby-version │ ├── Gemfile │ ├── Gemfile.lock │ ├── admin_spec.rb │ ├── auth_query_spec.rb │ ├── capture │ ├── copy_spec.rb │ ├── helpers │ │ ├── auth_query_helper.rb │ │ ├── pg_instance.rb │ │ ├── pg_socket.rb │ │ ├── pgcat_helper.rb │ │ └── pgcat_process.rb │ ├── load_balancing_spec.rb │ ├── mirrors_spec.rb │ ├── misc_spec.rb │ ├── plugins_spec.rb │ ├── prepared_spec.rb │ ├── protocol_spec.rb │ ├── routing_spec.rb │ ├── sharding_spec.rb │ ├── spec_helper.rb │ ├── stats_spec.rb │ └── tests.rb ├── rust │ ├── .gitignore │ ├── Cargo.lock │ ├── Cargo.toml │ └── src │ │ └── main.rs └── sharding │ ├── README.md │ ├── partition_hash_test_setup.sql │ ├── query_routing.sh │ ├── query_routing_setup.sql │ ├── query_routing_test_insert.sql │ ├── query_routing_test_primary_replica.sql │ ├── query_routing_test_select.sql │ └── query_routing_test_validate.sql └── utilities ├── deb.sh ├── generate_config_docs.py └── requirements.txt /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | # Use the latest 2.1 version of CircleCI pipeline process engine. 2 | # See: https://circleci.com/docs/2.0/configuration-reference 3 | version: 2.1 4 | 5 | # Define a job to be invoked later in a workflow. 6 | # See: https://circleci.com/docs/2.0/configuration-reference/#jobs 7 | jobs: 8 | build: 9 | # Specify the execution environment. 
You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub. 10 | # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor 11 | docker: 12 | - image: ghcr.io/postgresml/pgcat-ci:latest 13 | environment: 14 | RUST_LOG: info 15 | LLVM_PROFILE_FILE: /tmp/pgcat-%m-%p.profraw 16 | RUSTC_BOOTSTRAP: 1 17 | CARGO_INCREMENTAL: 0 18 | RUSTFLAGS: "-Zprofile -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort -Cinstrument-coverage" 19 | RUSTDOCFLAGS: "-Cpanic=abort" 20 | - image: postgres:14 21 | command: ["postgres", "-p", "5432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"] 22 | environment: 23 | POSTGRES_USER: postgres 24 | POSTGRES_DB: postgres 25 | POSTGRES_PASSWORD: postgres 26 | POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5 27 | - image: postgres:14 28 | command: ["postgres", "-p", "7432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"] 29 | environment: 30 | POSTGRES_USER: postgres 31 | POSTGRES_DB: postgres 32 | POSTGRES_PASSWORD: postgres 33 | POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256 34 | - image: postgres:14 35 | command: ["postgres", "-p", "8432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"] 36 | environment: 37 | POSTGRES_USER: postgres 38 | POSTGRES_DB: postgres 39 | POSTGRES_PASSWORD: postgres 40 | POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256 41 | - image: postgres:14 42 | command: ["postgres", "-p", "9432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"] 43 | environment: 44 | POSTGRES_USER: postgres 45 | POSTGRES_DB: postgres 46 | POSTGRES_PASSWORD: postgres 47 | POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256 48 | 49 | - image: postgres:14 50 | command: ["postgres", "-p", "10432", "-c", "shared_preload_libraries=pg_stat_statements"] 51 | environment: 52 | POSTGRES_USER: postgres 53 | POSTGRES_DB: postgres 54 | POSTGRES_PASSWORD: postgres 55 | POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5 56 | 57 | # Add steps to the job 58 | # See: https://circleci.com/docs/2.0/configuration-reference/#steps 59 | steps: 60 | - checkout 61 | - restore_cache: 62 | key: cargo-lock-2-{{ checksum "Cargo.lock" }} 63 | - run: 64 | name: "Lint" 65 | command: "cargo fmt --check" 66 | - run: 67 | name: "Clippy" 68 | command: "cargo clippy --all --all-targets -- -Dwarnings" 69 | - run: 70 | name: "Tests" 71 | command: "cargo clean && cargo build && cargo test && bash .circleci/run_tests.sh && .circleci/generate_coverage.sh" 72 | - store_artifacts: 73 | path: /tmp/cov 74 | destination: coverage-data 75 | - save_cache: 76 | key: cargo-lock-2-{{ checksum "Cargo.lock" }} 77 | paths: 78 | - target 79 | - ~/.cargo 80 | 81 | 82 | # Invoke jobs via workflows 83 | # See: https://circleci.com/docs/2.0/configuration-reference/#workflows 84 | workflows: 85 | build: 86 | jobs: 87 | - build 88 | -------------------------------------------------------------------------------- /.circleci/generate_coverage.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # inspired by https://doc.rust-lang.org/rustc/instrument-coverage.html#tips-for-listing-the-binaries-automatically 4 | TEST_OBJECTS=$( \ 5 | for file in $(cargo test --no-run 2>&1 | grep "target/debug/deps/pgcat-[[:alnum:]]\+" -o); \ 6 | do \ 7 | printf "%s %s " --object $file; \ 8 | done \ 9 | ) 10 | 11 | rust-profdata merge -sparse /tmp/pgcat-*.profraw -o /tmp/pgcat.profdata 12 | 13 | bash -c "rust-cov export -ignore-filename-regex='rustc|registry' -Xdemangler=rustfilt -instr-profile=/tmp/pgcat.profdata $TEST_OBJECTS --object ./target/debug/pgcat --format lcov > ./lcov.info" 14 | 15 | genhtml lcov.info --title "PgCat Code Coverage" --css-file ./cov-style.css --no-function-coverage --highlight --ignore-errors source --legend --output-directory /tmp/cov --prefix $(pwd) 16 | -------------------------------------------------------------------------------- /.circleci/pgcat.toml: -------------------------------------------------------------------------------- 1 | # 2 | # PgCat config example. 3 | # 4 | 5 | # 6 | # General pooler settings 7 | [general] 8 | # What IP to run on, 0.0.0.0 means accessible from everywhere. 9 | host = "0.0.0.0" 10 | 11 | # Port to run on, same as PgBouncer used in this example. 12 | port = 6432 13 | 14 | # Whether to enable the Prometheus exporter. 15 | enable_prometheus_exporter = true 16 | 17 | # Port on which the Prometheus exporter listens. 18 | prometheus_exporter_port = 9930 19 | 20 | # How long to wait before aborting a server connection (ms). 21 | connect_timeout = 1000 22 | 23 | # How much time to give the health check query to return with a result (ms). 24 | healthcheck_timeout = 1000 25 | 26 | # How long to keep a connection available for immediate re-use without running a healthcheck query on it (ms). 27 | healthcheck_delay = 30000 28 | 29 | # How much time to give clients during shutdown before forcibly killing client connections (ms). 30 | shutdown_timeout = 5000 31 | 32 | # For how long to ban a server if it fails a health check (seconds). 33 | ban_time = 60 # Seconds 34 | 35 | # If we should log client connections 36 | log_client_connections = false 37 | 38 | # If we should log client disconnections 39 | log_client_disconnections = false 40 | 41 | # Reload config automatically if it changes. 42 | autoreload = 15000 43 | 44 | # TLS 45 | tls_certificate = ".circleci/server.cert" 46 | tls_private_key = ".circleci/server.key" 47 | 48 | # Credentials to access the virtual administrative database (pgbouncer or pgcat) 49 | # Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc. 50 | admin_username = "admin_user" 51 | admin_password = "admin_pass" 52 | 53 | # pool 54 | # configs are structured as pool.<pool_name> 55 | # the pool_name is what clients use as the database name when connecting. 56 | # For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db" 57 | [pools.sharded_db] 58 | # Pool mode (see PgBouncer docs for more). 59 | # session: one server connection per connected client 60 | # transaction: one server connection per client transaction 61 | pool_mode = "transaction" 62 | prepared_statements_cache_size = 500 63 | 64 | # If the client doesn't specify, route traffic to 65 | # this role by default.
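# (A client can also pick the role explicitly per session or per query using PgCat's
# custom `SET SERVER ROLE TO 'primary'` / `SET SERVER ROLE TO 'replica'` syntax --
# this is the "custom protocol" referenced further down.)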
66 | # 67 | # any: round-robin between primary and replicas, 68 | # replica: round-robin between replicas only without touching the primary, 69 | # primary: all queries go to the primary unless otherwise specified. 70 | default_role = "any" 71 | 72 | # Query parser. If enabled, we'll attempt to parse 73 | # every incoming query to determine if it's a read or a write. 74 | # If it's a read query, we'll direct it to a replica. Otherwise, if it's a write, 75 | # we'll direct it to the primary. 76 | query_parser_enabled = true 77 | 78 | # If the query parser is enabled and this setting is enabled, we'll attempt to 79 | # infer the role from the query itself. 80 | query_parser_read_write_splitting = true 81 | 82 | # If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for 83 | # load balancing of read queries. Otherwise, the primary will only be used for write 84 | # queries. The primary can always be explicitly selected with our custom protocol. 85 | primary_reads_enabled = true 86 | 87 | # So what if you wanted to implement a different hashing function, 88 | # or you've already built one and you want this pooler to use it? 89 | # 90 | # Current options: 91 | # 92 | # pg_bigint_hash: PARTITION BY HASH (Postgres hashing function) 93 | # sha1: A hashing function based on SHA1 94 | # 95 | sharding_function = "pg_bigint_hash" 96 | 97 | # Credentials for users that may connect to this cluster 98 | [pools.sharded_db.users.0] 99 | username = "sharding_user" 100 | password = "sharding_user" 101 | # Maximum number of server connections that can be established for this user 102 | # The maximum number of connections from a single PgCat process to any database in the cluster 103 | # is the sum of pool_size across all users. 104 | pool_size = 9 105 | statement_timeout = 0 106 | 107 | [pools.sharded_db.users.1] 108 | username = "other_user" 109 | password = "other_user" 110 | pool_size = 21 111 | statement_timeout = 30000 112 | 113 | # Shard 0 114 | [pools.sharded_db.shards.0] 115 | # [ host, port, role ] 116 | servers = [ 117 | [ "127.0.0.1", 5432, "primary" ], 118 | [ "localhost", 5432, "replica" ] 119 | ] 120 | # Database name (e.g. "postgres") 121 | database = "shard0" 122 | 123 | [pools.sharded_db.shards.1] 124 | servers = [ 125 | [ "127.0.0.1", 5432, "primary" ], 126 | [ "localhost", 5432, "replica" ], 127 | ] 128 | database = "shard1" 129 | 130 | [pools.sharded_db.shards.2] 131 | servers = [ 132 | [ "127.0.0.1", 5432, "primary" ], 133 | [ "localhost", 5432, "replica" ], 134 | ] 135 | database = "shard2" 136 | 137 | 138 | [pools.simple_db] 139 | pool_mode = "session" 140 | default_role = "primary" 141 | query_parser_enabled = true 142 | query_parser_read_write_splitting = true 143 | primary_reads_enabled = true 144 | sharding_function = "pg_bigint_hash" 145 | prepared_statements_cache_size = 500 146 | 147 | [pools.simple_db.users.0] 148 | username = "simple_user" 149 | password = "simple_user" 150 | pool_size = 5 151 | statement_timeout = 30000 152 | 153 | [pools.simple_db.shards.0] 154 | servers = [ 155 | [ "127.0.0.1", 5432, "primary" ], 156 | [ "localhost", 5432, "replica" ] 157 | ] 158 | database = "some_db" 159 | -------------------------------------------------------------------------------- /.circleci/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o xtrace 5 | 6 | # non-zero exit code if we provide bad configs 7 | (!
./target/debug/pgcat "fake_configs" 2>/dev/null) 8 | 9 | # Start PgCat with a particular log level 10 | # for inspection. 11 | function start_pgcat() { 12 | kill -s SIGINT $(pgrep pgcat) || true 13 | RUST_LOG=${1} ./target/debug/pgcat .circleci/pgcat.toml & 14 | sleep 1 15 | } 16 | 17 | # Setup the database with shards and user 18 | PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 5432 -U postgres -f tests/sharding/query_routing_setup.sql 19 | PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 7432 -U postgres -f tests/sharding/query_routing_setup.sql 20 | PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 8432 -U postgres -f tests/sharding/query_routing_setup.sql 21 | PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 9432 -U postgres -f tests/sharding/query_routing_setup.sql 22 | PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 10432 -U postgres -f tests/sharding/query_routing_setup.sql 23 | 24 | PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard0 -i 25 | PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard1 -i 26 | PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard2 -i 27 | 28 | # Start Toxiproxy 29 | kill -9 $(pgrep toxiproxy) || true 30 | LOG_LEVEL=error toxiproxy-server & 31 | sleep 1 32 | 33 | # Create a database at port 5433, forward it to Postgres 34 | toxiproxy-cli create -l 127.0.0.1:5433 -u 127.0.0.1:5432 postgres_replica 35 | 36 | start_pgcat "info" 37 | 38 | # Check that prometheus is running 39 | curl --fail localhost:9930/metrics 40 | 41 | export PGPASSWORD=sharding_user 42 | export PGDATABASE=sharded_db 43 | 44 | # pgbench test 45 | pgbench -U sharding_user -i -h 127.0.0.1 -p 6432 46 | pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol simple -f tests/pgbench/simple.sql 47 | pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol extended 48 | 49 | # COPY TO STDOUT test 50 | psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'COPY (SELECT * FROM pgbench_accounts LIMIT 15) TO STDOUT;' > /dev/null 51 | 52 | # Query cancellation test 53 | (psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'SELECT pg_sleep(50)' || true) & 54 | sleep 1 55 | killall psql -s SIGINT 56 | 57 | # Pause/resume test. 58 | # Running benches before, during, and after pause/resume. 59 | pgbench -U sharding_user -t 500 -c 2 -h 127.0.0.1 -p 6432 --protocol extended & 60 | BENCH_ONE=$! 61 | PGPASSWORD=admin_pass psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'PAUSE sharded_db,sharding_user' 62 | pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol extended & 63 | BENCH_TWO=$! 
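# While the pool is paused, checkouts of server connections wait instead of failing,
# so this second pgbench should make no progress until the RESUME below.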
64 | PGPASSWORD=admin_pass psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'RESUME sharded_db,sharding_user' 65 | wait ${BENCH_ONE} 66 | wait ${BENCH_TWO} 67 | 68 | # Reload pool (closing unused server connections) 69 | PGPASSWORD=admin_pass psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'RELOAD' 70 | 71 | (psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'SELECT pg_sleep(50)' || true) & 72 | sleep 1 73 | killall psql -s SIGINT 74 | 75 | # Sharding insert 76 | psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_insert.sql 77 | 78 | # Sharding select 79 | psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_select.sql > /dev/null 80 | 81 | # Replica/primary selection & more sharding tests 82 | psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_primary_replica.sql > /dev/null 83 | 84 | # Statement timeout tests 85 | sed -i 's/statement_timeout = 0/statement_timeout = 100/' .circleci/pgcat.toml 86 | kill -SIGHUP $(pgrep pgcat) # Reload config 87 | sleep 0.2 88 | 89 | # This should timeout 90 | (! psql -U sharding_user -e -h 127.0.0.1 -p 6432 -c 'select pg_sleep(0.5)') 91 | 92 | # Disable statement timeout 93 | sed -i 's/statement_timeout = 100/statement_timeout = 0/' .circleci/pgcat.toml 94 | kill -SIGHUP $(pgrep pgcat) # Reload config again 95 | 96 | # 97 | # Integration tests and ActiveRecord tests 98 | # 99 | cd tests/ruby 100 | sudo bundle install 101 | bundle exec ruby tests.rb --format documentation || exit 1 102 | bundle exec rspec *_spec.rb --format documentation || exit 1 103 | cd ../.. 104 | 105 | # 106 | # Python tests 107 | # These tests will start and stop the pgcat server so it will need to be restarted after the tests 108 | # 109 | pip3 install -r tests/python/requirements.txt 110 | pytest || exit 1 111 | 112 | 113 | # 114 | # Go tests 115 | # Starts its own pgcat server 116 | # 117 | pushd tests/go 118 | /usr/local/go/bin/go test || exit 1 119 | popd 120 | 121 | start_pgcat "info" 122 | 123 | # 124 | # Rust tests 125 | # 126 | cd tests/rust 127 | cargo run 128 | cd ../../ 129 | 130 | # Admin tests 131 | export PGPASSWORD=admin_pass 132 | psql -U admin_user -e -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW STATS' > /dev/null 133 | psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'RELOAD' > /dev/null 134 | psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW CONFIG' > /dev/null 135 | psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW DATABASES' > /dev/null 136 | psql -U admin_user -h 127.0.0.1 -p 6432 -d pgcat -c 'SHOW LISTS' > /dev/null 137 | psql -U admin_user -h 127.0.0.1 -p 6432 -d pgcat -c 'SHOW POOLS' > /dev/null 138 | psql -U admin_user -h 127.0.0.1 -p 6432 -d pgcat -c 'SHOW VERSION' > /dev/null 139 | psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c "SET client_encoding TO 'utf8'" > /dev/null # will ignore 140 | (! 
psql -U admin_user -e -h 127.0.0.1 -p 6432 -d random_db -c 'SHOW STATS' > /dev/null) 141 | export PGPASSWORD=sharding_user 142 | 143 | # Start PgCat in debug to demonstrate failover better 144 | start_pgcat "trace" 145 | 146 | # Add latency to the replica at port 5433 slightly above the healthcheck timeout 147 | toxiproxy-cli toxic add -t latency -a latency=300 postgres_replica 148 | sleep 1 149 | 150 | # Note the failover in the logs 151 | timeout 5 psql -U sharding_user -e -h 127.0.0.1 -p 6432 <<-EOF 152 | SELECT 1; 153 | SELECT 1; 154 | SELECT 1; 155 | EOF 156 | 157 | # Remove latency 158 | toxiproxy-cli toxic remove --toxicName latency_downstream postgres_replica 159 | 160 | start_pgcat "info" 161 | 162 | # Test session mode (and config reload) 163 | sed -i '0,/simple_db/s/pool_mode = "transaction"/pool_mode = "session"/' .circleci/pgcat.toml 164 | 165 | # Reload config test 166 | kill -SIGHUP $(pgrep pgcat) 167 | 168 | # Revert settings after reload. Makes test runs idempotent 169 | sed -i '0,/simple_db/s/pool_mode = "session"/pool_mode = "transaction"/' .circleci/pgcat.toml 170 | 171 | sleep 1 172 | 173 | # Prepared statements that will only work in session mode 174 | pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol prepared 175 | 176 | # Attempt clean shut down 177 | killall pgcat -s SIGINT 178 | 179 | # Allow for graceful shutdown 180 | sleep 1 181 | 182 | kill -9 $(pgrep toxiproxy) 183 | sleep 1 184 | -------------------------------------------------------------------------------- /.circleci/server.cert: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDazCCAlOgAwIBAgIUChIvUGFJGJe5EDch32rchqoxER0wDQYJKoZIhvcNAQEL 3 | BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM 4 | GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMjA2MjcyMjI2MDZaFw0yMjA3 5 | MjcyMjI2MDZaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw 6 | HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB 7 | AQUAA4IBDwAwggEKAoIBAQDdTwrBzV1v79faVckFvIn/9V4fypYs4vDi3X+h3wGn 8 | AjEh6mmizlKCwSwAam07D9Q5zKiXFrzNJqzSioOv5zsOAvObwrnzbtKSwfs3aP5g 9 | eEh2clHCZYx9p06WszPcgSB5nTz1NeY4XAwvGn3A+SVCLyPMTNwnem48+ONh2F9u 10 | FHtSuIsEVvTjMlH09O7LjwJlODxy3HNv2JHYM5Hx9tzc+NVYdERPtaVcX8ycw1Eh 11 | 9hgGSgfaNM52/JfRMIDhENrsn0S1omRUtcJe72loreiwrECUOLAnAfp9Xqc+rMPP 12 | aLA6ElzmYef1+ZEC0p6isCHPhxY5ESVhKYhE9nQvksjnAgMBAAGjUzBRMB0GA1Ud 13 | DgQWBBQLDtzexqjx7xPtUZuZB/angU9oSDAfBgNVHSMEGDAWgBQLDtzexqjx7xPt 14 | UZuZB/angU9oSDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQC/ 15 | mxY/a/WeLENVj2Gg9EUH0CKzfqeTey1mb6YfPGxzrD7oq1m0Vn2MmTbjZrJgh/Ob 16 | QckO3ElF4kC9+6XP+iDPmabGpjeLgllBboT5l2aqnD1syMrf61WPLzgRzRfplYGy 17 | cjBQDDKPu8Lu0QRMWU28tHYN0bMxJoCuXysGGX5WsuFnKCA6f/V+nycJJXxJH3eB 18 | eLjTueD9/RE3OXhi6m8A29Q1E9AE5EF4uRxYXrr91BmYnk4aFvSmBxhUEzE12eSN 19 | lHB/uSc0+Dp+UVmVr6wW8AQfd16UBA0BUf3kSW3aSvirYPYH0rXiOOpEJgOwOMnR 20 | f5+XAbN1Y+3OsFz/ZmP9 21 | -----END CERTIFICATE----- 22 | -------------------------------------------------------------------------------- /.circleci/server.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDdTwrBzV1v79fa 3 | VckFvIn/9V4fypYs4vDi3X+h3wGnAjEh6mmizlKCwSwAam07D9Q5zKiXFrzNJqzS 4 | ioOv5zsOAvObwrnzbtKSwfs3aP5geEh2clHCZYx9p06WszPcgSB5nTz1NeY4XAwv 5 | Gn3A+SVCLyPMTNwnem48+ONh2F9uFHtSuIsEVvTjMlH09O7LjwJlODxy3HNv2JHY 6 | M5Hx9tzc+NVYdERPtaVcX8ycw1Eh9hgGSgfaNM52/JfRMIDhENrsn0S1omRUtcJe 7 
| 72loreiwrECUOLAnAfp9Xqc+rMPPaLA6ElzmYef1+ZEC0p6isCHPhxY5ESVhKYhE 8 | 9nQvksjnAgMBAAECggEAbnvddO9frFhivJ+DIhgEFQKcIOb0nigV9kx6QYehvYy8 9 | lp/+aMb0Lk7d9r8rFQdL/icMK5GwZALg2KNKJvEbbF1Q3PwT9VHoUlgBYKJMDEFA 10 | e9GKu7ASuVBjTZzdUUItwkkbe5eS/aQGeSWSjlpTnX0HNCFS72qRymK+scRhsAQf 11 | ZoHyZHDslkvPR3Pos+sndWBYCDHag5/KoPhsMt1+5S9NQcOUHx9Ac0gLHjau3N+P 12 | 0FhODHFFGnnpyQvLvj6u3ZOR34ladMgoBglE0O3vPFhckn92EK4teeTWOsUMotiz 13 | qM3QIJTOJjtiY6VDGY93bIa4pFvt7Zi4vIerenKt0QKBgQD/UMFqfevTAMrk10AC 14 | bOa4+cM07ORY4ZwVj5ILhZn+8crDEEtBsUyuEU2FTINtnoEq1yGc/IXpsyS1BHjL 15 | L1xSml5LN3jInbi8z5XQfY5Sj3VOMtwY6yD20jcdeDC44rz3nStXdkcMWxbTMapx 16 | iOPsap5ciUKOMS7LyMidPEG/LQKBgQDd5vHgrLN0FBIIm+vZg6MEm4QyobstVp4l 17 | 7V/GZsdL+M8AQv1Rx+5wSUSWKomOIv5lglis7f6g0c9O7Qkr78/wzoyoKC2RRqPp 18 | I90GjY2Iv22N4GIkRrDAgMZbkTitzIB6tbXEVeLAOh3frFJ8IwauRCOiXIjrZdJ4 19 | FvV86+nU4wKBgQDdWTP2kWkMrBk7QOp7r9Jv+AmnLuHhtOdPQgOJ/bA++X2ik9PL 20 | Bl3GY7XjpSwks1CkxZKcucmXjPp7/X6EGXFfI/owF82dkDADca0e7lufdERtIWb0 21 | K5WOpz2lTPhgsiLGQfq7fw2lxqsJOnvcpqOD6gOVkmKjSDyb7F0RBJazmQKBgQDD 22 | a8PQTcesjpBjLI3EfX1vbVY7ENu6zfFxDV+vZoxVh8UlQdm90AlYse3JIaUKnB7W 23 | Xrihcucv0hZ0N6RAIW5LcFvHK7sVmdR4WbEpODhRGeTtcZJ8yBSZM898jKQRy2vK 24 | pYRyaADNsWDlvujVkjMr/a40KrIaPQ3h3LZNUaYYaQKBgQD1x8A5S5SiE1cN1vFr 25 | aACkmA2WqEDKKhUsUigJdwW6WB/B9kWlIlz/iV1H9uwBXtSIYG4VqCSTAvh0z4gX 26 | Qu2SrdPm5PYnKzpdynpz78OnGdflD1RKWFGHItR6GN6tj/VmulO6mlFvT4jzBQ7j 27 | +Hf8m2TcD4U3ksz3xw+YOD+cmA== 28 | -----END RSA PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | target/ 2 | tests/ 3 | tracing/ 4 | .circleci/ 5 | .git/ 6 | dev/ 7 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | trim_trailing_whitespace = true 5 | insert_final_newline = true 6 | 7 | [*.rs] 8 | indent_style = space 9 | indent_size = 4 10 | max_line_length = 120 11 | 12 | [*.toml] 13 | indent_style = space 14 | indent_size = 2 15 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 
39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "cargo" 4 | directory: "/" 5 | schedule: 6 | interval: "daily" 7 | time: "04:00" # UTC 8 | labels: 9 | - "dependencies" 10 | commit-message: 11 | prefix: "chore(deps)" 12 | open-pull-requests-limit: 10 13 | - package-ecosystem: "github-actions" 14 | directory: "/" 15 | schedule: 16 | interval: "weekly" 17 | -------------------------------------------------------------------------------- /.github/workflows/build-and-push.yaml: -------------------------------------------------------------------------------- 1 | name: Build and Push 2 | 3 | on: 4 | push: 5 | paths: 6 | - '!charts/**.md' 7 | branches: 8 | - main 9 | tags: 10 | - v* 11 | 12 | env: 13 | registry: ghcr.io 14 | image-name: ${{ github.repository }} 15 | 16 | jobs: 17 | build-and-push: 18 | runs-on: ubuntu-latest 19 | 20 | permissions: 21 | contents: read 22 | packages: write 23 | 24 | steps: 25 | - name: Checkout Repository 26 | uses: actions/checkout@v4 27 | 28 | - name: Set up QEMU 29 | uses: docker/setup-qemu-action@v3 30 | 31 | - name: Set up Docker Buildx 32 | uses: docker/setup-buildx-action@v3 33 | 34 | - name: Determine tags 35 | id: metadata 36 | uses: docker/metadata-action@v5 37 | with: 38 | images: ${{ env.registry }}/${{ env.image-name }} 39 | tags: | 40 | type=sha,prefix=,format=long 41 | type=schedule 42 | type=ref,event=tag 43 | type=ref,event=branch 44 | type=ref,event=pr 45 | type=raw,value=latest,enable={{ is_default_branch }} 46 | 47 | - name: Log in to the Container registry 48 | uses: docker/login-action@v3 49 | with: 50 | registry: ${{ env.registry }} 51 | username: ${{ github.actor }} 52 | password: ${{ secrets.GITHUB_TOKEN }} 53 | 54 | - name: Build and push ${{ env.image-name }} 55 | uses: docker/build-push-action@v6 56 | with: 57 | context: . 
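# Multi-arch build: QEMU (set up above) emulates the non-native platform for buildx.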
58 | platforms: linux/amd64,linux/arm64 59 | provenance: false 60 | push: true 61 | tags: ${{ steps.metadata.outputs.tags }} 62 | labels: ${{ steps.metadata.outputs.labels }} 63 | cache-from: type=gha 64 | cache-to: type=gha,mode=max 65 | 66 | concurrency: 67 | group: ${{ github.ref }} 68 | cancel-in-progress: true 69 | -------------------------------------------------------------------------------- /.github/workflows/chart-lint-test.yaml: -------------------------------------------------------------------------------- 1 | name: Lint and Test Charts 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - charts/** 7 | - '!charts/**.md' 8 | jobs: 9 | lint-test: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout 13 | uses: actions/checkout@v3.1.0 14 | with: 15 | fetch-depth: 0 16 | 17 | - name: Set up Helm 18 | uses: azure/setup-helm@v3 19 | with: 20 | version: v3.8.1 21 | 22 | # Python is required because `ct lint` runs Yamale (https://github.com/23andMe/Yamale) and 23 | # yamllint (https://github.com/adrienverge/yamllint) which require Python 24 | - name: Set up Python 25 | uses: actions/setup-python@v5.1.0 26 | with: 27 | python-version: 3.7 28 | 29 | - name: Set up chart-testing 30 | uses: helm/chart-testing-action@v2.2.1 31 | with: 32 | version: v3.5.1 33 | 34 | - name: Run chart-testing (list-changed) 35 | id: list-changed 36 | run: | 37 | changed=$(ct list-changed --config ct.yaml) 38 | if [[ -n "$changed" ]]; then 39 | echo "changed=true" >> $GITHUB_OUTPUT 40 | fi 41 | 42 | - name: Run chart-testing (lint) 43 | run: ct lint --config ct.yaml 44 | 45 | - name: Create kind cluster 46 | uses: helm/kind-action@v1.10.0 47 | if: steps.list-changed.outputs.changed == 'true' 48 | 49 | - name: Run chart-testing (install) 50 | run: ct install --config ct.yaml 51 | -------------------------------------------------------------------------------- /.github/workflows/chart-release.yaml: -------------------------------------------------------------------------------- 1 | name: Release Charts 2 | 3 | on: 4 | push: 5 | paths: 6 | - charts/** 7 | - '!**.md' 8 | branches: 9 | - main 10 | 11 | jobs: 12 | release: 13 | runs-on: ubuntu-latest 14 | 15 | permissions: 16 | contents: write 17 | 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 21 | with: 22 | fetch-depth: 0 23 | 24 | - name: Configure Git 25 | run: | 26 | git config user.name "$GITHUB_ACTOR" 27 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com" 28 | 29 | - name: Install Helm 30 | uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5 31 | with: 32 | version: v3.13.0 33 | 34 | - name: Run chart-releaser 35 | uses: helm/chart-releaser-action@a917fd15b20e8b64b94d9158ad54cd6345335584 # v1.6.0 36 | with: 37 | charts_dir: charts 38 | config: cr.yaml 39 | env: 40 | CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 41 | -------------------------------------------------------------------------------- /.github/workflows/generate-chart-readme.yaml: -------------------------------------------------------------------------------- 1 | name: '[CI/CD] Update README metadata' 2 | 3 | on: 4 | pull_request_target: 5 | branches: 6 | - main 7 | paths: 8 | - 'charts/*/values.yaml' 9 | # Remove all permissions by default 10 | permissions: {} 11 | jobs: 12 | update-readme-metadata: 13 | runs-on: ubuntu-latest 14 | permissions: 15 | contents: write 16 | steps: 17 | - name: Install readme-generator-for-helm 18 | run: npm install -g @bitnami/readme-generator-for-helm 19 | - name: Checkout 
20 | uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 21 | with: 22 | path: charts 23 | ref: ${{github.event.pull_request.head.ref}} 24 | repository: ${{github.event.pull_request.head.repo.full_name}} 25 | token: ${{ secrets.GITHUB_TOKEN }} 26 | - name: Execute readme-generator-for-helm 27 | env: 28 | DIFF_URL: "${{github.event.pull_request.diff_url}}" 29 | TEMP_FILE: "${{runner.temp}}/pr-${{github.event.number}}.diff" 30 | run: | 31 | # This request doesn't consume API calls. 32 | curl -Lkso $TEMP_FILE $DIFF_URL 33 | files_changed="$(sed -nr 's/[\-\+]{3} [ab]\/(.*)/\1/p' $TEMP_FILE | sort | uniq)" 34 | # Adding || true to avoid "Process exited with code 1" errors 35 | charts_dirs_changed="$(echo "$files_changed" | xargs dirname | grep -o "pgcat/[^/]*" | sort | uniq || true)" 36 | for chart in ${charts_dirs_changed}; do 37 | echo "Updating README.md for ${chart}" 38 | readme-generator --values "charts/${chart}/values.yaml" --readme "charts/${chart}/README.md" --schema "/tmp/schema.json" 39 | done 40 | - name: Push changes 41 | run: | 42 | # Push all the changes 43 | cd charts 44 | if git status -s | grep pgcat; then 45 | git config user.name "$GITHUB_ACTOR" 46 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com" 47 | git add . && git commit -am "Update README.md with readme-generator-for-helm" --signoff && git push 48 | fi 49 | -------------------------------------------------------------------------------- /.github/workflows/publish-ci-docker-image.yml: -------------------------------------------------------------------------------- 1 | name: publish-ci-docker-image 2 | on: 3 | push: 4 | branches: [ main ] 5 | jobs: 6 | publish-ci-docker-image: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v2 10 | - name: Login to GitHub Container Registry 11 | uses: docker/login-action@v1 12 | with: 13 | registry: ghcr.io 14 | username: ${{ github.actor }} 15 | password: ${{ secrets.GITHUB_TOKEN }} 16 | - name: Build CI Docker image 17 | run: | 18 | docker build . 
-f Dockerfile.ci --tag ghcr.io/postgresml/pgcat-ci:latest 19 | docker run ghcr.io/postgresml/pgcat-ci:latest 20 | docker push ghcr.io/postgresml/pgcat-ci:latest 21 | -------------------------------------------------------------------------------- /.github/workflows/publish-deb-package.yml: -------------------------------------------------------------------------------- 1 | name: pgcat package (deb) 2 | 3 | on: 4 | push: 5 | tags: 6 | - v* 7 | workflow_dispatch: 8 | inputs: 9 | packageVersion: 10 | default: "1.1.2-dev1" 11 | jobs: 12 | build: 13 | strategy: 14 | max-parallel: 1 15 | fail-fast: false # Let the other job finish, or they can lock each other out 16 | matrix: 17 | os: ["buildjet-4vcpu-ubuntu-2204", "buildjet-4vcpu-ubuntu-2204-arm"] 18 | 19 | runs-on: ${{ matrix.os }} 20 | steps: 21 | - uses: actions/checkout@v3 22 | - name: Set package version 23 | if: github.event_name == 'push' # For push event 24 | run: | 25 | TAG=${{ github.ref_name }} 26 | echo "packageVersion=${TAG#v}" >> "$GITHUB_ENV" 27 | - name: Set package version (manual dispatch) 28 | if: github.event_name == 'workflow_dispatch' # For manual dispatch 29 | run: echo "packageVersion=${{ github.event.inputs.packageVersion }}" >> "$GITHUB_ENV" 30 | - uses: actions-rs/toolchain@v1 31 | with: 32 | toolchain: stable 33 | - name: Install dependencies 34 | env: 35 | DEBIAN_FRONTEND: noninteractive 36 | TZ: Etc/UTC 37 | run: | 38 | curl -sLO https://github.com/deb-s3/deb-s3/releases/download/0.11.4/deb-s3-0.11.4.gem 39 | sudo gem install deb-s3-0.11.4.gem 40 | dpkg-deb --version 41 | - name: Build and release package 42 | env: 43 | AWS_ACCESS_KEY_ID: ${{ vars.AWS_ACCESS_KEY_ID }} 44 | AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 45 | AWS_DEFAULT_REGION: ${{ vars.AWS_DEFAULT_REGION }} 46 | run: | 47 | if [[ $(arch) == "x86_64" ]]; then 48 | export ARCH=amd64 49 | else 50 | export ARCH=arm64 51 | fi 52 | 53 | bash utilities/deb.sh ${{ env.packageVersion }} 54 | 55 | deb-s3 upload \ 56 | --lock \ 57 | --bucket apt.postgresml.org \ 58 | pgcat-${{ env.packageVersion }}-ubuntu22.04-${ARCH}.deb \ 59 | --codename $(lsb_release -cs) 60 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | /target 3 | *.deb 4 | .vscode 5 | *.profraw 6 | cov/ 7 | lcov.info 8 | 9 | # Dev 10 | dev/.bash_history 11 | dev/cache 12 | !dev/cache/.keepme 13 | .venv 14 | **/__pycache__ 15 | .bundle -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" 2 | hard_tabs = false 3 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## Introduction 2 | 3 | Thank you for contributing! Just a few tips here: 4 | 5 | 1. `cargo fmt` and `cargo clippy` your code before opening up a PR 6 | 2. Run the test suite (e.g. `pgbench`) to make sure everything still works. The tests are in `.circleci/run_tests.sh`. 7 | 3. Performance is important, make sure there are no regressions in your branch vs. `main`. 8 | 9 | ## How to run the integration tests locally and iterate on them 10 | We have integration tests written in Ruby, Python, Go and Rust. 11 | Below are the steps to run them in a developer-friendly way that allows iterating and quick turnaround. 
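As a minimal sketch of that loop (the setup and per-language commands are detailed below; `misc_spec.rb` here stands in for whichever spec you are iterating on):

```bash
# Inside the test environment container: rebuild PgCat, then rerun one Ruby spec.
cd /app && cargo build
cd /app/tests/ruby && bundle exec rspec misc_spec.rb --format documentation
```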
12 | Hear me out, this should be easy: it involves opening a shell into a container with all the necessary dependencies available for you, so you can modify the test code and immediately rerun your test in the interactive shell. 13 | 14 | 15 | Quite simply, make sure you have Docker installed and then run 16 | `./start_test_env.sh` 17 | 18 | That is it! 19 | 20 | Within this test environment you can modify the files in your favorite IDE and rerun the tests without having to bootstrap the entire environment again. 21 | 22 | Once the environment is ready, you can run the tests by running: 23 | Ruby: `cd /app/tests/ruby && bundle exec ruby <test_name>.rb --format documentation` 24 | Python: `cd /app/ && pytest` 25 | Rust: `cd /app/tests/rust && cargo run` 26 | Go: `cd /app/tests/go && /usr/local/go/bin/go test` 27 | 28 | You can also rebuild PgCat directly within the environment and the tests will run against the newly built binary. 29 | To rebuild PgCat, just run `cargo build` within the container under `/app`. 30 | 31 | ![Animated gif showing how to run tests](https://github.com/user-attachments/assets/2258fde3-2aed-4efb-bdc5-e4f12dcd4d33) 32 | 33 | 34 | 35 | Happy hacking! 36 | 37 | ## TODOs 38 | 39 | See [Issues](https://github.com/levkk/pgcat/issues). 40 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pgcat" 3 | version = "1.3.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | [dependencies] 8 | tokio = { version = "1", features = ["full"] } 9 | bytes = "1" 10 | md-5 = "0.10" 11 | bb8 = "=0.8.6" 12 | async-trait = "0.1" 13 | rand = "0.8" 14 | chrono = "0.4" 15 | sha-1 = "0.10" 16 | toml = "0.7" 17 | serde = { version = "1", features = ["derive"] } 18 | serde_derive = "1" 19 | regex = "1" 20 | num_cpus = "1" 21 | once_cell = "1" 22 | sqlparser = { version = "0.52", features = ["visitor"] } 23 | log = "0.4" 24 | arc-swap = "1" 25 | parking_lot = "0.12.1" 26 | hmac = "0.12" 27 | sha2 = "0.10" 28 | base64 = "0.21" 29 | stringprep = "0.1" 30 | tokio-rustls = "0.24" 31 | rustls-pemfile = "1" 32 | http-body-util = "0.1.2" 33 | hyper = { version = "1.4.1", features = ["full"] } 34 | hyper-util = { version = "0.1.7", features = ["tokio"] } 35 | phf = { version = "0.11.1", features = ["macros"] } 36 | exitcode = "1.1.2" 37 | futures = "0.3" 38 | socket2 = { version = "0.4.7", features = ["all"] } 39 | nix = "0.26.2" 40 | atomic_enum = "0.2.0" 41 | postgres-protocol = "0.6.5" 42 | fallible-iterator = "0.2" 43 | pin-project = "1" 44 | webpki-roots = "0.23" 45 | rustls = { version = "0.21", features = ["dangerous_configuration"] } 46 | trust-dns-resolver = "0.22.0" 47 | tokio-test = "0.4.2" 48 | serde_json = "1" 49 | itertools = "0.10" 50 | clap = { version = "4.3.1", features = ["derive", "env"] } 51 | tracing = "0.1.37" 52 | tracing-subscriber = { version = "0.3.17", features = [ 53 | "json", 54 | "env-filter", 55 | "std", 56 | ] } 57 | lru = "0.12.0" 58 | mini-moka = "0.10.3" 59 | 60 | [target.'cfg(not(target_env = "msvc"))'.dependencies] 61 | jemallocator = "0.5.0" 62 | 63 | [dev-dependencies] 64 | serial_test = "*" 65 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:1.81.0-slim-bookworm AS builder 2 | 3 | RUN apt-get update &&
\ 4 | apt-get install -y build-essential 5 | 6 | COPY . /app 7 | WORKDIR /app 8 | RUN cargo build --release 9 | 10 | FROM debian:bookworm-slim 11 | RUN apt-get update && apt-get install -o Dpkg::Options::=--force-confdef -yq --no-install-recommends \ 12 | postgresql-client \ 13 | # Clean up layer 14 | && apt-get clean \ 15 | && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \ 16 | && truncate -s 0 /var/log/*log 17 | COPY --from=builder /app/target/release/pgcat /usr/bin/pgcat 18 | COPY --from=builder /app/pgcat.toml /etc/pgcat/pgcat.toml 19 | WORKDIR /etc/pgcat 20 | ENV RUST_LOG=info 21 | CMD ["pgcat"] 22 | STOPSIGNAL SIGINT 23 | -------------------------------------------------------------------------------- /Dockerfile.ci: -------------------------------------------------------------------------------- 1 | FROM cimg/rust:1.81.0 2 | COPY --from=sclevine/yj /bin/yj /bin/yj 3 | RUN /bin/yj -h 4 | RUN sudo apt-get update && \ 5 | sudo apt-get install -y \ 6 | psmisc postgresql-contrib-14 postgresql-client-14 libpq-dev \ 7 | ruby ruby-dev python3 python3-pip \ 8 | lcov llvm-11 iproute2 && \ 9 | sudo apt-get upgrade curl && \ 10 | cargo install cargo-binutils rustfilt && \ 11 | rustup component add llvm-tools-preview && \ 12 | pip3 install psycopg2 && sudo gem install bundler && \ 13 | wget -O /tmp/toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \ 14 | sudo dpkg -i /tmp/toxiproxy-2.4.0.deb 15 | RUN wget -O /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz https://go.dev/dl/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \ 16 | sudo tar -C /usr/local -xzf /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \ 17 | rm /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz 18 | -------------------------------------------------------------------------------- /Dockerfile.dev: -------------------------------------------------------------------------------- 1 | FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef 2 | 3 | RUN apt-get update && \ 4 | apt-get install -y build-essential 5 | 6 | WORKDIR /app 7 | 8 | FROM chef AS planner 9 | COPY . . 10 | RUN cargo chef prepare --recipe-path recipe.json 11 | 12 | FROM chef AS builder 13 | COPY --from=planner /app/recipe.json recipe.json 14 | # Build dependencies - this is the caching Docker layer! 15 | RUN cargo chef cook --release --recipe-path recipe.json 16 | # Build application 17 | COPY . . 
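# Only the application code changed at this point; the dependency layer built
# by `cargo chef cook` above is reused from the Docker cache.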
18 | RUN cargo build --release 19 | 20 | FROM debian:bookworm-slim 21 | COPY --from=builder /app/target/release/pgcat /usr/bin/pgcat 22 | COPY --from=builder /app/pgcat.toml /etc/pgcat/pgcat.toml 23 | WORKDIR /etc/pgcat 24 | ENV RUST_LOG=info 25 | CMD ["pgcat"] 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2023 PgCat Contributors 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining 4 | a copy of this software and associated documentation files (the 5 | "Software"), to deal in the Software without restriction, including 6 | without limitation the rights to use, copy, modify, merge, publish, 7 | distribute, sublicense, and/or sell copies of the Software, and to 8 | permit persons to whom the Software is furnished to do so, subject to 9 | the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be 12 | included in all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /charts/pgcat/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /charts/pgcat/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: pgcat 3 | description: A Helm chart for PgCat, a PostgreSQL pooler and proxy (like PgBouncer) with support for sharding, load balancing, failover and mirroring. 4 | maintainers: 5 | - name: PostgresML 6 | email: team@postgresml.org 7 | appVersion: "1.3.0" 8 | version: 0.2.5 9 | -------------------------------------------------------------------------------- /charts/pgcat/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if .Values.ingress.enabled }} 3 | {{- range $host := .Values.ingress.hosts }} 4 | {{- range .paths }} 5 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} 6 | {{- end }} 7 | {{- end }} 8 | {{- else if contains "NodePort" .Values.service.type }} 9 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "pgcat.fullname" .
}}) 10 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 11 | echo http://$NODE_IP:$NODE_PORT 12 | {{- else if contains "LoadBalancer" .Values.service.type }} 13 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 14 | You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "pgcat.fullname" . }}' 15 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "pgcat.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") 16 | echo http://$SERVICE_IP:{{ .Values.service.port }} 17 | {{- else if contains "ClusterIP" .Values.service.type }} 18 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "pgcat.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 19 | export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") 20 | echo "Visit http://127.0.0.1:8080 to use your application" 21 | kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /charts/pgcat/templates/_config.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Configuration template definition 3 | */}} 4 | -------------------------------------------------------------------------------- /charts/pgcat/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "pgcat.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "pgcat.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "pgcat.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "pgcat.labels" -}} 37 | helm.sh/chart: {{ include "pgcat.chart" . }} 38 | {{ include "pgcat.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "pgcat.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "pgcat.name" . 
}} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "pgcat.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "pgcat.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /charts/pgcat/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "pgcat.fullname" . }} 5 | labels: 6 | {{- include "pgcat.labels" . | nindent 4 }} 7 | spec: 8 | replicas: {{ .Values.replicaCount }} 9 | selector: 10 | matchLabels: 11 | {{- include "pgcat.selectorLabels" . | nindent 6 }} 12 | template: 13 | metadata: 14 | annotations: 15 | checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} 16 | {{- with .Values.podAnnotations }} 17 | {{- toYaml . | nindent 8 }} 18 | {{- end }} 19 | labels: 20 | {{- include "pgcat.selectorLabels" . | nindent 8 }} 21 | spec: 22 | {{- with .Values.image.pullSecrets }} 23 | imagePullSecrets: 24 | {{- toYaml . | nindent 8 }} 25 | {{- end }} 26 | serviceAccountName: {{ include "pgcat.serviceAccountName" . }} 27 | securityContext: 28 | {{- toYaml .Values.podSecurityContext | nindent 8 }} 29 | containers: 30 | - name: {{ .Chart.Name }} 31 | securityContext: 32 | {{- toYaml .Values.containerSecurityContext | nindent 12 }} 33 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 34 | imagePullPolicy: {{ .Values.image.pullPolicy }} 35 | ports: 36 | - name: pgcat 37 | containerPort: {{ .Values.configuration.general.port }} 38 | protocol: TCP 39 | livenessProbe: 40 | tcpSocket: 41 | port: pgcat 42 | readinessProbe: 43 | tcpSocket: 44 | port: pgcat 45 | resources: 46 | {{- toYaml .Values.resources | nindent 12 }} 47 | volumeMounts: 48 | - mountPath: /etc/pgcat 49 | name: config 50 | {{- with .Values.nodeSelector }} 51 | nodeSelector: 52 | {{- toYaml . | nindent 8 }} 53 | {{- end }} 54 | {{- with .Values.affinity }} 55 | affinity: 56 | {{- toYaml . | nindent 8 }} 57 | {{- end }} 58 | {{- with .Values.tolerations }} 59 | tolerations: 60 | {{- toYaml . | nindent 8 }} 61 | {{- end }} 62 | volumes: 63 | - secret: 64 | defaultMode: 420 65 | secretName: {{ include "pgcat.fullname" . }} 66 | name: config 67 | -------------------------------------------------------------------------------- /charts/pgcat/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "pgcat.fullname" . 
-}} 3 | {{- $svcPort := .Values.service.port -}} 4 | {{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} 5 | {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} 6 | {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} 7 | {{- end }} 8 | {{- end }} 9 | {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} 10 | apiVersion: networking.k8s.io/v1 11 | {{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} 12 | apiVersion: networking.k8s.io/v1beta1 13 | {{- else -}} 14 | apiVersion: extensions/v1beta1 15 | {{- end }} 16 | kind: Ingress 17 | metadata: 18 | name: {{ $fullName }} 19 | labels: 20 | {{- include "pgcat.labels" . | nindent 4 }} 21 | {{- with .Values.ingress.annotations }} 22 | annotations: 23 | {{- toYaml . | nindent 4 }} 24 | {{- end }} 25 | spec: 26 | {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} 27 | ingressClassName: {{ .Values.ingress.className }} 28 | {{- end }} 29 | {{- if .Values.ingress.tls }} 30 | tls: 31 | {{- range .Values.ingress.tls }} 32 | - hosts: 33 | {{- range .hosts }} 34 | - {{ . | quote }} 35 | {{- end }} 36 | secretName: {{ .secretName }} 37 | {{- end }} 38 | {{- end }} 39 | rules: 40 | {{- range .Values.ingress.hosts }} 41 | - host: {{ .host | quote }} 42 | http: 43 | paths: 44 | {{- range .paths }} 45 | - path: {{ .path }} 46 | {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} 47 | pathType: {{ .pathType }} 48 | {{- end }} 49 | backend: 50 | {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} 51 | service: 52 | name: {{ $fullName }} 53 | port: 54 | number: {{ $svcPort }} 55 | {{- else }} 56 | serviceName: {{ $fullName }} 57 | servicePort: {{ $svcPort }} 58 | {{- end }} 59 | {{- end }} 60 | {{- end }} 61 | {{- end }} 62 | -------------------------------------------------------------------------------- /charts/pgcat/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: {{ include "pgcat.fullname" . }} 5 | labels: 6 | {{- include "pgcat.labels" . 
| nindent 4 }} 7 | type: Opaque 8 | stringData: 9 | pgcat.toml: | 10 | [general] 11 | host = {{ .Values.configuration.general.host | quote }} 12 | port = {{ .Values.configuration.general.port }} 13 | enable_prometheus_exporter = {{ .Values.configuration.general.enable_prometheus_exporter }} 14 | prometheus_exporter_port = {{ .Values.configuration.general.prometheus_exporter_port }} 15 | connect_timeout = {{ .Values.configuration.general.connect_timeout }} 16 | idle_timeout = {{ .Values.configuration.general.idle_timeout | int }} 17 | server_lifetime = {{ .Values.configuration.general.server_lifetime | int }} 18 | server_tls = {{ .Values.configuration.general.server_tls }} 19 | idle_client_in_transaction_timeout = {{ .Values.configuration.general.idle_client_in_transaction_timeout | int }} 20 | healthcheck_timeout = {{ .Values.configuration.general.healthcheck_timeout }} 21 | healthcheck_delay = {{ .Values.configuration.general.healthcheck_delay }} 22 | shutdown_timeout = {{ .Values.configuration.general.shutdown_timeout }} 23 | ban_time = {{ .Values.configuration.general.ban_time }} 24 | log_client_connections = {{ .Values.configuration.general.log_client_connections }} 25 | log_client_disconnections = {{ .Values.configuration.general.log_client_disconnections }} 26 | tcp_keepalives_idle = {{ .Values.configuration.general.tcp_keepalives_idle }} 27 | tcp_keepalives_count = {{ .Values.configuration.general.tcp_keepalives_count }} 28 | tcp_keepalives_interval = {{ .Values.configuration.general.tcp_keepalives_interval }} 29 | {{- if and (ne .Values.configuration.general.tls_certificate "-") (ne .Values.configuration.general.tls_private_key "-") }} 30 | tls_certificate = "{{ .Values.configuration.general.tls_certificate }}" 31 | tls_private_key = "{{ .Values.configuration.general.tls_private_key }}" 32 | {{- end }} 33 | admin_username = {{ .Values.configuration.general.admin_username | quote }} 34 | admin_password = {{ .Values.configuration.general.admin_password | quote }} 35 | {{- if and .Values.configuration.general.auth_query_user .Values.configuration.general.auth_query_password .Values.configuration.general.auth_query }} 36 | auth_query = {{ .Values.configuration.general.auth_query | quote }} 37 | auth_query_user = {{ .Values.configuration.general.auth_query_user | quote }} 38 | auth_query_password = {{ .Values.configuration.general.auth_query_password | quote }} 39 | {{- end }} 40 | 41 | {{- range $pool := .Values.configuration.pools }} 42 | 43 | ## 44 | ## pool for {{ $pool.name }} 45 | ## 46 | [pools.{{ $pool.name | quote }}] 47 | pool_mode = {{ default "transaction" $pool.pool_mode | quote }} 48 | load_balancing_mode = {{ default "random" $pool.load_balancing_mode | quote }} 49 | default_role = {{ default "any" $pool.default_role | quote }} 50 | prepared_statements_cache_size = {{ default 500 $pool.prepared_statements_cache_size }} 51 | query_parser_enabled = {{ default true $pool.query_parser_enabled }} 52 | query_parser_read_write_splitting = {{ default true $pool.query_parser_read_write_splitting }} 53 | primary_reads_enabled = {{ default true $pool.primary_reads_enabled }} 54 | db_activity_based_routing = {{ default false $pool.db_activity_based_routing }} 55 | db_activity_based_ms_init_delay = {{ default 100 $pool.db_activity_based_ms_init_delay }} 56 | db_activity_ttl = {{ default 900 $pool.db_activity_ttl }} 57 | table_mutation_cache_ttl = {{ default 50 $pool.table_mutation_cache_ttl }} 58 | sharding_function = {{ default "pg_bigint_hash" $pool.sharding_function | quote }} 59 | 
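{{/* One [pools.<pool_name>.users.<index>] entry is rendered per user below; a password may be given inline or resolved at template time from an existing Secret via Helm's `lookup`. */}}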
60 | {{- range $index, $user := $pool.users }} 61 | 62 | ## pool {{ $pool.name }} user {{ $user.username | quote }} 63 | ## 64 | [pools.{{ $pool.name | quote }}.users.{{ $index }}] 65 | username = {{ $user.username | quote }} 66 | {{- if $user.password }} 67 | password = {{ $user.password | quote }} 68 | {{- else if and $user.passwordSecret.name $user.passwordSecret.key }} 69 | {{- $secret := (lookup "v1" "Secret" $.Release.Namespace $user.passwordSecret.name) }} 70 | {{- if $secret }} 71 | {{- $password := index $secret.data $user.passwordSecret.key | b64dec }} 72 | password = {{ $password | quote }} 73 | {{- end }} 74 | {{- end }} 75 | pool_size = {{ $user.pool_size }} 76 | statement_timeout = {{ default 0 $user.statement_timeout }} 77 | min_pool_size = {{ default 3 $user.min_pool_size }} 78 | {{- if $user.server_lifetime }} 79 | server_lifetime = {{ $user.server_lifetime }} 80 | {{- end }} 81 | {{- if and $user.server_username $user.server_password }} 82 | server_username = {{ $user.server_username | quote }} 83 | server_password = {{ $user.server_password | quote }} 84 | {{- end }} 85 | {{- end }} 86 | 87 | {{- range $index, $shard := $pool.shards }} 88 | 89 | ## pool {{ $pool.name }} database {{ $shard.database }} 90 | ## 91 | [pools.{{ $pool.name | quote }}.shards.{{ $index }}] 92 | {{- if gt (len $shard.servers) 0}} 93 | servers = [ 94 | {{- range $server := $shard.servers }} 95 | [ {{ $server.host | quote }}, {{ $server.port }}, {{ $server.role | quote }} ], 96 | {{- end }} 97 | ] 98 | {{- end }} 99 | database = {{ $shard.database | quote }} 100 | {{- end }} 101 | {{- end }} 102 | -------------------------------------------------------------------------------- /charts/pgcat/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "pgcat.fullname" . }} 5 | labels: 6 | {{- include "pgcat.labels" . | nindent 4 }} 7 | spec: 8 | type: {{ .Values.service.type }} 9 | ports: 10 | - port: {{ .Values.service.port }} 11 | targetPort: pgcat 12 | protocol: TCP 13 | name: pgcat 14 | selector: 15 | {{- include "pgcat.selectorLabels" . | nindent 4 }} 16 | -------------------------------------------------------------------------------- /charts/pgcat/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "pgcat.serviceAccountName" . }} 6 | labels: 7 | {{- include "pgcat.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /control: -------------------------------------------------------------------------------- 1 | Package: pgcat 2 | Version: ${PACKAGE_VERSION} 3 | Section: database 4 | Priority: optional 5 | Architecture: ${ARCH} 6 | Maintainer: PostgresML 7 | Homepage: https://postgresml.org 8 | Description: PgCat - NextGen PostgreSQL Pooler 9 | PostgreSQL pooler and proxy (like PgBouncer) with support for sharding, load balancing, failover and mirroring. 10 | -------------------------------------------------------------------------------- /cov-style.css: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2021 Collabora, Ltd. 
3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining 5 | * a copy of this software and associated documentation files (the 6 | * "Software"), to deal in the Software without restriction, including 7 | * without limitation the rights to use, copy, modify, merge, publish, 8 | * distribute, sublicense, and/or sell copies of the Software, and to 9 | * permit persons to whom the Software is furnished to do so, subject to 10 | * the following conditions: 11 | * 12 | * The above copyright notice and this permission notice (including the 13 | * next paragraph) shall be included in all copies or substantial 14 | * portions of the Software. 15 | * 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 20 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 21 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 22 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 | * SOFTWARE. 24 | */ 25 | 26 | body { 27 | background-color: #f2f2f2; 28 | font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, 29 | "Noto Sans", Ubuntu, Cantarell, "Helvetica Neue", sans-serif, 30 | "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", 31 | "Noto Color Emoji"; 32 | } 33 | 34 | .sourceHeading, .source, .coverFn, 35 | .testName, .testPer, .testNum, 36 | .coverLegendCovLo, .headerCovTableEntryLo, .coverPerLo, .coverNumLo, 37 | .coverLegendCovMed, .headerCovTableEntryMed, .coverPerMed, .coverNumMed, 38 | .coverLegendCovHi, .headerCovTableEntryHi, .coverPerHi, .coverNumHi, 39 | .coverFile { 40 | font-family: "Menlo", "DejaVu Sans Mono", "Liberation Mono", 41 | "Consolas", "Ubuntu Mono", "Courier New", "andale mono", 42 | "lucida console", monospace; 43 | } 44 | 45 | pre { 46 | font-size: 0.7875rem; 47 | } 48 | 49 | .headerCovTableEntry, .testPer, .testNum, .testName, 50 | .coverLegendCovLo, .headerCovTableEntryLo, .coverPerLo, .coverNumLo, 51 | .coverLegendCovMed, .headerCovTableEntryMed, .coverPerMed, .coverNumMed, 52 | .coverLegendCovHi, .headerCovTableEntryHi, .coverPerHi, .coverNumHi { 53 | text-align: right; 54 | white-space: nowrap; 55 | } 56 | 57 | .coverPerLo, .coverPerMed, .coverPerHi, .testPer { 58 | /* font-weight: bold;*/ 59 | } 60 | 61 | .coverNumLo, .coverNumMed, .coverNumHi, .testNum { 62 | font-style: italic; 63 | font-size: 90%; 64 | padding-left: 1em; 65 | } 66 | 67 | .title { 68 | font-size: 200%; 69 | } 70 | 71 | .tableHead { 72 | text-align: center; 73 | font-weight: bold; 74 | background-color: #bfbfbf; 75 | } 76 | 77 | .coverFile, .coverBar, .coverFn { 78 | background-color: #d9d9d9; 79 | } 80 | 81 | .headerCovTableHead { 82 | font-weight: bold; 83 | text-align: right; 84 | } 85 | 86 | .headerCovTableEntry { 87 | background-color: #d9d9d9; 88 | } 89 | 90 | .coverFnLo, 91 | .coverLegendCovLo, .headerCovTableEntryLo, .coverPerLo, .coverNumLo { 92 | background-color: #f2dada; 93 | } 94 | 95 | .coverFnHi, 96 | .coverLegendCovMed, .headerCovTableEntryMed, .coverPerMed, .coverNumMed { 97 | background-color: #add9ad; 98 | } 99 | 100 | .coverLegendCovHi, .headerCovTableEntryHi, .coverPerHi, .coverNumHi { 101 | background-color: #59b359; 102 | } 103 | 104 | .coverBarOutline { 105 | border-style: solid; 106 | border-width: 1px; 107 | border-color: black; 108 | padding: 0px; 109 | } 110 | 
111 | .coverFnLo, .coverFnHi { 112 | text-align: right; 113 | } 114 | 115 | .lineNum { 116 | background-color: #d9d9d9; 117 | } 118 | 119 | .coverLegendCov, .lineCov, .branchCov { 120 | background-image: url('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAABCAIAAABsYngUAAADAXpUWHRSYXcgcHJvZmlsZSB0eXBlIGV4aWYAAHjazZVbktwgDEX/WUWWgCSExHIwj6rsIMvPxcY9PY9MzVTyEVMNtCwkoYNwGL9+zvADDxHHkNQ8l5wjnlRS4YqJx+upZ08xnf313O/otTw8FBgzwShbP2/5gJyhz1vetp0KuT4ZKmO/OF6/qNsQ+3ZwO9yOhC4HcRsOdRsS3p7T9f+4thVzcXveQtv6sz5t1dfW0CUxzprJEvrE0SwXzJ1jMuStr0CPvhfqdvTmf7hVGTHxEJKI3leEsn4kFWNCT/CGfUnBXDEuyd4yaHGIhnm58/r581nk4Q59Y32N+p69Qc3xPelwJvRWkTeE8mP8UE76Ig/PSE9uT55z3jN+LZ/pJaibXLjxzdl9znHtrqaMLee9qXuL5wx6x8rWuSqjGX4afSV7tYLmKImGc9RxyA60RoUYGCcl6lRp0jjHRg0hJh4MjszcALcFCB0wCjcgJYBGo8kGzF0cB6DhOAik/IiFTrfldNfI4biTB5wegjHCkr9q4StKc66CIlq55CtXiItXwhHFIkeE6ocaiNDcSdUzwXd7+yyuAoJ6ptmxwRqPZQH4D6WXwyUnaIGiYrwKmKxvA0gRIlAEQwICMZMoZYrGHIwIiXQAqgidJfEBLKTKHUFyEsmAgyqAb6wxOlVZ+RLjIgQIlRzEwAaFCFgpKc6PJccZqiqaVDWrqWvRmiWvCsvZ8rpRq4klU8tm5lasBhdPrp7d3L14LVwEN64W1GPxUkqtcFphuWJ1hUKtBx9ypEOPfNjhRzlq49CkpaYtN2veSqudu3TUcc/duvfS66CBozTS0JGHDR9l1ImjNmWmqTNPmz5LmPVBbWN9175BjTY1PkktRXtQg9TsNkHrOtHFDMQ4EYDbIkASmBez6JQSL3KLWSyMqlBGkLrgdFrEQDANYp30YPdCToPkf8MtAAT/C3JhofsCuffcPqLW6/mhk5PQKsOV1CiovpHgnx3LcCvhwlnz9dF8P4Y/vfju+J8aQpZK+A373P3XzDqcKwAAAAZiS0dEAAAAAAAA+UO7fwAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAd0SU1FB+UEEQYyDQA04tUAAAAZdEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAADklEQVQI12PULVBlwAYAEagAxGHRDdwAAAAASUVORK5CYII='); 121 | background-repeat: repeat-y; 122 | background-position: left top; 123 | background-color: #c6ffb8; 124 | } 125 | 126 | .coverLegendNoCov, .lineNoCov, .branchNoCov, .branchNoExec { 127 | background-image: url('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAABCAIAAABsYngUAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAAB3RJTUUH5QMUCiMidNgp2gAAABl0RVh0Q29tbWVudABDcmVhdGVkIHdpdGggR0lNUFeBDhcAAAAPSURBVAjXY/wZIcWADQAAIa4BbZaExr0AAAAASUVORK5CYII='); 128 | background-repeat: repeat-y; 129 | background-position: left top; 130 | background-color: #ffcfbb; 131 | } 132 | 133 | .coverLegendCov, .coverLegendNoCov { 134 | padding: 0em 1em 0em 1em; 135 | } 136 | 137 | .headerItem, .headerValue, .headerValueLeg { 138 | white-space: nowrap; 139 | } 140 | 141 | .headerItem { 142 | text-align: right; 143 | font-weight: bold; 144 | } 145 | 146 | .ruler { 147 | background-color: #d9d9d9; 148 | } 149 | 150 | .detail { 151 | font-size: 80%; 152 | } 153 | 154 | .versionInfo { 155 | font-size: 80%; 156 | text-align: right; 157 | } 158 | 159 | -------------------------------------------------------------------------------- /cr.yaml: -------------------------------------------------------------------------------- 1 | sign: false 2 | pages_branch: main 3 | -------------------------------------------------------------------------------- /ct.yaml: -------------------------------------------------------------------------------- 1 | remote: origin 2 | target-branch: main 3 | chart-dirs: 4 | - charts 5 | 6 | -------------------------------------------------------------------------------- /dev/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:bullseye 2 | 3 | # Dependencies 4 | COPY --from=sclevine/yj /bin/yj /bin/yj 5 | RUN /bin/yj -h 6 | RUN apt-get update -y \ 7 | && apt-get install -y \ 8 | llvm-11 psmisc postgresql-contrib postgresql-client \ 9 | ruby ruby-dev libpq-dev python3 python3-pip lcov curl sudo iproute2 \ 10 | strace ngrep iproute2 dnsutils lsof net-tools telnet 11 | 12 | # Rust 13 | RUN 
cargo install cargo-binutils rustfilt 14 | RUN rustup component add llvm-tools-preview 15 | 16 | # Ruby 17 | RUN sudo gem install bundler 18 | 19 | # Toxiproxy 20 | RUN wget -O toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \ 21 | sudo dpkg -i toxiproxy-2.4.0.deb 22 | 23 | # Config 24 | ENV APP_ROOT=/app 25 | ARG APP_USER=pgcat 26 | COPY dev_bashrc /etc/bash.bashrc 27 | 28 | RUN useradd -m -o -u 999 ${APP_USER} || exit 0 && mkdir ${APP_ROOT} && chown ${APP_USER} ${APP_ROOT} 29 | RUN adduser ${APP_USER} sudo \ 30 | && echo "${APP_USER} ALL=NOPASSWD: ALL" > /etc/sudoers.d/${APP_USER} \ 31 | && chmod ugo+s /usr/sbin/usermod /usr/sbin/groupmod 32 | ENV HOME=${APP_ROOT} 33 | WORKDIR ${APP_ROOT} 34 | 35 | ENTRYPOINT ["/bin/bash"] 36 | -------------------------------------------------------------------------------- /dev/dev_bashrc: -------------------------------------------------------------------------------- 1 | # ~/.bashrc: executed by bash(1) for non-login shells. 2 | # see /usr/share/doc/bash/examples/startup-files (in the package bash-doc) 3 | # for examples 4 | 5 | # FIX USER NEEDED SO WE CAN SHARE UID BETWEEN HOST AND DEV ENV 6 | usermod -o -u $(id -u) pgcat 7 | groupmod -o -g $(id -g) pgcat 8 | 9 | # We fix the setuid in those commands as we now have sudo 10 | sudo chmod ugo-s /usr/sbin/usermod /usr/sbin/groupmod 11 | 12 | # Environment customization 13 | export DEV_ROOT="${APP_ROOT}/dev" 14 | export HISTFILE="${DEV_ROOT}/.bash_history" 15 | export CARGO_TARGET_DIR="${DEV_ROOT}/cache/target" 16 | export CARGO_HOME="${DEV_ROOT}/cache/target/.cargo" 17 | export BUNDLE_PATH="${DEV_ROOT}/cache/bundle" 18 | 19 | # Regular bashrc 20 | # If not running interactively, don't do anything 21 | case $- in 22 | *i*) ;; 23 | *) return;; 24 | esac 25 | 26 | # don't put duplicate lines or lines starting with space in the history. 27 | # See bash(1) for more options 28 | HISTCONTROL=ignoreboth 29 | 30 | # append to the history file, don't overwrite it 31 | shopt -s histappend 32 | 33 | # for setting history length see HISTSIZE and HISTFILESIZE in bash(1) 34 | HISTSIZE=1000 35 | HISTFILESIZE=2000 36 | 37 | # check the window size after each command and, if necessary, 38 | # update the values of LINES and COLUMNS. 39 | shopt -s checkwinsize 40 | 41 | # If set, the pattern "**" used in a pathname expansion context will 42 | # match all files and zero or more directories and subdirectories. 43 | #shopt -s globstar 44 | 45 | # make less more friendly for non-text input files, see lesspipe(1) 46 | [ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)" 47 | 48 | # set variable identifying the chroot you work in (used in the prompt below) 49 | if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then 50 | debian_chroot=$(cat /etc/debian_chroot) 51 | fi 52 | 53 | # set a fancy prompt (non-color, unless we know we "want" color) 54 | case "$TERM" in 55 | xterm-color|*-256color) color_prompt=yes;; 56 | esac 57 | 58 | # uncomment for a colored prompt, if the terminal has the capability; turned 59 | # off by default to not distract the user: the focus in a terminal window 60 | # should be on the output of commands, not on the prompt 61 | #force_color_prompt=yes 62 | 63 | if [ -n "$force_color_prompt" ]; then 64 | if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then 65 | # We have color support; assume it's compliant with Ecma-48 66 | # (ISO/IEC-6429).
(Lack of such support is extremely rare, and such 67 | # a case would tend to support setf rather than setaf.) 68 | color_prompt=yes 69 | else 70 | color_prompt= 71 | fi 72 | fi 73 | 74 | PS1='\[\e]0;pgcat@dev-container\h: \w\a\]${debian_chroot:+($debian_chroot)}\[\033[01;32m\]pgcat\[\033[00m\]@\[\033[01;32m\]dev-container\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\[\033[01;31m\]$(git branch &>/dev/null; if [ $? -eq 0 ]; then echo " ($(git branch | grep ^* |sed s/\*\ //))"; fi)\[\033[00m\]\$ ' 75 | 76 | unset color_prompt force_color_prompt 77 | 78 | # enable color support of ls and also add handy aliases 79 | if [ -x /usr/bin/dircolors ]; then 80 | test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)" 81 | alias ls='ls --color=auto' 82 | #alias dir='dir --color=auto' 83 | #alias vdir='vdir --color=auto' 84 | 85 | alias grep='grep --color=auto' 86 | alias fgrep='fgrep --color=auto' 87 | alias egrep='egrep --color=auto' 88 | fi 89 | 90 | # colored GCC warnings and errors 91 | #export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01' 92 | 93 | # some more ls aliases 94 | alias ll='ls -alF' 95 | alias la='ls -A' 96 | alias l='ls -CF' 97 | 98 | # Add an "alert" alias for long running commands. Use like so: 99 | # sleep 10; alert 100 | alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"' 101 | 102 | # Alias definitions. 103 | # You may want to put all your additions into a separate file like 104 | # ~/.bash_aliases, instead of adding them here directly. 105 | # See /usr/share/doc/bash-doc/examples in the bash-doc package. 106 | 107 | if [ -f ~/.bash_aliases ]; then 108 | . ~/.bash_aliases 109 | fi 110 | 111 | # enable programmable completion features (you don't need to enable 112 | # this, if it's already enabled in /etc/bash.bashrc and /etc/profile 113 | # sources /etc/bash.bashrc). 114 | if ! shopt -oq posix; then 115 | if [ -f /usr/share/bash-completion/bash_completion ]; then 116 | . /usr/share/bash-completion/bash_completion 117 | elif [ -f /etc/bash_completion ]; then 118 | . 
/etc/bash_completion 119 | fi 120 | fi 121 | -------------------------------------------------------------------------------- /dev/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | x-common-definition-pg: 4 | &common-definition-pg 5 | image: postgres:14 6 | network_mode: "service:main" 7 | healthcheck: 8 | test: [ "CMD-SHELL", "pg_isready -U postgres -d postgres" ] 9 | interval: 5s 10 | timeout: 5s 11 | retries: 5 12 | volumes: 13 | - type: bind 14 | source: ../tests/sharding/query_routing_setup.sql 15 | target: /docker-entrypoint-initdb.d/query_routing_setup.sql 16 | - type: bind 17 | source: ../tests/sharding/partition_hash_test_setup.sql 18 | target: /docker-entrypoint-initdb.d/partition_hash_test_setup.sql 19 | 20 | x-common-env-pg: 21 | &common-env-pg 22 | POSTGRES_USER: postgres 23 | POSTGRES_DB: postgres 24 | POSTGRES_PASSWORD: postgres 25 | 26 | services: 27 | main: 28 | image: gcr.io/google_containers/pause:3.2 29 | ports: 30 | - 6432 31 | 32 | pg1: 33 | <<: *common-definition-pg 34 | environment: 35 | <<: *common-env-pg 36 | POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5 37 | PGPORT: 5432 38 | command: ["postgres", "-p", "5432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"] 39 | 40 | pg2: 41 | <<: *common-definition-pg 42 | environment: 43 | <<: *common-env-pg 44 | POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256 45 | PGPORT: 7432 46 | command: ["postgres", "-p", "7432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"] 47 | pg3: 48 | <<: *common-definition-pg 49 | environment: 50 | <<: *common-env-pg 51 | POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256 52 | PGPORT: 8432 53 | command: ["postgres", "-p", "8432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"] 54 | pg4: 55 | <<: *common-definition-pg 56 | environment: 57 | <<: *common-env-pg 58 | POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256 59 | PGPORT: 9432 60 | command: ["postgres", "-p", "9432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"] 61 | pg5: 62 | <<: *common-definition-pg 63 | environment: 64 | <<: *common-env-pg 65 | POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5 66 | PGPORT: 10432 67 | command: ["postgres", "-p", "10432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"] 68 | 69 | toxiproxy: 70 | build: . 71 | network_mode: "service:main" 72 | container_name: toxiproxy 73 | environment: 74 | LOG_LEVEL: info 75 | entrypoint: toxiproxy-server 76 | depends_on: 77 | - pg1 78 | - pg2 79 | - pg3 80 | - pg4 81 | - pg5 82 | 83 | pgcat-shell: 84 | stdin_open: true 85 | user: "${HOST_UID}:${HOST_GID}" 86 | build: . 
87 | network_mode: "service:main" 88 | depends_on: 89 | - toxiproxy 90 | volumes: 91 | - ../:/app/ 92 | entrypoint: 93 | - /bin/bash 94 | - -i 95 | -------------------------------------------------------------------------------- /dev/script/console: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | export HOST_UID="$(id -u)" 5 | export HOST_GID="$(id -g)" 6 | 7 | if [[ "${1}" == "down" ]]; then 8 | docker-compose -f "${DIR}/../docker-compose.yaml" down 9 | exit 0 10 | else 11 | docker-compose -f "${DIR}/../docker-compose.yaml" run --rm pgcat-shell 12 | fi 13 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | postgres: 4 | image: postgres:14 5 | environment: 6 | POSTGRES_PASSWORD: postgres 7 | POSTGRES_HOST_AUTH_METHOD: md5 8 | pgcat: 9 | build: . 10 | command: 11 | - "pgcat" 12 | - "/etc/pgcat/pgcat.toml" 13 | volumes: 14 | - "${PWD}/examples/docker/pgcat.toml:/etc/pgcat/pgcat.toml" 15 | ports: 16 | - "6432:6432" 17 | - "9930:9930" 18 | -------------------------------------------------------------------------------- /examples/docker/pgcat.toml: -------------------------------------------------------------------------------- 1 | # 2 | # PgCat config example. 3 | # 4 | 5 | # 6 | # General pooler settings 7 | [general] 8 | # What IP to run on, 0.0.0.0 means accessible from everywhere. 9 | host = "0.0.0.0" 10 | 11 | # Port to run on; same as PgBouncer in this example. 12 | port = 6432 13 | 14 | # Whether to enable the prometheus exporter or not. 15 | enable_prometheus_exporter = true 16 | 17 | # Port on which the prometheus exporter listens. 18 | prometheus_exporter_port = 9930 19 | 20 | # How long to wait before aborting a server connection (ms). 21 | connect_timeout = 5000 22 | 23 | # How much time to give the `SELECT 1` health check query to return with a result (ms). 24 | healthcheck_timeout = 1000 25 | 26 | # How long to keep a connection available for immediate re-use without running a healthcheck query on it (ms). 27 | healthcheck_delay = 30000 28 | 29 | # How much time to give clients during shutdown before forcibly killing client connections (ms). 30 | shutdown_timeout = 60000 31 | 32 | # For how long to ban a server if it fails a health check (seconds). 33 | ban_time = 60 # seconds 34 | 35 | # If we should log client connections 36 | log_client_connections = false 37 | 38 | # If we should log client disconnections 39 | log_client_disconnections = false 40 | 41 | # TLS 42 | # tls_certificate = "server.cert" 43 | # tls_private_key = "server.key" 44 | 45 | # Credentials to access the virtual administrative database (pgbouncer or pgcat) 46 | # Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc. 47 | admin_username = "postgres" 48 | admin_password = "postgres" 49 | 50 | # pool 51 | # configs are structured as pool.<pool_name> 52 | # the pool_name is what clients use as database name when connecting 53 | # For the example below a client can connect using "postgres://postgres:postgres@pgcat_host:pgcat_port/postgres" 54 | [pools.postgres] 55 | # Pool mode (see PgBouncer docs for more).
56 | # session: one server connection per connected client 57 | # transaction: one server connection per client transaction 58 | pool_mode = "transaction" 59 | 60 | # If the client doesn't specify, route traffic to 61 | # this role by default. 62 | # 63 | # any: round-robin between primary and replicas, 64 | # replica: round-robin between replicas only without touching the primary, 65 | # primary: all queries go to the primary unless otherwise specified. 66 | default_role = "any" 67 | 68 | # Query parser. If enabled, we'll attempt to parse 69 | # every incoming query to determine if it's a read or a write. 70 | # If it's a read query, we'll direct it to a replica. Otherwise, if it's a write, 71 | # we'll direct it to the primary. 72 | query_parser_enabled = true 73 | 74 | # If the query parser is enabled and this setting is enabled, we'll attempt to 75 | # infer the role from the query itself. 76 | query_parser_read_write_splitting = true 77 | 78 | # If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for 79 | # load balancing of read queries. Otherwise, the primary will only be used for write 80 | # queries. The primary can always be explicitly selected with our custom protocol. 81 | primary_reads_enabled = true 82 | 83 | # So what if you wanted to implement a different hashing function, 84 | # or you've already built one and you want this pooler to use it? 85 | # 86 | # Current options: 87 | # 88 | # pg_bigint_hash: PARTITION BY HASH (Postgres hashing function) 89 | # sha1: A hashing function based on SHA1 90 | # 91 | sharding_function = "pg_bigint_hash" 92 | 93 | # Credentials for users that may connect to this cluster 94 | [pools.postgres.users.0] 95 | username = "postgres" 96 | password = "postgres" 97 | # Maximum number of server connections that can be established for this user 98 | # The maximum number of connections from a single PgCat process to any database in the cluster 99 | # is the sum of pool_size across all users. 100 | pool_size = 9 101 | 102 | # Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way. 103 | statement_timeout = 0 104 | 105 | # Shard 0 106 | [pools.postgres.shards.0] 107 | # [ host, port, role ] 108 | servers = [ 109 | [ "postgres", 5432, "primary" ], 110 | [ "postgres", 5432, "replica" ] 111 | ] 112 | # Database name (e.g.
"postgres") 113 | database = "postgres" 114 | 115 | [pools.postgres.shards.1] 116 | servers = [ 117 | [ "postgres", 5432, "primary" ], 118 | [ "postgres", 5432, "replica" ], 119 | ] 120 | database = "postgres" 121 | 122 | [pools.postgres.shards.2] 123 | servers = [ 124 | [ "postgres", 5432, "primary" ], 125 | [ "postgres", 5432, "replica" ], 126 | ] 127 | database = "postgres" 128 | -------------------------------------------------------------------------------- /images/instacart.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/postgresml/pgcat/5b038813eb14f181434ab7b5509e74d9b1fe123b/images/instacart.webp -------------------------------------------------------------------------------- /images/one_signal.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/postgresml/pgcat/5b038813eb14f181434ab7b5509e74d9b1fe123b/images/one_signal.webp -------------------------------------------------------------------------------- /images/postgresml.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/postgresml/pgcat/5b038813eb14f181434ab7b5509e74d9b1fe123b/images/postgresml.webp -------------------------------------------------------------------------------- /pgcat.minimal.toml: -------------------------------------------------------------------------------- 1 | # This is an example of the most basic config 2 | # that will mimic what PgBouncer does in transaction mode with one server. 3 | 4 | [general] 5 | 6 | host = "0.0.0.0" 7 | port = 6433 8 | admin_username = "pgcat" 9 | admin_password = "pgcat" 10 | 11 | [pools.pgml.users.0] 12 | username = "postgres" 13 | password = "postgres" 14 | pool_size = 10 15 | min_pool_size = 1 16 | pool_mode = "transaction" 17 | 18 | [pools.pgml.shards.0] 19 | servers = [ 20 | ["127.0.0.1", 28815, "primary"] 21 | ] 22 | database = "postgres" 23 | -------------------------------------------------------------------------------- /pgcat.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=PgCat pooler 3 | After=network.target 4 | StartLimitIntervalSec=0 5 | 6 | [Service] 7 | User=pgcat 8 | Type=simple 9 | Restart=always 10 | RestartSec=1 11 | Environment=RUST_LOG=info 12 | LimitNOFILE=65536 13 | ExecStart=/usr/bin/pgcat /etc/pgcat.toml 14 | ExecReload=/bin/kill -SIGHUP $MAINPID 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /postinst: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | systemctl daemon-reload 5 | systemctl enable pgcat 6 | 7 | if ! 
id pgcat 2> /dev/null; then 8 | useradd -s /usr/bin/false pgcat 9 | fi 10 | 11 | if [ -f /etc/pgcat.toml ]; then 12 | systemctl start pgcat 13 | fi 14 | -------------------------------------------------------------------------------- /postrm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | systemctl daemon-reload 5 | -------------------------------------------------------------------------------- /prerm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | systemctl stop pgcat 5 | systemctl disable pgcat 6 | -------------------------------------------------------------------------------- /src/auth_passthrough.rs: -------------------------------------------------------------------------------- 1 | use crate::config::AuthType; 2 | use crate::errors::Error; 3 | use crate::pool::ConnectionPool; 4 | use crate::server::Server; 5 | use log::debug; 6 | 7 | #[derive(Clone, Debug)] 8 | pub struct AuthPassthrough { 9 | password: String, 10 | query: String, 11 | user: String, 12 | } 13 | 14 | impl AuthPassthrough { 15 | /// Initializes an AuthPassthrough. 16 | pub fn new(query: &str, user: &str, password: &str) -> Self { 17 | AuthPassthrough { 18 | password: password.to_string(), 19 | query: query.to_string(), 20 | user: user.to_string(), 21 | } 22 | } 23 | 24 | /// Returns an AuthPassthrough given the pool configuration. 25 | /// If any of the required values is not set, None is returned. 26 | pub fn from_pool_config(pool_config: &crate::config::Pool) -> Option<Self> { 27 | if pool_config.is_auth_query_configured() { 28 | return Some(AuthPassthrough::new( 29 | pool_config.auth_query.as_ref().unwrap(), 30 | pool_config.auth_query_user.as_ref().unwrap(), 31 | pool_config.auth_query_password.as_ref().unwrap(), 32 | )); 33 | } 34 | 35 | None 36 | } 37 | 38 | /// Returns an AuthPassthrough given the pool settings. 39 | /// If any of the required values is not set, None is returned. 40 | pub fn from_pool_settings(pool_settings: &crate::pool::PoolSettings) -> Option<Self> { 41 | let pool_config = crate::config::Pool { 42 | auth_query: pool_settings.auth_query.clone(), 43 | auth_query_password: pool_settings.auth_query_password.clone(), 44 | auth_query_user: pool_settings.auth_query_user.clone(), 45 | ..Default::default() 46 | }; 47 | 48 | AuthPassthrough::from_pool_config(&pool_config) 49 | } 50 | 51 | /// Connects to the server and executes auth_query for the specified address. 52 | /// If the response is a row with two columns containing the username set in the address 53 | /// and its MD5 hash, the MD5 hash is returned. 54 | /// 55 | /// Note that the query is executed with $1 replaced by the name of the user; 56 | /// this is so we only hold in memory (and transfer) the least amount of 'sensitive' data. 57 | /// Also, it is compatible with pgbouncer. 58 | /// 59 | /// # Arguments 60 | /// 61 | /// * `address` - An Address of the server we want to connect to. The username for the hash will be obtained from this value.
62 | /// 63 | /// # Examples 64 | /// 65 | /// ``` 66 | /// use pgcat::auth_passthrough::AuthPassthrough; 67 | /// use pgcat::config::Address; 68 | /// let auth_passthrough = AuthPassthrough::new("SELECT * FROM public.user_lookup('$1');", "postgres", "postgres"); 69 | /// auth_passthrough.fetch_hash(&Address::default()); 70 | /// ``` 71 | /// 72 | pub async fn fetch_hash(&self, address: &crate::config::Address) -> Result<String, Error> { 73 | let auth_user = crate::config::User { 74 | username: self.user.clone(), 75 | auth_type: AuthType::MD5, 76 | password: Some(self.password.clone()), 77 | server_username: None, 78 | server_password: None, 79 | pool_size: 1, 80 | statement_timeout: 0, 81 | pool_mode: None, 82 | server_lifetime: None, 83 | min_pool_size: None, 84 | connect_timeout: None, 85 | idle_timeout: None, 86 | }; 87 | 88 | let user = &address.username; 89 | 90 | debug!("Connecting to server to obtain auth hashes"); 91 | 92 | let auth_query = self.query.replace("$1", user); 93 | 94 | match Server::exec_simple_query(address, &auth_user, &auth_query).await { 95 | Ok(password_data) => { 96 | if password_data.len() == 2 && password_data.first().unwrap() == user { 97 | if let Some(stripped_hash) = password_data 98 | .last() 99 | .unwrap() 100 | .to_string() 101 | .strip_prefix("md5") { 102 | Ok(stripped_hash.to_string()) 103 | } 104 | else { 105 | Err(Error::AuthPassthroughError( 106 | "Obtained hash from auth_query does not seem to be in md5 format.".to_string(), 107 | )) 108 | } 109 | } else { 110 | Err(Error::AuthPassthroughError( 111 | "Data obtained from query does not follow the scheme 'user','hash'." 112 | .to_string(), 113 | )) 114 | } 115 | } 116 | Err(err) => { 117 | Err(Error::AuthPassthroughError( 118 | format!("Error trying to obtain password from auth_query, ignoring hash for user '{}'. Error: {:?}", 119 | user, err)) 120 | ) 121 | } 122 | } 123 | } 124 | } 125 | 126 | pub async fn refetch_auth_hash(pool: &ConnectionPool) -> Result<String, Error> { 127 | let address = pool.address(0, 0); 128 | if let Some(apt) = AuthPassthrough::from_pool_settings(&pool.settings) { 129 | let hash = apt.fetch_hash(address).await?; 130 | 131 | return Ok(hash); 132 | } 133 | 134 | Err(Error::ClientError(format!( 135 | "Could not obtain hash for {{ username: {:?}, database: {:?} }}.
Auth passthrough not enabled.", 136 | address.username, address.database 137 | ))) 138 | } 139 | -------------------------------------------------------------------------------- /src/cmd_args.rs: -------------------------------------------------------------------------------- 1 | use clap::{Parser, ValueEnum}; 2 | use tracing::Level; 3 | 4 | /// PgCat: Nextgen PostgreSQL Pooler 5 | #[derive(Parser, Debug)] 6 | #[command(author, version, about, long_about = None)] 7 | pub struct Args { 8 | #[arg(default_value_t = String::from("pgcat.toml"), env)] 9 | pub config_file: String, 10 | 11 | #[arg(short, long, default_value_t = tracing::Level::INFO, env)] 12 | pub log_level: Level, 13 | 14 | #[clap(short='F', long, value_enum, default_value_t=LogFormat::Text, env)] 15 | pub log_format: LogFormat, 16 | 17 | #[arg( 18 | short, 19 | long, 20 | default_value_t = false, 21 | env, 22 | help = "disable colors in the log output" 23 | )] 24 | pub no_color: bool, 25 | } 26 | 27 | pub fn parse() -> Args { 28 | Args::parse() 29 | } 30 | 31 | #[derive(ValueEnum, Clone, Debug)] 32 | pub enum LogFormat { 33 | Text, 34 | Structured, 35 | Debug, 36 | } 37 | -------------------------------------------------------------------------------- /src/constants.rs: -------------------------------------------------------------------------------- 1 | /// Various protocol constants, as defined in 2 | /// the PostgreSQL wire protocol documentation 3 | /// and elsewhere in the source code. 4 | 5 | // Used in the StartupMessage to indicate regular handshake. 6 | pub const PROTOCOL_VERSION_NUMBER: i32 = 196608; 7 | 8 | // SSLRequest: used to indicate we want an SSL connection. 9 | pub const SSL_REQUEST_CODE: i32 = 80877103; 10 | 11 | // CancelRequest: the cancel request code. 12 | pub const CANCEL_REQUEST_CODE: i32 = 80877102; 13 | 14 | // AuthenticationMD5Password 15 | pub const MD5_ENCRYPTED_PASSWORD: i32 = 5; 16 | 17 | // SASL 18 | pub const SASL: i32 = 10; 19 | pub const SASL_CONTINUE: i32 = 11; 20 | pub const SASL_FINAL: i32 = 12; 21 | pub const SCRAM_SHA_256: &str = "SCRAM-SHA-256"; 22 | pub const NONCE_LENGTH: usize = 24; 23 | 24 | // AuthenticationOk 25 | pub const AUTHENTICATION_SUCCESSFUL: i32 = 0; 26 | 27 | // ErrorResponse: A code identifying the field type; if zero, this is the message terminator and no string follows. 28 | pub const MESSAGE_TERMINATOR: u8 = 0; 29 | 30 | // 31 | // Data types 32 | // 33 | pub const _OID_INT8: i32 = 20; // bigint 34 | -------------------------------------------------------------------------------- /src/errors.rs: -------------------------------------------------------------------------------- 1 | //! Errors. 2 | 3 | /// Various errors.
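/// Callers generally bubble these up and log them through the `Display`
/// implementation below; a minimal sketch (illustrative call site, not taken
/// from this crate):
///
/// ```ignore
/// if let Err(err) = server.query("SELECT 1").await {
///     log::error!("health check failed: {}", err);
/// }
/// ```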
4 | #[derive(Debug, PartialEq, Clone)] 5 | pub enum Error { 6 | SocketError(String), 7 | ClientSocketError(String, ClientIdentifier), 8 | ClientGeneralError(String, ClientIdentifier), 9 | ClientAuthImpossible(String), 10 | ClientAuthPassthroughError(String, ClientIdentifier), 11 | ClientBadStartup, 12 | ProtocolSyncError(String), 13 | BadQuery(String), 14 | ServerError, 15 | ServerMessageParserError(String), 16 | ServerStartupError(String, ServerIdentifier), 17 | ServerAuthError(String, ServerIdentifier), 18 | BadConfig, 19 | AllServersDown, 20 | ClientError(String), 21 | TlsError, 22 | StatementTimeout, 23 | DNSCachedError(String), 24 | ShuttingDown, 25 | ParseBytesError(String), 26 | AuthError(String), 27 | AuthPassthroughError(String), 28 | UnsupportedStatement, 29 | QueryRouterParserError(String), 30 | QueryRouterError(String), 31 | InvalidShardId(usize), 32 | PreparedStatementError, 33 | } 34 | 35 | #[derive(Clone, PartialEq, Debug)] 36 | pub struct ClientIdentifier { 37 | pub application_name: String, 38 | pub username: String, 39 | pub pool_name: String, 40 | } 41 | 42 | impl ClientIdentifier { 43 | pub fn new(application_name: &str, username: &str, pool_name: &str) -> ClientIdentifier { 44 | ClientIdentifier { 45 | application_name: application_name.into(), 46 | username: username.into(), 47 | pool_name: pool_name.into(), 48 | } 49 | } 50 | } 51 | 52 | impl std::fmt::Display for ClientIdentifier { 53 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 54 | write!( 55 | f, 56 | "{{ application_name: {}, username: {}, pool_name: {} }}", 57 | self.application_name, self.username, self.pool_name 58 | ) 59 | } 60 | } 61 | 62 | #[derive(Clone, PartialEq, Debug)] 63 | pub struct ServerIdentifier { 64 | pub username: String, 65 | pub database: String, 66 | } 67 | 68 | impl ServerIdentifier { 69 | pub fn new(username: &str, database: &str) -> ServerIdentifier { 70 | ServerIdentifier { 71 | username: username.into(), 72 | database: database.into(), 73 | } 74 | } 75 | } 76 | 77 | impl std::fmt::Display for Error { 78 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 79 | match &self { 80 | &Error::ClientSocketError(error, client_identifier) => write!( 81 | f, 82 | "Error reading {} from client {}", 83 | error, client_identifier 84 | ), 85 | &Error::ClientGeneralError(error, client_identifier) => { 86 | write!(f, "{} {}", error, client_identifier) 87 | } 88 | &Error::ClientAuthImpossible(username) => write!( 89 | f, 90 | "Client auth not possible, \ 91 | no cleartext password set for username: {} \ 92 | in config and auth passthrough (auth_query) \ 93 | is not set up.", 94 | username 95 | ), 96 | &Error::ClientAuthPassthroughError(error, client_identifier) => write!( 97 | f, 98 | "No cleartext password set, \ 99 | and auth passthrough could not \ 100 | obtain the hash from the server for {}, \ 101 | the error was: {}", 102 | client_identifier, error 103 | ), 104 | &Error::ServerStartupError(error, server_identifier) => write!( 105 | f, 106 | "Error reading {} on server startup {}", 107 | error, server_identifier, 108 | ), 109 | &Error::ServerAuthError(error, server_identifier) => { 110 | write!(f, "{} for {}", error, server_identifier,) 111 | } 112 | 113 | // The rest can use Debug.
124 | err => write!(f, "{:?}", err), 125 | } 126 | } 127 | } 128 | 129 | impl From<std::ffi::NulError> for Error { 130 | fn from(err: std::ffi::NulError) -> Self { 131 | Error::QueryRouterError(err.to_string()) 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod admin; 2 | pub mod auth_passthrough; 3 | pub mod client; 4 | pub mod cmd_args; 5 | pub mod config; 6 | pub mod constants; 7 | pub mod dns_cache; 8 | pub mod errors; 9 | pub mod logger; 10 | pub mod messages; 11 | pub mod mirrors; 12 | pub mod plugins; 13 | pub mod pool; 14 | pub mod prometheus; 15 | pub mod query_router; 16 | pub mod scram; 17 | pub mod server; 18 | pub mod sharding; 19 | pub mod stats; 20 | pub mod tls; 21 | 22 | /// Format chrono::Duration to be more human-friendly. 23 | /// 24 | /// # Arguments 25 | /// 26 | /// * `duration` - A duration of time 27 | pub fn format_duration(duration: &chrono::Duration) -> String { 28 | let milliseconds = format!("{:0>3}", duration.num_milliseconds() % 1000); 29 | 30 | let seconds = format!("{:0>2}", duration.num_seconds() % 60); 31 | 32 | let minutes = format!("{:0>2}", duration.num_minutes() % 60); 33 | 34 | let hours = format!("{:0>2}", duration.num_hours() % 24); 35 | 36 | let days = duration.num_days().to_string(); 37 | 38 | format!( 39 | "{}d {}:{}:{}.{}", 40 | days, hours, minutes, seconds, milliseconds 41 | ) 42 | } 43 | -------------------------------------------------------------------------------- /src/logger.rs: -------------------------------------------------------------------------------- 1 | use crate::cmd_args::{Args, LogFormat}; 2 | use tracing_subscriber; 3 | use tracing_subscriber::EnvFilter; 4 | 5 | pub fn init(args: &Args) { 6 | // Initialize a default filter, and then override the builtin default "warning" with our 7 | // command-line setting (default: "info") 8 | let filter = EnvFilter::from_default_env().add_directive(args.log_level.into()); 9 | 10 | let trace_sub = tracing_subscriber::fmt() 11 | .with_thread_ids(true) 12 | .with_env_filter(filter) 13 | .with_ansi(!args.no_color); 14 | 15 | match args.log_format { 16 | LogFormat::Structured => trace_sub.json().init(), 17 | LogFormat::Debug => trace_sub.pretty().init(), 18 | _ => trace_sub.init(), 19 | }; 20 | } 21 | -------------------------------------------------------------------------------- /src/plugins/intercept.rs: -------------------------------------------------------------------------------- 1 | //! The intercept plugin. 2 | //! 3 | //! It intercepts queries and returns fake results. 4 | 5 | use async_trait::async_trait; 6 | use bytes::{BufMut, BytesMut}; 7 | use serde::{Deserialize, Serialize}; 8 | use sqlparser::ast::Statement; 9 | 10 | use log::debug; 11 | 12 | use crate::{ 13 | config::Intercept as InterceptConfig, 14 | errors::Error, 15 | messages::{command_complete, data_row_nullable, row_description, DataType}, 16 | plugins::{Plugin, PluginOutput}, 17 | query_router::QueryRouter, 18 | }; 19 | 20 | // TODO: use these structs for deserialization 21 | #[derive(Serialize, Deserialize)] 22 | pub struct Rule { 23 | query: String, 24 | schema: Vec<Column>, 25 | result: Vec<Vec<String>>, 26 | } 27 | 28 | #[derive(Serialize, Deserialize)] 29 | pub struct Column { 30 | name: String, 31 | data_type: String, 32 | } 33 | 34 | /// The intercept plugin.
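///
/// A configuration sketch (key names assumed for illustration; the
/// authoritative schema lives in `config.rs`): each rule matches a normalized
/// query and describes the canned result to send back.
///
/// ```toml
/// [plugins.intercept.queries.0]
/// query = "select current_database() as name"
/// schema = [["name", "text"]]
/// result = [["${DATABASE}"]]
/// ```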
35 | pub struct Intercept<'a> { 36 | pub enabled: bool, 37 | pub config: &'a InterceptConfig, 38 | } 39 | 40 | #[async_trait] 41 | impl<'a> Plugin for Intercept<'a> { 42 | async fn run( 43 | &mut self, 44 | query_router: &QueryRouter, 45 | ast: &Vec<Statement>, 46 | ) -> Result<PluginOutput, Error> { 47 | if !self.enabled || ast.is_empty() { 48 | return Ok(PluginOutput::Allow); 49 | } 50 | 51 | let mut config = self.config.clone(); 52 | config.substitute( 53 | &query_router.pool_settings().db, 54 | &query_router.pool_settings().user.username, 55 | ); 56 | 57 | let mut result = BytesMut::new(); 58 | 59 | for q in ast { 60 | // Normalization 61 | let q = q.to_string().to_ascii_lowercase(); 62 | 63 | for (_, target) in config.queries.iter() { 64 | if target.query.as_str() == q { 65 | debug!("Intercepting query: {}", q); 66 | 67 | let rd = target 68 | .schema 69 | .iter() 70 | .map(|row| { 71 | let name = &row[0]; 72 | let data_type = &row[1]; 73 | ( 74 | name.as_str(), 75 | match data_type.as_str() { 76 | "text" => DataType::Text, 77 | "anyarray" => DataType::AnyArray, 78 | "oid" => DataType::Oid, 79 | "bool" => DataType::Bool, 80 | "int4" => DataType::Int4, 81 | _ => DataType::Any, 82 | }, 83 | ) 84 | }) 85 | .collect::<Vec<(&str, DataType)>>(); 86 | 87 | result.put(row_description(&rd)); 88 | 89 | target.result.iter().for_each(|row| { 90 | let row = row 91 | .iter() 92 | .map(|s| { 93 | let s = s.as_str().to_string(); 94 | 95 | if s.is_empty() { 96 | None 97 | } else { 98 | Some(s) 99 | } 100 | }) 101 | .collect::<Vec<Option<String>>>(); 102 | result.put(data_row_nullable(&row)); 103 | }); 104 | 105 | result.put(command_complete("SELECT")); 106 | } 107 | } 108 | } 109 | 110 | if !result.is_empty() { 111 | result.put_u8(b'Z'); 112 | result.put_i32(5); 113 | result.put_u8(b'I'); 114 | 115 | return Ok(PluginOutput::Intercept(result)); 116 | } else { 117 | Ok(PluginOutput::Allow) 118 | } 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /src/plugins/mod.rs: -------------------------------------------------------------------------------- 1 | //! The plugin ecosystem. 2 | //! 3 | //! Currently plugins only grant or deny access to the database for a particular query. 4 | //! Example use cases: 5 | //! - block known bad queries 6 | //! - block access to system catalogs 7 | //! - block dangerous modifications like `DROP TABLE` 8 | //! - etc 9 | //! 10 | 11 | pub mod intercept; 12 | pub mod prewarmer; 13 | pub mod query_logger; 14 | pub mod table_access; 15 | 16 | use crate::{errors::Error, query_router::QueryRouter}; 17 | use async_trait::async_trait; 18 | use bytes::BytesMut; 19 | use sqlparser::ast::Statement; 20 | 21 | pub use intercept::Intercept; 22 | pub use query_logger::QueryLogger; 23 | pub use table_access::TableAccess; 24 | 25 | #[derive(Clone, Debug, PartialEq)] 26 | pub enum PluginOutput { 27 | Allow, 28 | Deny(String), 29 | Overwrite(Vec<Statement>), 30 | Intercept(BytesMut), 31 | } 32 | 33 | #[async_trait] 34 | pub trait Plugin { 35 | // Run before the query is sent to the server. 36 | #[allow(clippy::ptr_arg)] 37 | async fn run( 38 | &mut self, 39 | query_router: &QueryRouter, 40 | ast: &Vec<Statement>, 41 | ) -> Result<PluginOutput, Error>; 42 | 43 | // TODO: run after the result is returned 44 | // async fn callback(&mut self, query_router: &QueryRouter); 45 | } 46 | -------------------------------------------------------------------------------- /src/plugins/prewarmer.rs: -------------------------------------------------------------------------------- 1 | //! Prewarm new connections before giving them to the client.
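//!
//! A minimal configuration sketch (key names assumed, not verified against
//! `config.rs`), e.g. warming the OS cache with the `pg_prewarm` extension:
//!
//! ```toml
//! [plugins.prewarmer]
//! enabled = true
//! queries = ["SELECT pg_prewarm('pgbench_accounts')"]
//! ```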
2 | use crate::{errors::Error, server::Server}; 3 | use log::info; 4 | 5 | pub struct Prewarmer<'a> { 6 | pub enabled: bool, 7 | pub server: &'a mut Server, 8 | pub queries: &'a Vec<String>, 9 | } 10 | 11 | impl<'a> Prewarmer<'a> { 12 | pub async fn run(&mut self) -> Result<(), Error> { 13 | if !self.enabled { 14 | return Ok(()); 15 | } 16 | 17 | for query in self.queries { 18 | info!( 19 | "{} Prewarming with query: `{}`", 20 | self.server.address(), 21 | query 22 | ); 23 | self.server.query(query).await?; 24 | } 25 | 26 | Ok(()) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/plugins/query_logger.rs: -------------------------------------------------------------------------------- 1 | //! Log all queries to stdout (or somewhere else, why not). 2 | 3 | use crate::{ 4 | errors::Error, 5 | plugins::{Plugin, PluginOutput}, 6 | query_router::QueryRouter, 7 | }; 8 | use async_trait::async_trait; 9 | use log::info; 10 | use sqlparser::ast::Statement; 11 | 12 | pub struct QueryLogger<'a> { 13 | pub enabled: bool, 14 | pub user: &'a str, 15 | pub db: &'a str, 16 | } 17 | 18 | #[async_trait] 19 | impl<'a> Plugin for QueryLogger<'a> { 20 | async fn run( 21 | &mut self, 22 | _query_router: &QueryRouter, 23 | ast: &Vec<Statement>, 24 | ) -> Result<PluginOutput, Error> { 25 | if !self.enabled { 26 | return Ok(PluginOutput::Allow); 27 | } 28 | 29 | let query = ast 30 | .iter() 31 | .map(|q| q.to_string()) 32 | .collect::<Vec<String>>() 33 | .join("; "); 34 | info!("[pool: {}][user: {}] {}", self.db, self.user, query); 35 | 36 | Ok(PluginOutput::Allow) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/plugins/table_access.rs: -------------------------------------------------------------------------------- 1 | //! This query router plugin will check if the user can access a particular 2 | //! table as part of their query. If they can't, the query will not be routed. 3 | 4 | use async_trait::async_trait; 5 | use sqlparser::ast::{visit_relations, Statement}; 6 | 7 | use crate::{ 8 | errors::Error, 9 | plugins::{Plugin, PluginOutput}, 10 | query_router::QueryRouter, 11 | }; 12 | 13 | use log::debug; 14 | 15 | use core::ops::ControlFlow; 16 | 17 | pub struct TableAccess<'a> { 18 | pub enabled: bool, 19 | pub tables: &'a Vec<String>, 20 | } 21 | 22 | #[async_trait] 23 | impl<'a> Plugin for TableAccess<'a> { 24 | async fn run( 25 | &mut self, 26 | _query_router: &QueryRouter, 27 | ast: &Vec<Statement>, 28 | ) -> Result<PluginOutput, Error> { 29 | if !self.enabled { 30 | return Ok(PluginOutput::Allow); 31 | } 32 | 33 | let mut found = None; 34 | 35 | visit_relations(ast, |relation| { 36 | let relation = relation.to_string(); 37 | let parts = relation.split('.').collect::<Vec<&str>>(); 38 | let table_name = parts.last().unwrap(); 39 | 40 | if self.tables.contains(&table_name.to_string()) { 41 | found = Some(table_name.to_string()); 42 | ControlFlow::<()>::Break(()) 43 | } else { 44 | ControlFlow::<()>::Continue(()) 45 | } 46 | }); 47 | 48 | if let Some(found) = found { 49 | debug!("Blocking access to table \"{}\"", found); 50 | 51 | Ok(PluginOutput::Deny(format!( 52 | "permission for table \"{}\" denied", 53 | found 54 | ))) 55 | } else { 56 | Ok(PluginOutput::Allow) 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/sharding.rs: -------------------------------------------------------------------------------- 1 | use serde_derive::{Deserialize, Serialize}; 2 | /// Implements various sharding functions.
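// Usage sketch, mirroring the unit tests at the bottom of this file:
//
//     let sharder = Sharder::new(5, ShardingFunction::PgBigintHash);
//     assert_eq!(sharder.shard(1), 0); // key 1 maps to shard 0 of 5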
3 | use sha1::{Digest, Sha1}; 4 | 5 | /// See: `HASH_PARTITION_SEED` in the Postgres source. 6 | const PARTITION_HASH_SEED: u64 = 0x7A5B22367996DCFD; 7 | 8 | /// The sharding functions we support. 9 | #[derive(Debug, PartialEq, Copy, Clone, Serialize, Deserialize, Hash, std::cmp::Eq)] 10 | pub enum ShardingFunction { 11 | #[serde(alias = "pg_bigint_hash", alias = "PgBigintHash")] 12 | PgBigintHash, 13 | #[serde(alias = "sha1", alias = "Sha1")] 14 | Sha1, 15 | } 16 | 17 | impl std::fmt::Display for ShardingFunction { 18 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 19 | match self { 20 | ShardingFunction::PgBigintHash => write!(f, "pg_bigint_hash"), 21 | ShardingFunction::Sha1 => write!(f, "sha1"), 22 | } 23 | } 24 | } 25 | 26 | /// The sharder. 27 | pub struct Sharder { 28 | /// Number of shards in the cluster. 29 | shards: usize, 30 | 31 | /// The sharding function in use. 32 | sharding_function: ShardingFunction, 33 | } 34 | 35 | impl Sharder { 36 | /// Create new instance of the sharder. 37 | pub fn new(shards: usize, sharding_function: ShardingFunction) -> Sharder { 38 | Sharder { 39 | shards, 40 | sharding_function, 41 | } 42 | } 43 | 44 | /// Compute the shard given a sharding key. 45 | pub fn shard(&self, key: i64) -> usize { 46 | match self.sharding_function { 47 | ShardingFunction::PgBigintHash => self.pg_bigint_hash(key), 48 | ShardingFunction::Sha1 => self.sha1(key), 49 | } 50 | } 51 | 52 | /// Hash function used by Postgres to determine which partition 53 | /// to put the row in when using HASH(column) partitioning. 54 | /// Source: `hashint8extended()` and friends in the Postgres source. 55 | /// Supports only 1 bigint at the moment, but we can add more later. 56 | fn pg_bigint_hash(&self, key: i64) -> usize { 57 | let mut lohalf = key as u32; 58 | let hihalf = (key >> 32) as u32; 59 | lohalf ^= if key >= 0 { hihalf } else { !hihalf }; 60 | Self::combine(0, Self::pg_u32_hash(lohalf)) as usize % self.shards 61 | } 62 | 63 | /// Example of a hashing function based on SHA1. 64 | fn sha1(&self, key: i64) -> usize { 65 | let mut hasher = Sha1::new(); 66 | 67 | hasher.update(key.to_string().as_bytes()); 68 | 69 | let result = hasher.finalize(); 70 | 71 | // Convert the SHA1 hash into hex so we can parse it as a large integer. 72 | let hex = format!("{:x}", result); 73 | 74 | // Parse the last 8 hex characters (4 bytes) as an integer.
75 | let key = i64::from_str_radix(&hex[hex.len() - 8..], 16).unwrap() as usize; 76 | 77 | key % self.shards 78 | } 79 | 80 | #[inline] 81 | fn rot(x: u32, k: u32) -> u32 { 82 | (x << k) | (x >> (32 - k)) 83 | } 84 | 85 | #[inline] 86 | fn mix(mut a: u32, mut b: u32, mut c: u32) -> (u32, u32, u32) { 87 | a = a.wrapping_sub(c); 88 | a ^= Self::rot(c, 4); 89 | c = c.wrapping_add(b); 90 | 91 | b = b.wrapping_sub(a); 92 | b ^= Self::rot(a, 6); 93 | a = a.wrapping_add(c); 94 | 95 | c = c.wrapping_sub(b); 96 | c ^= Self::rot(b, 8); 97 | b = b.wrapping_add(a); 98 | 99 | a = a.wrapping_sub(c); 100 | a ^= Self::rot(c, 16); 101 | c = c.wrapping_add(b); 102 | 103 | b = b.wrapping_sub(a); 104 | b ^= Self::rot(a, 19); 105 | a = a.wrapping_add(c); 106 | 107 | c = c.wrapping_sub(b); 108 | c ^= Self::rot(b, 4); 109 | b = b.wrapping_add(a); 110 | 111 | (a, b, c) 112 | } 113 | 114 | #[inline] 115 | fn _final(mut a: u32, mut b: u32, mut c: u32) -> (u32, u32, u32) { 116 | c ^= b; 117 | c = c.wrapping_sub(Self::rot(b, 14)); 118 | a ^= c; 119 | a = a.wrapping_sub(Self::rot(c, 11)); 120 | b ^= a; 121 | b = b.wrapping_sub(Self::rot(a, 25)); 122 | c ^= b; 123 | c = c.wrapping_sub(Self::rot(b, 16)); 124 | a ^= c; 125 | a = a.wrapping_sub(Self::rot(c, 4)); 126 | b ^= a; 127 | b = b.wrapping_sub(Self::rot(a, 14)); 128 | c ^= b; 129 | c = c.wrapping_sub(Self::rot(b, 24)); 130 | (a, b, c) 131 | } 132 | 133 | #[inline] 134 | fn combine(mut a: u64, b: u64) -> u64 { 135 | a ^= b 136 | .wrapping_add(0x49a0f4dd15e5a8e3_u64) 137 | .wrapping_add(a << 54) 138 | .wrapping_add(a >> 7); 139 | a 140 | } 141 | 142 | #[inline] 143 | fn pg_u32_hash(k: u32) -> u64 { 144 | let mut a: u32 = 0x9e3779b9_u32 + std::mem::size_of::<u32>() as u32 + 3923095_u32; 145 | let mut b = a; 146 | let c = a; 147 | 148 | a = a.wrapping_add((PARTITION_HASH_SEED >> 32) as u32); 149 | b = b.wrapping_add(PARTITION_HASH_SEED as u32); 150 | let (mut a, b, c) = Self::mix(a, b, c); 151 | 152 | a = a.wrapping_add(k); 153 | 154 | let (_a, b, c) = Self::_final(a, b, c); 155 | 156 | ((b as u64) << 32) | (c as u64) 157 | } 158 | } 159 | 160 | #[cfg(test)] 161 | mod test { 162 | use super::*; 163 | 164 | // See tests/sharding/partition_hash_test_setup.sql 165 | // The output of those SELECT statements will match this test, 166 | // confirming that we implemented Postgres BIGINT hashing correctly.
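// For a live spot check (hypothetical table name), Postgres can confirm the
// same placement with its built-in helper:
//
//     CREATE TABLE hash_check (id BIGINT) PARTITION BY HASH (id);
//     SELECT satisfies_hash_partition('hash_check'::regclass, 5, 0, 1::bigint);
//
// which returns true, matching `sharder.shard(1) == 0` in the test below.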
167 | #[test] 168 | fn test_pg_bigint_hash() { 169 | let sharder = Sharder::new(5, ShardingFunction::PgBigintHash); 170 | 171 | let shard_0 = vec![1, 4, 5, 14, 19, 39, 40, 46, 47, 53]; 172 | 173 | for v in shard_0 { 174 | assert_eq!(sharder.shard(v), 0); 175 | } 176 | 177 | let shard_1 = vec![2, 3, 11, 17, 21, 23, 30, 49, 51, 54]; 178 | 179 | for v in shard_1 { 180 | assert_eq!(sharder.shard(v), 1); 181 | } 182 | 183 | let shard_2 = vec![6, 7, 15, 16, 18, 20, 25, 28, 34, 35]; 184 | 185 | for v in shard_2 { 186 | assert_eq!(sharder.shard(v), 2); 187 | } 188 | 189 | let shard_3 = vec![8, 12, 13, 22, 29, 31, 33, 36, 41, 43]; 190 | 191 | for v in shard_3 { 192 | assert_eq!(sharder.shard(v), 3); 193 | } 194 | 195 | let shard_4 = vec![9, 10, 24, 26, 27, 32, 37, 38, 42, 45]; 196 | 197 | for v in shard_4 { 198 | assert_eq!(sharder.shard(v), 4); 199 | } 200 | } 201 | 202 | #[test] 203 | fn test_sha1_hash() { 204 | let sharder = Sharder::new(12, ShardingFunction::Sha1); 205 | let ids = [ 206 | 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 207 | ]; 208 | let shards = [ 209 | 4, 7, 8, 3, 6, 0, 0, 10, 3, 11, 1, 7, 4, 4, 11, 2, 5, 0, 8, 3, 210 | ]; 211 | 212 | for (i, id) in ids.iter().enumerate() { 213 | assert_eq!(sharder.shard(*id), shards[i]); 214 | } 215 | } 216 | } 217 | -------------------------------------------------------------------------------- /src/stats.rs: -------------------------------------------------------------------------------- 1 | /// Statistics and reporting. 2 | use arc_swap::ArcSwap; 3 | 4 | use log::{info, warn}; 5 | use once_cell::sync::Lazy; 6 | use parking_lot::RwLock; 7 | use std::collections::HashMap; 8 | 9 | use std::sync::Arc; 10 | 11 | // Structs that hold stats for different resources 12 | pub mod address; 13 | pub mod client; 14 | pub mod pool; 15 | pub mod server; 16 | pub use address::AddressStats; 17 | pub use client::{ClientState, ClientStats}; 18 | pub use server::{ServerState, ServerStats}; 19 | 20 | /// Convenience types for various stats 21 | type ClientStatesLookup = HashMap<i32, Arc<ClientStats>>; 22 | type ServerStatesLookup = HashMap<i32, Arc<ServerStats>>; 23 | 24 | /// Stats for individual client connections 25 | /// Used in SHOW CLIENTS. 26 | static CLIENT_STATS: Lazy<Arc<RwLock<ClientStatesLookup>>> = 27 | Lazy::new(|| Arc::new(RwLock::new(ClientStatesLookup::default()))); 28 | 29 | /// Stats for individual server connections 30 | /// Used in SHOW SERVERS. 31 | static SERVER_STATS: Lazy<Arc<RwLock<ServerStatesLookup>>> = 32 | Lazy::new(|| Arc::new(RwLock::new(ServerStatesLookup::default()))); 33 | 34 | /// The statistics reporter. An instance is given to each possible source of statistics, 35 | /// e.g. client stats, server stats, connection pool stats. 36 | pub static REPORTER: Lazy<ArcSwap<Reporter>> = 37 | Lazy::new(|| ArcSwap::from_pointee(Reporter::default())); 38 | 39 | /// Statistics period used for average calculations. 40 | /// 15 seconds. 41 | static STAT_PERIOD: u64 = 15000; 42 | 43 | /// The statistics reporter. An instance is given 44 | /// to each possible source of statistics, 45 | /// e.g. clients, servers, connection pool. 46 | #[derive(Clone, Debug, Default)] 47 | pub struct Reporter {} 48 | 49 | impl Reporter { 50 | /// Register a client with the stats system.
51 | /// to track and aggregate statistics from all sources that relate to that client.
52 | fn client_register(&self, client_id: i32, stats: Arc<ClientStats>) {
53 | if CLIENT_STATS.read().get(&client_id).is_some() {
54 | warn!("Client {:?} was double registered!", client_id);
55 | return;
56 | }
57 | 
58 | CLIENT_STATS.write().insert(client_id, stats);
59 | }
60 | 
61 | /// Reports a client is disconnecting from the pooler.
62 | fn client_disconnecting(&self, client_id: i32) {
63 | CLIENT_STATS.write().remove(&client_id);
64 | }
65 | 
66 | /// Register a server connection with the stats system. The stats system uses server_id
67 | /// to track and aggregate statistics from all sources that relate to that server.
68 | fn server_register(&self, server_id: i32, stats: Arc<ServerStats>) {
69 | SERVER_STATS.write().insert(server_id, stats);
70 | }
71 | /// Reports a server connection is disconnecting from the pooler.
72 | fn server_disconnecting(&self, server_id: i32) {
73 | SERVER_STATS.write().remove(&server_id);
74 | }
75 | }
76 | 
77 | /// The statistics collector, which is used for calculating averages.
78 | /// There is only one collector (kind of like a singleton);
79 | /// it updates averages every 15 seconds.
80 | #[derive(Default)]
81 | pub struct Collector {}
82 | 
83 | impl Collector {
84 | /// The statistics collection handler. Spawns a task that periodically
85 | /// refreshes address stat averages for all registered server connections.
86 | pub async fn collect(&mut self) {
87 | info!("Events reporter started");
88 | 
89 | tokio::task::spawn(async move {
90 | let mut interval =
91 | tokio::time::interval(tokio::time::Duration::from_millis(STAT_PERIOD));
92 | loop {
93 | interval.tick().await;
94 | 
95 | // Hold read lock for duration of update to retain all server stats
96 | let server_stats = SERVER_STATS.read();
97 | 
98 | for stats in server_stats.values() {
99 | if !stats.check_address_stat_average_is_updated_status() {
100 | stats.address_stats().update_averages();
101 | stats.address_stats().reset_current_counts();
102 | stats.set_address_stat_average_is_updated_status(true);
103 | }
104 | }
105 | 
106 | // Reset to false for next update
107 | for stats in server_stats.values() {
108 | stats.set_address_stat_average_is_updated_status(false);
109 | }
110 | }
111 | });
112 | }
113 | }
114 | 
115 | /// Get a snapshot of client statistics,
116 | /// as kept up to date by the `Collector`.
117 | pub fn get_client_stats() -> ClientStatesLookup {
118 | CLIENT_STATS.read().clone()
119 | }
120 | 
121 | /// Get a snapshot of server statistics,
122 | /// as kept up to date by the `Collector`.
123 | pub fn get_server_stats() -> ServerStatesLookup {
124 | SERVER_STATS.read().clone()
125 | }
126 | 
127 | /// Get the statistics reporter used to update stats across the pools/clients.
128 | pub fn get_reporter() -> Reporter {
129 | (*(*REPORTER.load())).clone()
130 | }
131 | 
--------------------------------------------------------------------------------
/src/stats/pool.rs:
--------------------------------------------------------------------------------
1 | use log::debug;
2 | 
3 | use super::{ClientState, ServerState};
4 | use crate::{config::PoolMode, messages::DataType, pool::PoolIdentifier};
5 | use std::collections::HashMap;
6 | use std::sync::atomic::*;
7 | 
8 | use crate::pool::get_all_pools;
9 | 
10 | #[derive(Debug, Clone)]
11 | /// A struct that holds information about a Pool.
12 | pub struct PoolStats {
13 | pub identifier: PoolIdentifier,
14 | pub mode: PoolMode,
15 | pub cl_idle: u64,
16 | pub cl_active: u64,
17 | pub cl_waiting: u64,
18 | pub cl_cancel_req: u64,
19 | pub sv_active: u64,
20 | pub sv_idle: u64,
21 | pub sv_used: u64,
22 | pub sv_tested: u64,
23 | pub sv_login: u64,
24 | pub maxwait: u64,
25 | }
26 | impl PoolStats {
27 | pub fn new(identifier: PoolIdentifier, mode: PoolMode) -> Self {
28 | PoolStats {
29 | identifier,
30 | mode,
31 | cl_idle: 0,
32 | cl_active: 0,
33 | cl_waiting: 0,
34 | cl_cancel_req: 0,
35 | sv_active: 0,
36 | sv_idle: 0,
37 | sv_used: 0,
38 | sv_tested: 0,
39 | sv_login: 0,
40 | maxwait: 0,
41 | }
42 | }
43 | 
44 | pub fn construct_pool_lookup() -> HashMap<PoolIdentifier, PoolStats> {
45 | let mut map: HashMap<PoolIdentifier, PoolStats> = HashMap::new();
46 | let client_map = super::get_client_stats();
47 | let server_map = super::get_server_stats();
48 | 
49 | for (identifier, pool) in get_all_pools() {
50 | map.insert(
51 | identifier.clone(),
52 | PoolStats::new(identifier, pool.settings.pool_mode),
53 | );
54 | }
55 | 
56 | for client in client_map.values() {
57 | match map.get_mut(&PoolIdentifier {
58 | db: client.pool_name(),
59 | user: client.username(),
60 | }) {
61 | Some(pool_stats) => {
62 | match client.state.load(Ordering::Relaxed) {
63 | ClientState::Active => pool_stats.cl_active += 1,
64 | ClientState::Idle => pool_stats.cl_idle += 1,
65 | ClientState::Waiting => pool_stats.cl_waiting += 1,
66 | }
67 | let wait_start_us = client.wait_start_us.load(Ordering::Relaxed);
68 | if wait_start_us > 0 {
69 | let wait_time_us = client.get_current_wait_time_us();
70 | pool_stats.maxwait = std::cmp::max(pool_stats.maxwait, wait_time_us);
71 | }
72 | }
73 | None => debug!("Client from an obsolete pool"),
74 | }
75 | }
76 | 
77 | for server in server_map.values() {
78 | match map.get_mut(&PoolIdentifier {
79 | db: server.pool_name(),
80 | user: server.username(),
81 | }) {
82 | Some(pool_stats) => match server.state.load(Ordering::Relaxed) {
83 | ServerState::Active => pool_stats.sv_active += 1,
84 | ServerState::Idle => pool_stats.sv_idle += 1,
85 | ServerState::Login => pool_stats.sv_login += 1,
86 | ServerState::Tested => pool_stats.sv_tested += 1,
87 | },
88 | None => debug!("Server from an obsolete pool"),
89 | }
90 | }
91 | 
92 | map
93 | }
94 | 
95 | pub fn generate_header() -> Vec<(&'static str, DataType)> {
96 | vec![
97 | ("database", DataType::Text),
98 | ("user", DataType::Text),
99 | ("pool_mode", DataType::Text),
100 | ("cl_idle", DataType::Numeric),
101 | ("cl_active", DataType::Numeric),
102 | ("cl_waiting", DataType::Numeric),
103 | ("cl_cancel_req", DataType::Numeric),
104 | ("sv_active", DataType::Numeric),
105 | ("sv_idle", DataType::Numeric),
106 | ("sv_used", DataType::Numeric),
107 | ("sv_tested", DataType::Numeric),
108 | ("sv_login", DataType::Numeric),
109 | ("maxwait", DataType::Numeric),
110 | ("maxwait_us", DataType::Numeric),
111 | ]
112 | }
113 | 
114 | pub fn generate_row(&self) -> Vec<String> {
115 | vec![
116 | self.identifier.db.clone(),
117 | self.identifier.user.clone(),
118 | self.mode.to_string(),
119 | self.cl_idle.to_string(),
120 | self.cl_active.to_string(),
121 | self.cl_waiting.to_string(),
122 | self.cl_cancel_req.to_string(),
123 | self.sv_active.to_string(),
124 | self.sv_idle.to_string(),
125 | self.sv_used.to_string(),
126 | self.sv_tested.to_string(),
127 | self.sv_login.to_string(),
128 | (self.maxwait / 1_000_000).to_string(),
129 | (self.maxwait % 1_000_000).to_string(),
130 | ]
131 | }
132 | }
133 | 
134 | impl IntoIterator for
PoolStats {
135 | type Item = (String, u64);
136 | type IntoIter = std::vec::IntoIter<Self::Item>;
137 | 
138 | fn into_iter(self) -> Self::IntoIter {
139 | vec![
140 | ("cl_idle".to_string(), self.cl_idle),
141 | ("cl_active".to_string(), self.cl_active),
142 | ("cl_waiting".to_string(), self.cl_waiting),
143 | ("cl_cancel_req".to_string(), self.cl_cancel_req),
144 | ("sv_active".to_string(), self.sv_active),
145 | ("sv_idle".to_string(), self.sv_idle),
146 | ("sv_used".to_string(), self.sv_used),
147 | ("sv_tested".to_string(), self.sv_tested),
148 | ("sv_login".to_string(), self.sv_login),
149 | ("maxwait".to_string(), self.maxwait / 1_000_000),
150 | ("maxwait_us".to_string(), self.maxwait % 1_000_000),
151 | ]
152 | .into_iter()
153 | }
154 | }
155 | 
--------------------------------------------------------------------------------
/src/tls.rs:
--------------------------------------------------------------------------------
1 | // Stream wrapper.
2 | 
3 | use rustls_pemfile::{certs, read_one, Item};
4 | use std::iter;
5 | use std::path::Path;
6 | use std::sync::Arc;
7 | use std::time::SystemTime;
8 | use tokio_rustls::rustls::{
9 | self,
10 | client::{ServerCertVerified, ServerCertVerifier},
11 | Certificate, PrivateKey, ServerName,
12 | };
13 | use tokio_rustls::TlsAcceptor;
14 | 
15 | use crate::config::get_config;
16 | use crate::errors::Error;
17 | 
18 | // TLS
19 | pub fn load_certs(path: &Path) -> std::io::Result<Vec<Certificate>> {
20 | certs(&mut std::io::BufReader::new(std::fs::File::open(path)?))
21 | .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidInput, "invalid cert"))
22 | .map(|mut certs| certs.drain(..).map(Certificate).collect())
23 | }
24 | 
25 | pub fn load_keys(path: &Path) -> std::io::Result<Vec<PrivateKey>> {
26 | let mut rd = std::io::BufReader::new(std::fs::File::open(path)?);
27 | 
28 | iter::from_fn(|| read_one(&mut rd).transpose())
29 | .filter_map(|item| match item {
30 | Err(err) => Some(Err(err)),
31 | Ok(Item::RSAKey(key)) => Some(Ok(PrivateKey(key))),
32 | Ok(Item::ECKey(key)) => Some(Ok(PrivateKey(key))),
33 | Ok(Item::PKCS8Key(key)) => Some(Ok(PrivateKey(key))),
34 | _ => None,
35 | })
36 | .collect()
37 | }
38 | 
39 | pub struct Tls {
40 | pub acceptor: TlsAcceptor,
41 | }
42 | 
43 | impl Tls {
44 | pub fn new() -> Result<Tls, Error> {
45 | let config = get_config();
46 | 
47 | let certs = match load_certs(Path::new(&config.general.tls_certificate.unwrap())) {
48 | Ok(certs) => certs,
49 | Err(_) => return Err(Error::TlsError),
50 | };
51 | 
52 | let mut keys = match load_keys(Path::new(&config.general.tls_private_key.unwrap())) {
53 | Ok(keys) => keys,
54 | Err(_) => return Err(Error::TlsError),
55 | };
56 | 
57 | let config = match rustls::ServerConfig::builder()
58 | .with_safe_defaults()
59 | .with_no_client_auth()
60 | .with_single_cert(certs, keys.remove(0))
61 | .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidInput, err))
62 | {
63 | Ok(c) => c,
64 | Err(_) => return Err(Error::TlsError),
65 | };
66 | 
67 | Ok(Tls {
68 | acceptor: TlsAcceptor::from(Arc::new(config)),
69 | })
70 | }
71 | }
72 | 
73 | pub struct NoCertificateVerification;
74 | 
75 | impl ServerCertVerifier for NoCertificateVerification {
76 | fn verify_server_cert(
77 | &self,
78 | _end_entity: &Certificate,
79 | _intermediates: &[Certificate],
80 | _server_name: &ServerName,
81 | _scts: &mut dyn Iterator<Item = &[u8]>,
82 | _ocsp_response: &[u8],
83 | _now: SystemTime,
84 | ) -> Result<ServerCertVerified, rustls::Error> {
85 | Ok(ServerCertVerified::assertion())
86 | }
87 | }
88 | 
--------------------------------------------------------------------------------
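A short, hypothetical sketch of how the `Tls` acceptor above is typically driven. This is caller code assumed for illustration, not a file in this repository; `listener` is presumed to be a bound `tokio::net::TcpListener`:

    // Hypothetical accept loop wiring up src/tls.rs.
    let tls = Tls::new()?; // builds the rustls ServerConfig from pgcat's config
    let (stream, _addr) = listener.accept().await?;
    // Perform the server-side TLS handshake for the incoming client.
    let tls_stream = tls.acceptor.accept(stream).await?;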
/start_test_env.sh:
--------------------------------------------------------------------------------
1 | GREEN="\033[0;32m"
2 | RED="\033[0;31m"
3 | BLUE="\033[0;34m"
4 | RESET="\033[0m"
5 | 
6 | 
7 | cd tests/docker/
8 | docker compose kill main || true
9 | docker compose build main
10 | docker compose down
11 | docker compose up -d
12 | # wait for the container to start
13 | while ! docker compose exec main ls; do
14 | echo "Waiting for test environment to start"
15 | sleep 1
16 | done
17 | echo "==================================="
18 | docker compose exec -e LOG_LEVEL=error -d main toxiproxy-server
19 | docker compose exec --workdir /app main cargo build
20 | docker compose exec -d --workdir /app main ./target/debug/pgcat ./.circleci/pgcat.toml
21 | docker compose exec --workdir /app/tests/ruby main bundle install
22 | docker compose exec --workdir /app/tests/python main pip3 install -r requirements.txt
23 | echo "Interactive test environment ready"
24 | echo "To run integration tests, you can use the following commands:"
25 | echo -e " ${BLUE}Ruby: ${RED}cd /app/tests/ruby && bundle exec ruby tests.rb --format documentation${RESET}"
26 | echo -e " ${BLUE}Python: ${RED}cd /app/ && pytest ${RESET}"
27 | echo -e " ${BLUE}Rust: ${RED}cd /app/tests/rust && cargo run ${RESET}"
28 | echo -e " ${BLUE}Go: ${RED}cd /app/tests/go && /usr/local/go/bin/go test${RESET}"
29 | echo "The source code for the tests is linked directly into the container, so you can modify the code and run the tests again"
30 | echo "You can rebuild PgCat from within the container by running"
31 | echo -e " ${GREEN}cargo build${RESET}"
32 | echo "and then run the tests again"
33 | echo "==================================="
34 | docker compose exec --workdir /app/tests main bash
35 | 
--------------------------------------------------------------------------------
/tests/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM rust:1.81.0-slim-bookworm
2 | 
3 | COPY --from=sclevine/yj /bin/yj /bin/yj
4 | RUN /bin/yj -h
5 | RUN apt-get update && apt-get install llvm-11 psmisc postgresql-contrib postgresql-client ruby ruby-dev libpq-dev python3 python3-pip lcov curl sudo iproute2 -y
6 | RUN cargo install cargo-binutils rustfilt
7 | RUN rustup component add llvm-tools-preview
8 | RUN sudo gem install bundler
9 | RUN wget -O toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \
10 | sudo dpkg -i toxiproxy-2.4.0.deb
11 | RUN wget -O go1.21.3.linux-$(dpkg --print-architecture).tar.gz https://go.dev/dl/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
12 | sudo tar -C /usr/local -xzf go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
13 | rm go1.21.3.linux-$(dpkg --print-architecture).tar.gz
14 | 
--------------------------------------------------------------------------------
/tests/docker/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | pg1:
3 | image: postgres:14
4 | network_mode: "service:main"
5 | environment:
6 | POSTGRES_USER: postgres
7 | POSTGRES_DB: postgres
8 | POSTGRES_PASSWORD: postgres
9 | POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5
10 | command: ["postgres", "-p", "5432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
11 | pg2:
12 | image: postgres:14
13 | network_mode: "service:main"
14
| environment: 15 | POSTGRES_USER: postgres 16 | POSTGRES_DB: postgres 17 | POSTGRES_PASSWORD: postgres 18 | POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256 19 | command: ["postgres", "-p", "7432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"] 20 | pg3: 21 | image: postgres:14 22 | network_mode: "service:main" 23 | environment: 24 | POSTGRES_USER: postgres 25 | POSTGRES_DB: postgres 26 | POSTGRES_PASSWORD: postgres 27 | POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256 28 | command: ["postgres", "-p", "8432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"] 29 | pg4: 30 | image: postgres:14 31 | network_mode: "service:main" 32 | environment: 33 | POSTGRES_USER: postgres 34 | POSTGRES_DB: postgres 35 | POSTGRES_PASSWORD: postgres 36 | POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256 37 | command: ["postgres", "-p", "9432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"] 38 | pg5: 39 | image: postgres:14 40 | network_mode: "service:main" 41 | environment: 42 | POSTGRES_USER: postgres 43 | POSTGRES_DB: postgres 44 | POSTGRES_PASSWORD: postgres 45 | POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5 46 | command: ["postgres", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-p", "10432"] 47 | main: 48 | build: . 49 | command: ["bash", "/app/tests/docker/run.sh"] 50 | environment: 51 | - INTERACTIVE_TEST_ENVIRONMENT=true 52 | volumes: 53 | - ../../:/app/ 54 | - /app/target/ 55 | -------------------------------------------------------------------------------- /tests/docker/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rf /app/target/ || true 4 | rm /app/*.profraw || true 5 | rm /app/pgcat.profdata || true 6 | rm -rf /app/cov || true 7 | 8 | # Prepares the interactive test environment 9 | # 10 | if [ -n "$INTERACTIVE_TEST_ENVIRONMENT" ]; then 11 | ports=(5432 7432 8432 9432 10432) 12 | for port in "${ports[@]}"; do 13 | is_it_up=0 14 | attempts=0 15 | while [ $is_it_up -eq 0 ]; do 16 | PGPASSWORD=postgres psql -h 127.0.0.1 -p $port -U postgres -c '\q' > /dev/null 2>&1 17 | if [ $? -eq 0 ]; then 18 | echo "PostgreSQL on port $port is up." 19 | is_it_up=1 20 | else 21 | attempts=$((attempts+1)) 22 | if [ $attempts -gt 10 ]; then 23 | echo "PostgreSQL on port $port is down, giving up." 24 | exit 1 25 | fi 26 | echo "PostgreSQL on port $port is down, waiting for it to start." 
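# Poll roughly once per second; the attempts counter above gives up after ~10 tries.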
27 | sleep 1 28 | fi 29 | done 30 | done 31 | PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 5432 -U postgres -f /app/tests/sharding/query_routing_setup.sql 32 | PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 7432 -U postgres -f /app/tests/sharding/query_routing_setup.sql 33 | PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 8432 -U postgres -f /app/tests/sharding/query_routing_setup.sql 34 | PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 9432 -U postgres -f /app/tests/sharding/query_routing_setup.sql 35 | PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 10432 -U postgres -f /app/tests/sharding/query_routing_setup.sql 36 | sleep 100000000000000000 37 | exit 0 38 | fi 39 | 40 | export LLVM_PROFILE_FILE="/app/pgcat-%m-%p.profraw" 41 | export RUSTC_BOOTSTRAP=1 42 | export CARGO_INCREMENTAL=0 43 | export RUSTFLAGS="-Zprofile -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort -Cinstrument-coverage" 44 | export RUSTDOCFLAGS="-Cpanic=abort" 45 | 46 | cd /app/ 47 | cargo clean 48 | cargo build 49 | cargo test --tests 50 | 51 | bash .circleci/run_tests.sh 52 | 53 | TEST_OBJECTS=$( \ 54 | for file in $(cargo test --no-run 2>&1 | grep "target/debug/deps/pgcat-[[:alnum:]]\+" -o); \ 55 | do \ 56 | printf "%s %s " --object $file; \ 57 | done \ 58 | ) 59 | 60 | echo "Generating coverage report" 61 | 62 | rust-profdata merge -sparse /app/pgcat-*.profraw -o /app/pgcat.profdata 63 | 64 | bash -c "rust-cov export -ignore-filename-regex='rustc|registry' -Xdemangler=rustfilt -instr-profile=/app/pgcat.profdata $TEST_OBJECTS --object ./target/debug/pgcat --format lcov > ./lcov.info" 65 | 66 | genhtml lcov.info --title "PgCat Code Coverage" --css-file ./cov-style.css --highlight --no-function-coverage --ignore-errors source --legend --output-directory cov --prefix $(pwd) 67 | 68 | rm /app/*.profraw 69 | rm /app/pgcat.profdata 70 | -------------------------------------------------------------------------------- /tests/go/go.mod: -------------------------------------------------------------------------------- 1 | module pgcat 2 | 3 | go 1.21 4 | 5 | require github.com/lib/pq v1.10.9 6 | -------------------------------------------------------------------------------- /tests/go/go.sum: -------------------------------------------------------------------------------- 1 | github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= 2 | github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= 3 | -------------------------------------------------------------------------------- /tests/go/pgcat.toml: -------------------------------------------------------------------------------- 1 | # 2 | # PgCat config example. 3 | # 4 | 5 | # 6 | # General pooler settings 7 | [general] 8 | # What IP to run on, 0.0.0.0 means accessible from everywhere. 9 | host = "0.0.0.0" 10 | 11 | # Port to run on, same as PgBouncer used in this example. 12 | port = "${PORT}" 13 | 14 | # Whether to enable prometheus exporter or not. 15 | enable_prometheus_exporter = true 16 | 17 | # Port at which prometheus exporter listens on. 18 | prometheus_exporter_port = 9930 19 | 20 | # How long to wait before aborting a server connection (ms). 21 | connect_timeout = 1000 22 | 23 | # How much time to give the health check query to return with a result (ms). 
24 | healthcheck_timeout = 1000
25 | 
26 | # How long to keep a connection available for immediate re-use, without running a healthcheck query on it
27 | healthcheck_delay = 30000
28 | 
29 | # How much time to give clients during shutdown before forcibly killing client connections (ms).
30 | shutdown_timeout = 5000
31 | 
32 | # For how long to ban a server if it fails a health check (seconds).
33 | ban_time = 60 # Seconds
34 | 
35 | # If we should log client connections
36 | log_client_connections = false
37 | 
38 | # If we should log client disconnections
39 | log_client_disconnections = false
40 | 
41 | # Reload config automatically if it changes.
42 | autoreload = 15000
43 | 
44 | server_round_robin = false
45 | 
46 | # TLS
47 | tls_certificate = "../../.circleci/server.cert"
48 | tls_private_key = "../../.circleci/server.key"
49 | 
50 | # Credentials to access the virtual administrative database (pgbouncer or pgcat)
51 | # Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
52 | admin_username = "admin_user"
53 | admin_password = "admin_pass"
54 | 
55 | # pool
56 | # configs are structured as pool.<pool_name>
57 | # the pool_name is what clients use as database name when connecting
58 | # For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db"
59 | [pools.sharded_db]
60 | # Pool mode (see PgBouncer docs for more).
61 | # session: one server connection per connected client
62 | # transaction: one server connection per client transaction
63 | pool_mode = "transaction"
64 | 
65 | # If the client doesn't specify, route traffic to
66 | # this role by default.
67 | #
68 | # any: round-robin between primary and replicas,
69 | # replica: round-robin between replicas only without touching the primary,
70 | # primary: all queries go to the primary unless otherwise specified.
71 | default_role = "any"
72 | 
73 | # Query parser. If enabled, we'll attempt to parse
74 | # every incoming query to determine if it's a read or a write.
75 | # If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
76 | # we'll direct it to the primary.
77 | query_parser_enabled = true
78 | 
79 | # If the query parser is enabled and this setting is enabled, we'll attempt to
80 | # infer the role from the query itself.
81 | query_parser_read_write_splitting = true
82 | 
83 | # If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
84 | # load balancing of read queries. Otherwise, the primary will only be used for write
85 | # queries. The primary can always be explicitly selected with our custom protocol.
86 | primary_reads_enabled = true
87 | 
88 | # So what if you wanted to implement a different hashing function,
89 | # or you've already built one and you want this pooler to use it?
90 | #
91 | # Current options:
92 | #
93 | # pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
94 | # sha1: A hashing function based on SHA1
95 | #
96 | sharding_function = "pg_bigint_hash"
97 | 
98 | # Prepared statements cache size.
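# (Assumed semantics, for illustration only: caps how many distinct prepared
# statements pgcat keeps cached, evicting older entries once the cap is hit.)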
99 | prepared_statements_cache_size = 500
100 | 
101 | # Credentials for users that may connect to this cluster
102 | [pools.sharded_db.users.0]
103 | username = "sharding_user"
104 | password = "sharding_user"
105 | # Maximum number of server connections that can be established for this user.
106 | # The maximum number of connections from a single PgCat process to any database in the cluster
107 | # is the sum of pool_size across all users.
108 | pool_size = 5
109 | statement_timeout = 0
110 | 
111 | 
112 | [pools.sharded_db.users.1]
113 | username = "other_user"
114 | password = "other_user"
115 | pool_size = 21
116 | statement_timeout = 30000
117 | 
118 | # Shard 0
119 | [pools.sharded_db.shards.0]
120 | # [ host, port, role ]
121 | servers = [
122 | [ "127.0.0.1", 5432, "primary" ],
123 | [ "localhost", 5432, "replica" ]
124 | ]
125 | # Database name (e.g. "postgres")
126 | database = "shard0"
127 | 
128 | [pools.sharded_db.shards.1]
129 | servers = [
130 | [ "127.0.0.1", 5432, "primary" ],
131 | [ "localhost", 5432, "replica" ],
132 | ]
133 | database = "shard1"
134 | 
135 | [pools.sharded_db.shards.2]
136 | servers = [
137 | [ "127.0.0.1", 5432, "primary" ],
138 | [ "localhost", 5432, "replica" ],
139 | ]
140 | database = "shard2"
141 | 
142 | 
143 | [pools.simple_db]
144 | pool_mode = "session"
145 | default_role = "primary"
146 | query_parser_enabled = true
147 | query_parser_read_write_splitting = true
148 | primary_reads_enabled = true
149 | sharding_function = "pg_bigint_hash"
150 | 
151 | [pools.simple_db.users.0]
152 | username = "simple_user"
153 | password = "simple_user"
154 | pool_size = 5
155 | statement_timeout = 30000
156 | 
157 | [pools.simple_db.shards.0]
158 | servers = [
159 | [ "127.0.0.1", 5432, "primary" ],
160 | [ "localhost", 5432, "replica" ]
161 | ]
162 | database = "some_db"
163 | 
--------------------------------------------------------------------------------
/tests/go/prepared_test.go:
--------------------------------------------------------------------------------
1 | package pgcat
2 | 
3 | import (
4 | "context"
5 | "database/sql"
6 | "fmt"
7 | _ "github.com/lib/pq"
8 | "testing"
9 | )
10 | 
11 | func Test(t *testing.T) {
12 | t.Cleanup(setup(t))
13 | t.Run("Named parameterized prepared statement works", namedParameterizedPreparedStatement)
14 | t.Run("Unnamed parameterized prepared statement works", unnamedParameterizedPreparedStatement)
15 | }
16 | 
17 | func namedParameterizedPreparedStatement(t *testing.T) {
18 | db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=sharded_db user=sharding_user password=sharding_user sslmode=disable", port))
19 | if err != nil {
20 | t.Fatalf("could not open connection: %+v", err)
21 | }
22 | 
23 | stmt, err := db.Prepare("SELECT $1")
24 | 
25 | if err != nil {
26 | t.Fatalf("could not prepare: %+v", err)
27 | }
28 | 
29 | for i := 0; i < 100; i++ {
30 | rows, err := stmt.Query(1)
31 | if err != nil {
32 | t.Fatalf("could not query: %+v", err)
33 | }
34 | _ = rows.Close()
35 | }
36 | }
37 | 
38 | func unnamedParameterizedPreparedStatement(t *testing.T) {
39 | db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=sharded_db user=sharding_user password=sharding_user sslmode=disable", port))
40 | if err != nil {
41 | t.Fatalf("could not open connection: %+v", err)
42 | }
43 | 
44 | for i := 0; i < 100; i++ {
45 | // Under the hood QueryContext generates an unnamed parameterized prepared statement
46 | rows, err := db.QueryContext(context.Background(), "SELECT $1", 1)
47 | if err != nil {
48 |
t.Fatalf("could not query: %+v", err) 49 | } 50 | _ = rows.Close() 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /tests/go/setup.go: -------------------------------------------------------------------------------- 1 | package pgcat 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | _ "embed" 7 | "fmt" 8 | "math/rand" 9 | "os" 10 | "os/exec" 11 | "strings" 12 | "testing" 13 | "time" 14 | ) 15 | 16 | //go:embed pgcat.toml 17 | var pgcatCfg string 18 | 19 | var port = rand.Intn(32760-20000) + 20000 20 | 21 | func setup(t *testing.T) func() { 22 | cfg, err := os.CreateTemp("/tmp", "pgcat_cfg_*.toml") 23 | if err != nil { 24 | t.Fatalf("could not create temp file: %+v", err) 25 | } 26 | 27 | pgcatCfg = strings.Replace(pgcatCfg, "\"${PORT}\"", fmt.Sprintf("%d", port), 1) 28 | 29 | _, err = cfg.Write([]byte(pgcatCfg)) 30 | if err != nil { 31 | t.Fatalf("could not write temp file: %+v", err) 32 | } 33 | 34 | commandPath := "../../target/debug/pgcat" 35 | if os.Getenv("CARGO_TARGET_DIR") != "" { 36 | commandPath = os.Getenv("CARGO_TARGET_DIR") + "/debug/pgcat" 37 | } 38 | 39 | cmd := exec.Command(commandPath, cfg.Name()) 40 | cmd.Stdout = os.Stdout 41 | cmd.Stderr = os.Stderr 42 | go func() { 43 | err = cmd.Run() 44 | if err != nil { 45 | t.Errorf("could not run pgcat: %+v", err) 46 | } 47 | }() 48 | 49 | deadline, cancelFunc := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second)) 50 | defer cancelFunc() 51 | for { 52 | select { 53 | case <-deadline.Done(): 54 | break 55 | case <-time.After(50 * time.Millisecond): 56 | db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=pgcat user=admin_user password=admin_pass sslmode=disable", port)) 57 | if err != nil { 58 | continue 59 | } 60 | rows, err := db.QueryContext(deadline, "SHOW STATS") 61 | if err != nil { 62 | continue 63 | } 64 | _ = rows.Close() 65 | _ = db.Close() 66 | break 67 | } 68 | break 69 | } 70 | 71 | return func() { 72 | err := cmd.Process.Signal(os.Interrupt) 73 | if err != nil { 74 | t.Fatalf("could not interrupt pgcat: %+v", err) 75 | } 76 | err = os.Remove(cfg.Name()) 77 | if err != nil { 78 | t.Fatalf("could not remove temp file: %+v", err) 79 | } 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /tests/pgbench/simple.sql: -------------------------------------------------------------------------------- 1 | 2 | -- \setrandom aid 1 :naccounts 3 | \set aid random(1, 100000) 4 | -- \setrandom bid 1 :nbranches 5 | \set bid random(1, 100000) 6 | -- \setrandom tid 1 :ntellers 7 | \set tid random(1, 100000) 8 | -- \setrandom delta -5000 5000 9 | \set delta random(-5000,5000) 10 | 11 | \set shard random(0, 2) 12 | 13 | SET SHARD TO :shard; 14 | 15 | SET SERVER ROLE TO 'auto'; 16 | 17 | BEGIN; 18 | 19 | UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid; 20 | 21 | SELECT abalance FROM pgbench_accounts WHERE aid = :aid; 22 | 23 | UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid; 24 | 25 | UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid; 26 | 27 | INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP); 28 | 29 | END; 30 | 31 | SET SHARDING KEY TO :aid; 32 | 33 | -- Read load balancing 34 | SELECT abalance FROM pgbench_accounts WHERE aid = :aid; 35 | 36 | SET SERVER ROLE TO 'replica'; 37 | 38 | -- Read load balancing 39 | SELECT abalance FROM pgbench_accounts WHERE aid 
= :aid; -------------------------------------------------------------------------------- /tests/python/.gitignore: -------------------------------------------------------------------------------- 1 | venv/ -------------------------------------------------------------------------------- /tests/python/requirements.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | psycopg2==2.9.3 3 | psutil==5.9.1 4 | -------------------------------------------------------------------------------- /tests/python/test_auth.py: -------------------------------------------------------------------------------- 1 | import utils 2 | import signal 3 | 4 | class TestTrustAuth: 5 | @classmethod 6 | def setup_method(cls): 7 | config= """ 8 | [general] 9 | host = "0.0.0.0" 10 | port = 6432 11 | admin_username = "admin_user" 12 | admin_password = "" 13 | admin_auth_type = "trust" 14 | 15 | [pools.sharded_db.users.0] 16 | username = "sharding_user" 17 | password = "sharding_user" 18 | auth_type = "trust" 19 | pool_size = 10 20 | min_pool_size = 1 21 | pool_mode = "transaction" 22 | 23 | [pools.sharded_db.shards.0] 24 | servers = [ 25 | [ "127.0.0.1", 5432, "primary" ], 26 | ] 27 | database = "shard0" 28 | """ 29 | utils.pgcat_generic_start(config) 30 | 31 | @classmethod 32 | def teardown_method(self): 33 | utils.pg_cat_send_signal(signal.SIGTERM) 34 | 35 | def test_admin_trust_auth(self): 36 | conn, cur = utils.connect_db_trust(admin=True) 37 | cur.execute("SHOW POOLS") 38 | res = cur.fetchall() 39 | print(res) 40 | utils.cleanup_conn(conn, cur) 41 | 42 | def test_normal_trust_auth(self): 43 | conn, cur = utils.connect_db_trust(autocommit=False) 44 | cur.execute("SELECT 1") 45 | res = cur.fetchall() 46 | print(res) 47 | utils.cleanup_conn(conn, cur) 48 | 49 | class TestMD5Auth: 50 | @classmethod 51 | def setup_method(cls): 52 | utils.pgcat_start() 53 | 54 | @classmethod 55 | def teardown_method(self): 56 | utils.pg_cat_send_signal(signal.SIGTERM) 57 | 58 | def test_normal_db_access(self): 59 | conn, cur = utils.connect_db(autocommit=False) 60 | cur.execute("SELECT 1") 61 | res = cur.fetchall() 62 | print(res) 63 | utils.cleanup_conn(conn, cur) 64 | 65 | def test_admin_db_access(self): 66 | conn, cur = utils.connect_db(admin=True) 67 | 68 | cur.execute("SHOW POOLS") 69 | res = cur.fetchall() 70 | print(res) 71 | utils.cleanup_conn(conn, cur) 72 | -------------------------------------------------------------------------------- /tests/python/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import signal 3 | import time 4 | from typing import Tuple 5 | import tempfile 6 | 7 | import psutil 8 | import psycopg2 9 | 10 | PGCAT_HOST = "127.0.0.1" 11 | PGCAT_PORT = "6432" 12 | 13 | 14 | def _pgcat_start(config_path: str): 15 | pg_cat_send_signal(signal.SIGTERM) 16 | os.system(f"./target/debug/pgcat {config_path} &") 17 | time.sleep(2) 18 | 19 | 20 | def pgcat_start(): 21 | _pgcat_start(config_path='.circleci/pgcat.toml') 22 | 23 | 24 | def pgcat_generic_start(config: str): 25 | tmp = tempfile.NamedTemporaryFile() 26 | with open(tmp.name, 'w') as f: 27 | f.write(config) 28 | _pgcat_start(config_path=tmp.name) 29 | 30 | 31 | def glauth_send_signal(signal: signal.Signals): 32 | try: 33 | for proc in psutil.process_iter(["pid", "name"]): 34 | if proc.name() == "glauth": 35 | os.kill(proc.pid, signal) 36 | except Exception as e: 37 | # The process can be gone when we send this signal 38 | print(e) 39 | 40 | if signal == 
signal.SIGTERM: 41 | # Returns 0 if pgcat process exists 42 | time.sleep(2) 43 | if not os.system('pgrep glauth'): 44 | raise Exception("glauth not closed after SIGTERM") 45 | 46 | 47 | def pg_cat_send_signal(signal: signal.Signals): 48 | try: 49 | for proc in psutil.process_iter(["pid", "name"]): 50 | if "pgcat" == proc.name(): 51 | os.kill(proc.pid, signal) 52 | except Exception as e: 53 | # The process can be gone when we send this signal 54 | print(e) 55 | 56 | if signal == signal.SIGTERM: 57 | # Returns 0 if pgcat process exists 58 | time.sleep(2) 59 | if not os.system('pgrep pgcat'): 60 | raise Exception("pgcat not closed after SIGTERM") 61 | 62 | 63 | def connect_db( 64 | autocommit: bool = True, 65 | admin: bool = False, 66 | ) -> Tuple[psycopg2.extensions.connection, psycopg2.extensions.cursor]: 67 | 68 | if admin: 69 | user = "admin_user" 70 | password = "admin_pass" 71 | db = "pgcat" 72 | else: 73 | user = "sharding_user" 74 | password = "sharding_user" 75 | db = "sharded_db" 76 | 77 | conn = psycopg2.connect( 78 | f"postgres://{user}:{password}@{PGCAT_HOST}:{PGCAT_PORT}/{db}?application_name=testing_pgcat", 79 | connect_timeout=2, 80 | ) 81 | conn.autocommit = autocommit 82 | cur = conn.cursor() 83 | 84 | return (conn, cur) 85 | 86 | def connect_db_trust( 87 | autocommit: bool = True, 88 | admin: bool = False, 89 | ) -> Tuple[psycopg2.extensions.connection, psycopg2.extensions.cursor]: 90 | 91 | if admin: 92 | user = "admin_user" 93 | db = "pgcat" 94 | else: 95 | user = "sharding_user" 96 | db = "sharded_db" 97 | 98 | conn = psycopg2.connect( 99 | f"postgres://{user}@{PGCAT_HOST}:{PGCAT_PORT}/{db}?application_name=testing_pgcat", 100 | connect_timeout=2, 101 | ) 102 | conn.autocommit = autocommit 103 | cur = conn.cursor() 104 | 105 | return (conn, cur) 106 | 107 | 108 | def cleanup_conn(conn: psycopg2.extensions.connection, cur: psycopg2.extensions.cursor): 109 | cur.close() 110 | conn.close() 111 | -------------------------------------------------------------------------------- /tests/ruby/.ruby-version: -------------------------------------------------------------------------------- 1 | 3.0.0 2 | 3 | -------------------------------------------------------------------------------- /tests/ruby/Gemfile: -------------------------------------------------------------------------------- 1 | source "https://rubygems.org" 2 | 3 | gem "pg" 4 | gem "toml" 5 | gem "rspec" 6 | gem "rubocop" 7 | gem "toxiproxy" 8 | gem "activerecord" 9 | -------------------------------------------------------------------------------- /tests/ruby/Gemfile.lock: -------------------------------------------------------------------------------- 1 | GEM 2 | remote: https://rubygems.org/ 3 | specs: 4 | activemodel (7.1.4) 5 | activesupport (= 7.1.4) 6 | activerecord (7.1.4) 7 | activemodel (= 7.1.4) 8 | activesupport (= 7.1.4) 9 | timeout (>= 0.4.0) 10 | activesupport (7.1.4) 11 | base64 12 | bigdecimal 13 | concurrent-ruby (~> 1.0, >= 1.0.2) 14 | connection_pool (>= 2.2.5) 15 | drb 16 | i18n (>= 1.6, < 2) 17 | minitest (>= 5.1) 18 | mutex_m 19 | tzinfo (~> 2.0) 20 | ast (2.4.2) 21 | base64 (0.2.0) 22 | bigdecimal (3.1.8) 23 | concurrent-ruby (1.3.4) 24 | connection_pool (2.4.1) 25 | diff-lcs (1.5.0) 26 | drb (2.2.1) 27 | i18n (1.14.5) 28 | concurrent-ruby (~> 1.0) 29 | minitest (5.25.1) 30 | mutex_m (0.2.0) 31 | parallel (1.22.1) 32 | parser (3.1.2.0) 33 | ast (~> 2.4.1) 34 | parslet (2.0.0) 35 | pg (1.3.2) 36 | rainbow (3.1.1) 37 | regexp_parser (2.3.1) 38 | rexml (3.3.6) 39 | strscan 40 | rspec (3.11.0) 41 | 
rspec-core (~> 3.11.0) 42 | rspec-expectations (~> 3.11.0) 43 | rspec-mocks (~> 3.11.0) 44 | rspec-core (3.11.0) 45 | rspec-support (~> 3.11.0) 46 | rspec-expectations (3.11.0) 47 | diff-lcs (>= 1.2.0, < 2.0) 48 | rspec-support (~> 3.11.0) 49 | rspec-mocks (3.11.1) 50 | diff-lcs (>= 1.2.0, < 2.0) 51 | rspec-support (~> 3.11.0) 52 | rspec-support (3.11.0) 53 | rubocop (1.29.0) 54 | parallel (~> 1.10) 55 | parser (>= 3.1.0.0) 56 | rainbow (>= 2.2.2, < 4.0) 57 | regexp_parser (>= 1.8, < 3.0) 58 | rexml (>= 3.2.5, < 4.0) 59 | rubocop-ast (>= 1.17.0, < 2.0) 60 | ruby-progressbar (~> 1.7) 61 | unicode-display_width (>= 1.4.0, < 3.0) 62 | rubocop-ast (1.17.0) 63 | parser (>= 3.1.1.0) 64 | ruby-progressbar (1.11.0) 65 | strscan (3.1.0) 66 | timeout (0.4.1) 67 | toml (0.3.0) 68 | parslet (>= 1.8.0, < 3.0.0) 69 | toxiproxy (2.0.1) 70 | tzinfo (2.0.6) 71 | concurrent-ruby (~> 1.0) 72 | unicode-display_width (2.1.0) 73 | 74 | PLATFORMS 75 | aarch64-linux 76 | arm64-darwin-21 77 | x86_64-linux 78 | 79 | DEPENDENCIES 80 | activerecord 81 | pg 82 | rspec 83 | rubocop 84 | toml 85 | toxiproxy 86 | 87 | BUNDLED WITH 88 | 2.3.21 89 | -------------------------------------------------------------------------------- /tests/ruby/admin_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | require 'uri' 3 | require_relative 'spec_helper' 4 | 5 | describe "Admin" do 6 | let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 10) } 7 | let(:pgcat_conn_str) { processes.pgcat.connection_string("sharded_db", "sharding_user") } 8 | 9 | after do 10 | processes.all_databases.map(&:reset) 11 | processes.pgcat.shutdown 12 | end 13 | 14 | describe "Manual Banning" do 15 | let(:processes) { Helpers::Pgcat.single_shard_setup("sharded_db", 10) } 16 | before do 17 | new_configs = processes.pgcat.current_config 18 | # Prevent immediate unbanning when we ban localhost 19 | new_configs["pools"]["sharded_db"]["shards"]["0"]["servers"][0][0] = "127.0.0.1" 20 | new_configs["pools"]["sharded_db"]["shards"]["0"]["servers"][1][0] = "127.0.0.1" 21 | processes.pgcat.update_config(new_configs) 22 | processes.pgcat.reload_config 23 | end 24 | 25 | describe "BAN/UNBAN and SHOW BANS" do 26 | it "bans/unbans hosts" do 27 | admin_conn = PG::connect(processes.pgcat.admin_connection_string) 28 | 29 | # Returns a list of the banned addresses 30 | results = admin_conn.async_exec("BAN localhost 10").to_a 31 | expect(results.count).to eq(2) 32 | expect(results.map{ |r| r["host"] }.uniq).to eq(["localhost"]) 33 | 34 | # Subsequent calls should yield no results 35 | results = admin_conn.async_exec("BAN localhost 10").to_a 36 | expect(results.count).to eq(0) 37 | 38 | results = admin_conn.async_exec("SHOW BANS").to_a 39 | expect(results.count).to eq(2) 40 | expect(results.map{ |r| r["host"] }.uniq).to eq(["localhost"]) 41 | 42 | # Returns a list of the unbanned addresses 43 | results = admin_conn.async_exec("UNBAN localhost").to_a 44 | expect(results.count).to eq(2) 45 | expect(results.map{ |r| r["host"] }.uniq).to eq(["localhost"]) 46 | 47 | # Subsequent calls should yield no results 48 | results = admin_conn.async_exec("UNBAN localhost").to_a 49 | expect(results.count).to eq(0) 50 | 51 | results = admin_conn.async_exec("SHOW BANS").to_a 52 | expect(results.count).to eq(0) 53 | end 54 | 55 | it "honors ban duration" do 56 | admin_conn = PG::connect(processes.pgcat.admin_connection_string) 57 | 58 | # Returns a list of the banned addresses 59 | results = 
admin_conn.async_exec("BAN localhost 1").to_a 60 | expect(results.count).to eq(2) 61 | expect(results.map{ |r| r["host"] }.uniq).to eq(["localhost"]) 62 | 63 | sleep(2) 64 | 65 | # After 2 seconds the ban should be lifted 66 | results = admin_conn.async_exec("SHOW BANS").to_a 67 | expect(results.count).to eq(0) 68 | end 69 | 70 | it "can handle bad input" do 71 | admin_conn = PG::connect(processes.pgcat.admin_connection_string) 72 | 73 | expect { admin_conn.async_exec("BAN").to_a }.to raise_error(PG::SystemError) 74 | expect { admin_conn.async_exec("BAN a").to_a }.to raise_error(PG::SystemError) 75 | expect { admin_conn.async_exec("BAN a a").to_a }.to raise_error(PG::SystemError) 76 | expect { admin_conn.async_exec("BAN a -5").to_a }.to raise_error(PG::SystemError) 77 | expect { admin_conn.async_exec("BAN a 0").to_a }.to raise_error(PG::SystemError) 78 | expect { admin_conn.async_exec("BAN a a a").to_a }.to raise_error(PG::SystemError) 79 | expect { admin_conn.async_exec("UNBAN").to_a }.to raise_error(PG::SystemError) 80 | end 81 | end 82 | end 83 | 84 | describe "SHOW USERS" do 85 | it "returns the right users" do 86 | admin_conn = PG::connect(processes.pgcat.admin_connection_string) 87 | results = admin_conn.async_exec("SHOW USERS")[0] 88 | admin_conn.close 89 | expect(results["name"]).to eq("sharding_user") 90 | expect(results["pool_mode"]).to eq("transaction") 91 | end 92 | end 93 | 94 | [ 95 | "SHOW ME THE MONEY", 96 | "SHOW ME THE WAY", 97 | "SHOW UP", 98 | "SHOWTIME", 99 | "HAMMER TIME", 100 | "SHOWN TO BE TRUE", 101 | "SHOW ", 102 | "SHOW ", 103 | "SHOW 1", 104 | ";;;;;" 105 | ].each do |cmd| 106 | describe "Bad command #{cmd}" do 107 | it "does not panic and responds with PG::SystemError" do 108 | admin_conn = PG::connect(processes.pgcat.admin_connection_string) 109 | expect { admin_conn.async_exec(cmd) }.to raise_error(PG::SystemError).with_message(/Unsupported/) 110 | admin_conn.close 111 | end 112 | end 113 | end 114 | 115 | describe "PAUSE" do 116 | it "pauses all pools" do 117 | admin_conn = PG::connect(processes.pgcat.admin_connection_string) 118 | results = admin_conn.async_exec("SHOW DATABASES").to_a 119 | expect(results.map{ |r| r["paused"] }.uniq).to eq(["0"]) 120 | 121 | admin_conn.async_exec("PAUSE") 122 | 123 | results = admin_conn.async_exec("SHOW DATABASES").to_a 124 | expect(results.map{ |r| r["paused"] }.uniq).to eq(["1"]) 125 | 126 | admin_conn.async_exec("RESUME") 127 | 128 | results = admin_conn.async_exec("SHOW DATABASES").to_a 129 | expect(results.map{ |r| r["paused"] }.uniq).to eq(["0"]) 130 | end 131 | 132 | it "handles errors" do 133 | admin_conn = PG::connect(processes.pgcat.admin_connection_string) 134 | expect { admin_conn.async_exec("PAUSE foo").to_a }.to raise_error(PG::SystemError) 135 | expect { admin_conn.async_exec("PAUSE foo,bar").to_a }.to raise_error(PG::SystemError) 136 | end 137 | end 138 | end 139 | -------------------------------------------------------------------------------- /tests/ruby/capture: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/postgresml/pgcat/5b038813eb14f181434ab7b5509e74d9b1fe123b/tests/ruby/capture -------------------------------------------------------------------------------- /tests/ruby/copy_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | require_relative 'spec_helper' 3 | 4 | 5 | describe "COPY Handling" do 6 | let(:processes) { 
Helpers::Pgcat.single_instance_setup("sharded_db", 5) }
7 | before do
8 | new_configs = processes.pgcat.current_config
9 | 
10 | # Allow connections in the pool to expire faster
11 | new_configs["general"]["idle_timeout"] = 5
12 | processes.pgcat.update_config(new_configs)
13 | # We need to kill the old process that was using the default configs
14 | processes.pgcat.stop
15 | processes.pgcat.start
16 | processes.pgcat.wait_until_ready
17 | end
18 | 
19 | before do
20 | processes.all_databases.first.with_connection do |conn|
21 | conn.async_exec "CREATE TABLE copy_test_table (a TEXT,b TEXT,c TEXT,d TEXT)"
22 | end
23 | end
24 | 
25 | after do
26 | processes.all_databases.first.with_connection do |conn|
27 | conn.async_exec "DROP TABLE copy_test_table;"
28 | end
29 | end
30 | 
31 | after do
32 | processes.all_databases.map(&:reset)
33 | processes.pgcat.shutdown
34 | end
35 | 
36 | describe "COPY FROM" do
37 | context "within transaction" do
38 | it "finishes within allotted time" do
39 | conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
40 | Timeout.timeout(3) do
41 | conn.async_exec("BEGIN")
42 | conn.copy_data "COPY copy_test_table FROM STDIN CSV" do
43 | sleep 0.5
44 | conn.put_copy_data "some,data,to,copy\n"
45 | conn.put_copy_data "more,data,to,copy\n"
46 | end
47 | conn.async_exec("COMMIT")
48 | end
49 | 
50 | res = conn.async_exec("SELECT * FROM copy_test_table").to_a
51 | expect(res).to eq([
52 | {"a"=>"some", "b"=>"data", "c"=>"to", "d"=>"copy"},
53 | {"a"=>"more", "b"=>"data", "c"=>"to", "d"=>"copy"}
54 | ])
55 | end
56 | end
57 | 
58 | context "outside transaction" do
59 | it "finishes within allotted time" do
60 | conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
61 | Timeout.timeout(3) do
62 | conn.copy_data "COPY copy_test_table FROM STDIN CSV" do
63 | sleep 0.5
64 | conn.put_copy_data "some,data,to,copy\n"
65 | conn.put_copy_data "more,data,to,copy\n"
66 | end
67 | end
68 | 
69 | res = conn.async_exec("SELECT * FROM copy_test_table").to_a
70 | expect(res).to eq([
71 | {"a"=>"some", "b"=>"data", "c"=>"to", "d"=>"copy"},
72 | {"a"=>"more", "b"=>"data", "c"=>"to", "d"=>"copy"}
73 | ])
74 | end
75 | end
76 | end
77 | 
78 | describe "COPY TO" do
79 | before do
80 | conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
81 | conn.async_exec("BEGIN")
82 | conn.copy_data "COPY copy_test_table FROM STDIN CSV" do
83 | conn.put_copy_data "some,data,to,copy\n"
84 | conn.put_copy_data "more,data,to,copy\n"
85 | end
86 | conn.async_exec("COMMIT")
87 | conn.close
88 | end
89 | 
90 | it "works" do
91 | res = []
92 | conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
93 | conn.copy_data "COPY copy_test_table TO STDOUT CSV" do
94 | while row=conn.get_copy_data
95 | res << row
96 | end
97 | end
98 | expect(res).to eq(["some,data,to,copy\n", "more,data,to,copy\n"])
99 | end
100 | end
101 | 
102 | end
103 | 
--------------------------------------------------------------------------------
/tests/ruby/helpers/auth_query_helper.rb:
--------------------------------------------------------------------------------
1 | module Helpers
2 | module AuthQuery
3 | def self.single_shard_auth_query(
4 | pg_user:,
5 | config_user:,
6 | pool_name:,
7 | extra_conf: {},
8 | log_level: 'debug',
9 | wait_until_ready: true
10 | )
11 | 
12 | user = {
13 | "pool_size" => 10,
14 | "statement_timeout" => 0,
15 | }
16 | 
17 | pgcat = PgcatProcess.new(log_level)
18 | pgcat_cfg =
pgcat.current_config.deep_merge(extra_conf) 19 | 20 | primary = PgInstance.new(5432, pg_user["username"], pg_user["password"], "shard0") 21 | replica = PgInstance.new(10432, pg_user["username"], pg_user["password"], "shard0") 22 | 23 | # Main proxy configs 24 | pgcat_cfg["pools"] = { 25 | "#{pool_name}" => { 26 | "default_role" => "any", 27 | "pool_mode" => "transaction", 28 | "load_balancing_mode" => "random", 29 | "primary_reads_enabled" => false, 30 | "query_parser_enabled" => false, 31 | "sharding_function" => "pg_bigint_hash", 32 | "shards" => { 33 | "0" => { 34 | "database" => "shard0", 35 | "servers" => [ 36 | ["localhost", primary.port.to_i, "primary"], 37 | ["localhost", replica.port.to_i, "replica"], 38 | ] 39 | }, 40 | }, 41 | "users" => { "0" => user.merge(config_user) } 42 | } 43 | } 44 | pgcat_cfg["general"]["port"] = pgcat.port.to_i 45 | pgcat.update_config(pgcat_cfg) 46 | pgcat.start 47 | 48 | pgcat.wait_until_ready( 49 | pgcat.connection_string( 50 | "sharded_db", 51 | pg_user['username'], 52 | pg_user['password'] 53 | ) 54 | ) if wait_until_ready 55 | 56 | OpenStruct.new.tap do |struct| 57 | struct.pgcat = pgcat 58 | struct.primary = primary 59 | struct.replicas = [replica] 60 | struct.all_databases = [primary] 61 | end 62 | end 63 | 64 | def self.two_pools_auth_query( 65 | pg_user:, 66 | config_user:, 67 | pool_names:, 68 | extra_conf: {}, 69 | log_level: 'debug' 70 | ) 71 | 72 | user = { 73 | "pool_size" => 10, 74 | "statement_timeout" => 0, 75 | } 76 | 77 | pgcat = PgcatProcess.new(log_level) 78 | pgcat_cfg = pgcat.current_config 79 | 80 | primary = PgInstance.new(5432, pg_user["username"], pg_user["password"], "shard0") 81 | replica = PgInstance.new(10432, pg_user["username"], pg_user["password"], "shard0") 82 | 83 | pool_template = Proc.new do |database| 84 | { 85 | "default_role" => "any", 86 | "pool_mode" => "transaction", 87 | "load_balancing_mode" => "random", 88 | "primary_reads_enabled" => false, 89 | "query_parser_enabled" => false, 90 | "sharding_function" => "pg_bigint_hash", 91 | "shards" => { 92 | "0" => { 93 | "database" => database, 94 | "servers" => [ 95 | ["localhost", primary.port.to_i, "primary"], 96 | ["localhost", replica.port.to_i, "replica"], 97 | ] 98 | }, 99 | }, 100 | "users" => { "0" => user.merge(config_user) } 101 | } 102 | end 103 | # Main proxy configs 104 | pgcat_cfg["pools"] = { 105 | "#{pool_names[0]}" => pool_template.call("shard0"), 106 | "#{pool_names[1]}" => pool_template.call("shard1") 107 | } 108 | 109 | pgcat_cfg["general"]["port"] = pgcat.port 110 | pgcat.update_config(pgcat_cfg.deep_merge(extra_conf)) 111 | pgcat.start 112 | 113 | pgcat.wait_until_ready(pgcat.connection_string("sharded_db0", pg_user['username'], pg_user['password'])) 114 | 115 | OpenStruct.new.tap do |struct| 116 | struct.pgcat = pgcat 117 | struct.primary = primary 118 | struct.replicas = [replica] 119 | struct.all_databases = [primary] 120 | end 121 | end 122 | 123 | def self.create_query_auth_function(user) 124 | return <<-SQL 125 | CREATE OR REPLACE FUNCTION public.user_lookup(in i_username text, out uname text, out phash text) 126 | RETURNS record AS $$ 127 | BEGIN 128 | SELECT usename, passwd FROM pg_catalog.pg_shadow 129 | WHERE usename = i_username INTO uname, phash; 130 | RETURN; 131 | END; 132 | $$ LANGUAGE plpgsql SECURITY DEFINER; 133 | 134 | GRANT EXECUTE ON FUNCTION public.user_lookup(text) TO #{user}; 135 | SQL 136 | end 137 | 138 | def self.exec_in_instances(query:, instance_ports: [ 5432, 10432 ], database: 'postgres', user: 'postgres', 
password: 'postgres')
139 | instance_ports.each do |port|
140 | c = PG.connect("postgres://#{user}:#{password}@localhost:#{port}/#{database}")
141 | c.exec(query)
142 | c.close
143 | end
144 | end
145 | 
146 | def self.set_up_auth_query_for_user(user:, password:, instance_ports: [ 5432, 10432 ], database: 'shard0' )
147 | instance_ports.each do |port|
148 | connection = PG.connect("postgres://postgres:postgres@localhost:#{port}/#{database}")
149 | connection.exec(self.drop_query_auth_function(user)) rescue PG::UndefinedFunction
150 | connection.exec("DROP ROLE #{user}") rescue PG::UndefinedObject
151 | connection.exec("CREATE ROLE #{user} ENCRYPTED PASSWORD '#{password}' LOGIN;")
152 | connection.exec(self.create_query_auth_function(user))
153 | connection.close
154 | end
155 | end
156 | 
157 | def self.tear_down_auth_query_for_user(user:, password:, instance_ports: [ 5432, 10432 ], database: 'shard0' )
158 | instance_ports.each do |port|
159 | connection = PG.connect("postgres://postgres:postgres@localhost:#{port}/#{database}")
160 | connection.exec(self.drop_query_auth_function(user)) rescue PG::UndefinedFunction
161 | connection.exec("DROP ROLE #{user}")
162 | connection.close
163 | end
164 | end
165 | 
166 | def self.drop_query_auth_function(user)
167 | return <<-SQL
168 | REVOKE ALL ON FUNCTION public.user_lookup(text) FROM public, #{user};
169 | DROP FUNCTION public.user_lookup(in i_username text, out uname text, out phash text);
170 | SQL
171 | end
172 | end
173 | end
174 | 
--------------------------------------------------------------------------------
/tests/ruby/helpers/pg_instance.rb:
--------------------------------------------------------------------------------
1 | require 'pg'
2 | require 'toxiproxy'
3 | 
4 | class PgInstance
5 | attr_reader :port
6 | attr_reader :username
7 | attr_reader :password
8 | attr_reader :database_name
9 | 
10 | def self.mass_takedown(databases)
11 | raise StandardError, "block missing" unless block_given?
12 | 
13 | databases.each do |database|
14 | database.toxiproxy.toxic(:limit_data, bytes: 1).toxics.each(&:save)
15 | end
16 | sleep 0.1
17 | yield
18 | ensure
19 | databases.each do |database|
20 | database.toxiproxy.toxics.each(&:destroy)
21 | end
22 | end
23 | 
24 | def initialize(port, username, password, database_name)
25 | @original_port = port.to_i
26 | @toxiproxy_port = 10000 + port.to_i
27 | @port = @toxiproxy_port.to_i
28 | 
29 | @username = username
30 | @password = password
31 | @database_name = database_name
32 | @toxiproxy_name = "database_#{@original_port}"
33 | Toxiproxy.populate([{
34 | name: @toxiproxy_name,
35 | listen: "0.0.0.0:#{@toxiproxy_port}",
36 | upstream: "localhost:#{@original_port}",
37 | }])
38 | 
39 | # Toxiproxy server will outlive our PgInstance objects,
40 | # so we want to destroy our proxies before exiting.
41 | # A Ruby finalizer is ideal for doing this.
42 | ObjectSpace.define_finalizer(@toxiproxy_name, proc { Toxiproxy[@toxiproxy_name].destroy })
43 | end
44 | 
45 | def with_connection
46 | conn = PG.connect("postgres://#{@username}:#{@password}@localhost:#{port}/#{database_name}")
47 | yield conn
48 | ensure
49 | conn&.close
50 | end
51 | 
52 | def reset
53 | reset_toxics
54 | reset_stats
55 | drop_connections
56 | sleep 0.1
57 | end
58 | 
59 | def toxiproxy
60 | Toxiproxy[@toxiproxy_name]
61 | end
62 | 
63 | def take_down
64 | if block_given?
65 | Toxiproxy[@toxiproxy_name].toxic(:limit_data, bytes: 1).apply { yield } 66 | else 67 | Toxiproxy[@toxiproxy_name].toxic(:limit_data, bytes: 1).toxics.each(&:save) 68 | end 69 | end 70 | 71 | def add_latency(latency) 72 | if block_given? 73 | Toxiproxy[@toxiproxy_name].toxic(:latency, latency: latency).apply { yield } 74 | else 75 | Toxiproxy[@toxiproxy_name].toxic(:latency, latency: latency).toxics.each(&:save) 76 | end 77 | end 78 | 79 | def delete_proxy 80 | Toxiproxy[@toxiproxy_name].delete 81 | end 82 | 83 | def reset_toxics 84 | Toxiproxy[@toxiproxy_name].toxics.each(&:destroy) 85 | sleep 0.1 86 | end 87 | 88 | def reset_stats 89 | with_connection { |c| c.async_exec("SELECT pg_stat_statements_reset()") } 90 | end 91 | 92 | def drop_connections 93 | username = with_connection { |c| c.async_exec("SELECT current_user")[0]["current_user"] } 94 | with_connection { |c| c.async_exec("SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE pid <> pg_backend_pid() AND usename='#{username}'") } 95 | end 96 | 97 | def count_connections 98 | with_connection { |c| c.async_exec("SELECT COUNT(*) as count FROM pg_stat_activity")[0]["count"].to_i } 99 | end 100 | 101 | def count_query(query) 102 | with_connection { |c| c.async_exec("SELECT SUM(calls) FROM pg_stat_statements WHERE query = '#{query}'")[0]["sum"].to_i } 103 | end 104 | 105 | def count_select_1_plus_2 106 | with_connection { |c| c.async_exec("SELECT SUM(calls) FROM pg_stat_statements WHERE query LIKE '%SELECT $1 + $2%'")[0]["sum"].to_i } 107 | end 108 | end 109 | -------------------------------------------------------------------------------- /tests/ruby/helpers/pgcat_helper.rb: -------------------------------------------------------------------------------- 1 | require 'json' 2 | require 'ostruct' 3 | require_relative 'pgcat_process' 4 | require_relative 'pg_instance' 5 | require_relative 'pg_socket' 6 | 7 | class ::Hash 8 | def deep_merge(second) 9 | merger = proc { |key, v1, v2| Hash === v1 && Hash === v2 ? 
v1.merge(v2, &merger) : v2 } 10 | self.merge(second, &merger) 11 | end 12 | end 13 | 14 | module Helpers 15 | module Pgcat 16 | def self.three_shard_setup(pool_name, pool_size, pool_mode="transaction", lb_mode="random", log_level="info") 17 | user = { 18 | "password" => "sharding_user", 19 | "pool_size" => pool_size, 20 | "statement_timeout" => 0, 21 | "username" => "sharding_user" 22 | } 23 | 24 | pgcat = PgcatProcess.new(log_level) 25 | primary0 = PgInstance.new(5432, user["username"], user["password"], "shard0") 26 | primary1 = PgInstance.new(7432, user["username"], user["password"], "shard1") 27 | primary2 = PgInstance.new(8432, user["username"], user["password"], "shard2") 28 | 29 | pgcat_cfg = pgcat.current_config 30 | pgcat_cfg["pools"] = { 31 | "#{pool_name}" => { 32 | "default_role" => "any", 33 | "pool_mode" => pool_mode, 34 | "load_balancing_mode" => lb_mode, 35 | "primary_reads_enabled" => true, 36 | "query_parser_enabled" => true, 37 | "query_parser_read_write_splitting" => true, 38 | "automatic_sharding_key" => "data.id", 39 | "sharding_function" => "pg_bigint_hash", 40 | "shards" => { 41 | "0" => { "database" => "shard0", "servers" => [["localhost", primary0.port.to_i, "primary"]] }, 42 | "1" => { "database" => "shard1", "servers" => [["localhost", primary1.port.to_i, "primary"]] }, 43 | "2" => { "database" => "shard2", "servers" => [["localhost", primary2.port.to_i, "primary"]] }, 44 | }, 45 | "users" => { "0" => user }, 46 | "plugins" => { 47 | "intercept" => { 48 | "enabled" => true, 49 | "queries" => { 50 | "0" => { 51 | "query" => "select current_database() as a, current_schemas(false) as b", 52 | "schema" => [ 53 | ["a", "text"], 54 | ["b", "text"], 55 | ], 56 | "result" => [ 57 | ["${DATABASE}", "{public}"], 58 | ] 59 | } 60 | } 61 | } 62 | } 63 | } 64 | } 65 | pgcat.update_config(pgcat_cfg) 66 | 67 | pgcat.start 68 | pgcat.wait_until_ready 69 | 70 | OpenStruct.new.tap do |struct| 71 | struct.pgcat = pgcat 72 | struct.shards = [primary0, primary1, primary2] 73 | struct.all_databases = [primary0, primary1, primary2] 74 | end 75 | end 76 | 77 | def self.single_instance_setup(pool_name, pool_size, pool_mode="transaction", lb_mode="random", log_level="trace") 78 | user = { 79 | "password" => "sharding_user", 80 | "pool_size" => pool_size, 81 | "statement_timeout" => 0, 82 | "username" => "sharding_user" 83 | } 84 | 85 | pgcat = PgcatProcess.new(log_level) 86 | pgcat_cfg = pgcat.current_config 87 | 88 | primary = PgInstance.new(5432, user["username"], user["password"], "shard0") 89 | 90 | # Main proxy configs 91 | pgcat_cfg["pools"] = { 92 | "#{pool_name}" => { 93 | "default_role" => "primary", 94 | "pool_mode" => pool_mode, 95 | "load_balancing_mode" => lb_mode, 96 | "primary_reads_enabled" => false, 97 | "query_parser_enabled" => false, 98 | "sharding_function" => "pg_bigint_hash", 99 | "shards" => { 100 | "0" => { 101 | "database" => "shard0", 102 | "servers" => [ 103 | ["localhost", primary.port.to_i, "primary"] 104 | ] 105 | }, 106 | }, 107 | "users" => { "0" => user } 108 | } 109 | } 110 | pgcat_cfg["general"]["port"] = pgcat.port 111 | pgcat.update_config(pgcat_cfg) 112 | pgcat.start 113 | pgcat.wait_until_ready 114 | 115 | OpenStruct.new.tap do |struct| 116 | struct.pgcat = pgcat 117 | struct.primary = primary 118 | struct.all_databases = [primary] 119 | end 120 | end 121 | 122 | def self.single_shard_setup(pool_name, pool_size, pool_mode="transaction", lb_mode="random", log_level="info", pool_settings={}) 123 | user = { 124 | "password" => "sharding_user", 125 | 
"pool_size" => pool_size, 126 | "statement_timeout" => 0, 127 | "username" => "sharding_user" 128 | } 129 | 130 | pgcat = PgcatProcess.new(log_level) 131 | pgcat_cfg = pgcat.current_config 132 | 133 | primary = PgInstance.new(5432, user["username"], user["password"], "shard0") 134 | replica0 = PgInstance.new(7432, user["username"], user["password"], "shard0") 135 | replica1 = PgInstance.new(8432, user["username"], user["password"], "shard0") 136 | replica2 = PgInstance.new(9432, user["username"], user["password"], "shard0") 137 | 138 | pool_config = { 139 | "default_role" => "any", 140 | "pool_mode" => pool_mode, 141 | "load_balancing_mode" => lb_mode, 142 | "primary_reads_enabled" => false, 143 | "query_parser_enabled" => false, 144 | "sharding_function" => "pg_bigint_hash", 145 | "shards" => { 146 | "0" => { 147 | "database" => "shard0", 148 | "servers" => [ 149 | ["localhost", primary.port.to_i, "primary"], 150 | ["localhost", replica0.port.to_i, "replica"], 151 | ["localhost", replica1.port.to_i, "replica"], 152 | ["localhost", replica2.port.to_i, "replica"] 153 | ] 154 | }, 155 | }, 156 | "users" => { "0" => user } 157 | } 158 | 159 | pool_config = pool_config.merge(pool_settings) 160 | 161 | # Main proxy configs 162 | pgcat_cfg["pools"] = { 163 | "#{pool_name}" => pool_config, 164 | } 165 | pgcat_cfg["general"]["port"] = pgcat.port 166 | pgcat.update_config(pgcat_cfg) 167 | pgcat.start 168 | pgcat.wait_until_ready 169 | 170 | OpenStruct.new.tap do |struct| 171 | struct.pgcat = pgcat 172 | struct.primary = primary 173 | struct.replicas = [replica0, replica1, replica2] 174 | struct.all_databases = [primary, replica0, replica1, replica2] 175 | end 176 | end 177 | end 178 | end 179 | -------------------------------------------------------------------------------- /tests/ruby/helpers/pgcat_process.rb: -------------------------------------------------------------------------------- 1 | require 'pg' 2 | require 'json' 3 | require 'tempfile' 4 | require 'fileutils' 5 | require 'securerandom' 6 | 7 | class ConfigReloadFailed < StandardError; end 8 | class PgcatProcess 9 | attr_reader :port 10 | attr_reader :pid 11 | 12 | def self.finalize(pid, log_filename, config_filename) 13 | if pid 14 | Process.kill("TERM", pid) 15 | Process.wait(pid) 16 | end 17 | 18 | File.delete(config_filename) if File.exist?(config_filename) 19 | File.delete(log_filename) if File.exist?(log_filename) 20 | end 21 | 22 | def initialize(log_level) 23 | @env = {} 24 | @port = rand(20000..32760) 25 | @log_level = log_level 26 | @log_filename = "/tmp/pgcat_log_#{SecureRandom.urlsafe_base64}.log" 27 | @config_filename = "/tmp/pgcat_cfg_#{SecureRandom.urlsafe_base64}.toml" 28 | 29 | command_path = if ENV['CARGO_TARGET_DIR'] then 30 | "#{ENV['CARGO_TARGET_DIR']}/debug/pgcat" 31 | else 32 | '../../target/debug/pgcat' 33 | end 34 | 35 | @command = "#{command_path} #{@config_filename} --log-level #{@log_level}" 36 | 37 | FileUtils.cp("../../pgcat.toml", @config_filename) 38 | cfg = current_config 39 | cfg["general"]["port"] = @port.to_i 40 | cfg["general"]["enable_prometheus_exporter"] = false 41 | 42 | update_config(cfg) 43 | end 44 | 45 | def logs 46 | File.read(@log_filename) 47 | end 48 | 49 | def update_config(config_hash) 50 | @original_config = current_config 51 | Tempfile.create('json_out', '/tmp') do |f| 52 | f.write(config_hash.to_json) 53 | f.flush 54 | `cat #{f.path} | yj -jt > #{@config_filename}` 55 | end 56 | end 57 | 58 | def current_config 59 | JSON.parse(`cat #{@config_filename} | yj -tj`) 60 | end 61 | 62 | 
def raw_config_file 63 | File.read(@config_filename) 64 | end 65 | 66 | def reload_config 67 | conn = PG.connect(admin_connection_string) 68 | 69 | conn.async_exec("RELOAD") 70 | rescue PG::ConnectionBad => e 71 | errors = logs.split("Reloading config").last 72 | errors = errors.gsub(/\e\[([;\d]+)?m/, '') # Remove color codes 73 | errors = errors. 74 | split("\n").select{|line| line.include?("ERROR") }. 75 | map { |line| line.split("pgcat::config: ").last } 76 | raise ConfigReloadFailed, errors.join("\n") 77 | ensure 78 | conn&.close 79 | end 80 | 81 | def start 82 | raise StandardError, "Process is already started" unless @pid.nil? 83 | @pid = Process.spawn(@env, @command, err: @log_filename, out: @log_filename) 84 | Process.detach(@pid) 85 | ObjectSpace.define_finalizer(@log_filename, proc { PgcatProcess.finalize(@pid, @log_filename, @config_filename) }) 86 | 87 | return self 88 | end 89 | 90 | def wait_until_ready(connection_string = nil) 91 | exc = nil 92 | 10.times do 93 | Process.kill 0, @pid 94 | PG::connect(connection_string || example_connection_string).close 95 | 96 | return self 97 | rescue Errno::ESRCH 98 | raise StandardError, "Process #{@pid} died. #{logs}" 99 | rescue => e 100 | exc = e 101 | sleep(0.5) 102 | end 103 | puts exc 104 | raise StandardError, "Process #{@pid} never became ready. Logs #{logs}" 105 | end 106 | 107 | def stop 108 | return unless @pid 109 | 110 | Process.kill("TERM", @pid) 111 | Process.wait(@pid) 112 | @pid = nil 113 | end 114 | 115 | def shutdown 116 | stop 117 | File.delete(@config_filename) if File.exist?(@config_filename) 118 | File.delete(@log_filename) if File.exist?(@log_filename) 119 | end 120 | 121 | def admin_connection_string 122 | cfg = current_config 123 | username = cfg["general"]["admin_username"] 124 | password = cfg["general"]["admin_password"] 125 | 126 | "postgresql://#{username}:#{password}@0.0.0.0:#{@port}/pgcat" 127 | end 128 | 129 | def connection_string(pool_name, username, password = nil, parameters: {}) 130 | cfg = current_config 131 | user_idx, user_obj = cfg["pools"][pool_name]["users"].detect { |k, user| user["username"] == username } 132 | connection_string = "postgresql://#{username}:#{password || user_obj["password"]}@0.0.0.0:#{@port}/#{pool_name}" 133 | 134 | # Add the additional parameters to the connection string 135 | parameter_string = parameters.map { |key, value| "#{key}=#{value}" }.join("&") 136 | connection_string += "?#{parameter_string}" unless parameter_string.empty? 
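# Example (illustrative): parameters: { "application_name" => "my_spec" } appends
# "?application_name=my_spec" to the pool connection string.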
137 | 138 | connection_string 139 | end 140 | 141 | def example_connection_string 142 | cfg = current_config 143 | first_pool_name = cfg["pools"].keys[0] 144 | 145 | db_name = first_pool_name 146 | 147 | username = cfg["pools"][first_pool_name]["users"]["0"]["username"] 148 | password = cfg["pools"][first_pool_name]["users"]["0"]["password"] 149 | 150 | "postgresql://#{username}:#{password}@0.0.0.0:#{@port}/#{db_name}?application_name=example_app" 151 | end 152 | end 153 | -------------------------------------------------------------------------------- /tests/ruby/mirrors_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | require 'uri' 3 | require_relative 'spec_helper' 4 | 5 | describe "Query Mirroring" do 6 | let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 10) } 7 | let(:mirror_pg) { PgInstance.new(8432, "sharding_user", "sharding_user", "shard2") } 8 | let(:pgcat_conn_str) { processes.pgcat.connection_string("sharded_db", "sharding_user") } 9 | let(:mirror_host) { "localhost" } 10 | 11 | before do 12 | new_configs = processes.pgcat.current_config 13 | new_configs["pools"]["sharded_db"]["shards"]["0"]["mirrors"] = [ 14 | [mirror_host, mirror_pg.port.to_i, 0], 15 | [mirror_host, mirror_pg.port.to_i, 0], 16 | [mirror_host, mirror_pg.port.to_i, 0], 17 | ] 18 | processes.pgcat.update_config(new_configs) 19 | processes.pgcat.reload_config 20 | end 21 | 22 | after do 23 | processes.all_databases.map(&:reset) 24 | mirror_pg.reset 25 | processes.pgcat.shutdown 26 | end 27 | 28 | xit "can mirror a query" do 29 | conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user")) 30 | runs = 15 31 | runs.times { conn.async_exec("SELECT 1 + 2") } 32 | sleep 0.5 33 | expect(processes.all_databases.first.count_select_1_plus_2).to eq(runs) 34 | # Allow some slack in mirroring successes 35 | expect(mirror_pg.count_select_1_plus_2).to be > ((runs - 5) * 3) 36 | end 37 | 38 | context "when main server connection is closed" do 39 | it "closes the mirror connection" do 40 | baseline_count = processes.all_databases.first.count_connections 41 | 5.times do |i| 42 | # Force pool cycling to detect zombie mirror connections 43 | new_configs = processes.pgcat.current_config 44 | new_configs["pools"]["sharded_db"]["idle_timeout"] = 5000 + i 45 | new_configs["pools"]["sharded_db"]["shards"]["0"]["mirrors"] = [ 46 | [mirror_host, mirror_pg.port.to_i, 0], 47 | [mirror_host, mirror_pg.port.to_i, 0], 48 | [mirror_host, mirror_pg.port.to_i, 0], 49 | ] 50 | processes.pgcat.update_config(new_configs) 51 | processes.pgcat.reload_config 52 | end 53 | conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user")) 54 | conn.async_exec("SELECT 1 + 2") 55 | sleep 0.5 56 | # Expect the same number of connections even after pool cycling 57 | expect(processes.all_databases.first.count_connections).to be < baseline_count + 2 58 | end 59 | end 60 | 61 | xcontext "when mirror server goes down temporarily" do 62 | it "continues to transmit queries after recovery" do 63 | conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user")) 64 | mirror_pg.take_down do 65 | conn.async_exec("SELECT 1 + 2") 66 | sleep 0.1 67 | end 68 | 10.times { conn.async_exec("SELECT 1 + 2") } 69 | sleep 1 70 | expect(mirror_pg.count_select_1_plus_2).to be >= 2 71 | end 72 | end 73 | 74 | context "when a mirror is down" do 75 | let(:mirror_host) { "badhost" } 76 | 77 | it "does not fail to send the main query" do
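# mirror_host is "badhost" in this context, so every mirror send fails; that must stay invisible to the client.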
78 | conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user")) 79 | # No errors here 80 | conn.async_exec("SELECT 1 + 2") 81 | expect(processes.all_databases.first.count_select_1_plus_2).to eq(1) 82 | end 83 | 84 | it "does not fail to send the main query (even after thousands of mirror attempts)" do 85 | conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user")) 86 | # No errors here 87 | 1000.times { conn.async_exec("SELECT 1 + 2") } 88 | expect(processes.all_databases.first.count_select_1_plus_2).to eq(1000) 89 | end 90 | end 91 | end 92 | -------------------------------------------------------------------------------- /tests/ruby/plugins_spec.rb: -------------------------------------------------------------------------------- 1 | require_relative 'spec_helper' 2 | 3 | 4 | describe "Plugins" do 5 | let(:processes) { Helpers::Pgcat.three_shard_setup("sharded_db", 5) } 6 | 7 | context "intercept" do 8 | it "will intercept an IntelliJ query" do 9 | conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user")) 10 | res = conn.exec("select current_database() as a, current_schemas(false) as b") 11 | expect(res.values).to eq([["sharded_db", "{public}"]]) 12 | end 13 | end 14 | end 15 | -------------------------------------------------------------------------------- /tests/ruby/protocol_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | require_relative 'spec_helper' 3 | 4 | 5 | describe "Protocol handling" do 6 | let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 1, "session") } 7 | let(:sequence) { [] } 8 | let(:pgcat_socket) { PostgresSocket.new('localhost', processes.pgcat.port) } 9 | let(:pgdb_socket) { PostgresSocket.new('localhost', processes.all_databases.first.port) } 10 | 11 | after do 12 | pgdb_socket.close 13 | pgcat_socket.close 14 | processes.all_databases.map(&:reset) 15 | processes.pgcat.shutdown 16 | end 17 | 18 | def run_comparison(sequence, socket_a, socket_b) 19 | sequence.each do |msg, *args| 20 | socket_a.send(msg, *args) 21 | socket_b.send(msg, *args) 22 | 23 | compare_messages( 24 | socket_a.read_from_server, 25 | socket_b.read_from_server 26 | ) 27 | end 28 | end 29 | 30 | def compare_messages(msg_arr0, msg_arr1) 31 | if msg_arr0.count != msg_arr1.count 32 | error_output = [] 33 | 34 | error_output << "#{msg_arr0.count} : #{msg_arr1.count}" 35 | error_output << "PgCat Messages" 36 | error_output += msg_arr0.map { |message| "\t#{message[:code]} - #{message[:bytes].map(&:chr).join(" ")}" } 37 | error_output << "PgServer Messages" 38 | error_output += msg_arr1.map { |message| "\t#{message[:code]} - #{message[:bytes].map(&:chr).join(" ")}" } 39 | error_desc = error_output.join("\n") 40 | raise StandardError, "Message count mismatch #{error_desc}" 41 | end 42 | 43 | (0..msg_arr0.count - 1).all? do |i| 44 | msg0 = msg_arr0[i] 45 | msg1 = msg_arr1[i] 46 | 47 | result = [ 48 | msg0[:code] == msg1[:code], 49 | msg0[:len] == msg1[:len], 50 | msg0[:bytes] == msg1[:bytes], 51 | ].all?
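# A pair of messages matches only when the type code, declared length, and raw payload bytes are all identical.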
52 | 53 | next result if result 54 | 55 | if result == false 56 | error_string = [] 57 | if msg0[:code] != msg1[:code] 58 | error_string << "code #{msg0[:code]} != #{msg1[:code]}" 59 | end 60 | if msg0[:len] != msg1[:len] 61 | error_string << "len #{msg0[:len]} != #{msg1[:len]}" 62 | end 63 | if msg0[:bytes] != msg1[:bytes] 64 | error_string << "bytes #{msg0[:bytes]} != #{msg1[:bytes]}" 65 | end 66 | err = error_string.join("\n") 67 | 68 | raise StandardError, "Message mismatch #{err}" 69 | end 70 | end 71 | end 72 | 73 | RSpec.shared_examples "at parity with database" do 74 | before do 75 | pgcat_socket.send_startup_message("sharding_user", "sharded_db", "sharding_user") 76 | pgdb_socket.send_startup_message("sharding_user", "shard0", "sharding_user") 77 | end 78 | 79 | it "works" do 80 | run_comparison(sequence, pgcat_socket, pgdb_socket) 81 | end 82 | end 83 | 84 | context "Cancel Query" do 85 | let(:sequence) { 86 | [ 87 | [:send_query_message, "SELECT pg_sleep(5)"], 88 | [:cancel_query] 89 | ] 90 | } 91 | 92 | it_behaves_like "at parity with database" 93 | end 94 | 95 | xcontext "Simple query after parse" do 96 | let(:sequence) { 97 | [ 98 | [:send_parse_message, "SELECT 5"], 99 | [:send_query_message, "SELECT 1"], 100 | [:send_bind_message], 101 | [:send_describe_message, "P"], 102 | [:send_execute_message], 103 | [:send_sync_message], 104 | ] 105 | } 106 | 107 | # Known to fail due to PgCat not supporting flush 108 | it_behaves_like "at parity with database" 109 | end 110 | 111 | xcontext "Flush message" do 112 | let(:sequence) { 113 | [ 114 | [:send_parse_message, "SELECT 1"], 115 | [:send_flush_message] 116 | ] 117 | } 118 | 119 | # Known to fail due to PgCat not supporting flush 120 | it_behaves_like "at parity with database" 121 | end 122 | 123 | xcontext "Bind without parse" do 124 | let(:sequence) { 125 | [ 126 | [:send_bind_message] 127 | ] 128 | } 129 | # This is known to fail. 
130 | # Server responds immediately, Proxy buffers the message 131 | it_behaves_like "at parity with database" 132 | end 133 | 134 | context "Simple message" do 135 | let(:sequence) { 136 | [[:send_query_message, "SELECT 1"]] 137 | } 138 | 139 | it_behaves_like "at parity with database" 140 | end 141 | 142 | context "Extended protocol" do 143 | let(:sequence) { 144 | [ 145 | [:send_parse_message, "SELECT 1"], 146 | [:send_bind_message], 147 | [:send_describe_message, "P"], 148 | [:send_execute_message], 149 | [:send_sync_message], 150 | ] 151 | } 152 | 153 | it_behaves_like "at parity with database" 154 | end 155 | end 156 | -------------------------------------------------------------------------------- /tests/ruby/routing_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | require_relative 'spec_helper' 3 | 4 | 5 | describe "Routing" do 6 | let(:processes) { Helpers::Pgcat.single_shard_setup("sharded_db", 5) } 7 | after do 8 | processes.all_databases.map(&:reset) 9 | processes.pgcat.shutdown 10 | end 11 | 12 | describe "SET ROLE" do 13 | context "primary" do 14 | it "routes queries only to primary" do 15 | conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user")) 16 | conn.async_exec("SET SERVER ROLE to 'primary'") 17 | 18 | query_count = 30 19 | failed_count = 0 20 | 21 | query_count.times do 22 | conn.async_exec("SELECT 1 + 2") 23 | rescue 24 | failed_count += 1 25 | end 26 | 27 | expect(failed_count).to eq(0) 28 | processes.replicas.map(&:count_select_1_plus_2).each do |instance_share| 29 | expect(instance_share).to eq(0) 30 | end 31 | 32 | expect(processes.primary.count_select_1_plus_2).to eq(query_count) 33 | end 34 | end 35 | context "replica" do 36 | it "routes queries only to replicas" do 37 | conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user")) 38 | conn.async_exec("SET SERVER ROLE to 'replica'") 39 | 40 | expected_share = QUERY_COUNT / processes.replicas.count 41 | failed_count = 0 42 | 43 | QUERY_COUNT.times do 44 | conn.async_exec("SELECT 1 + 2") 45 | rescue 46 | failed_count += 1 47 | end 48 | 49 | expect(failed_count).to eq(0) 50 | 51 | processes.replicas.map(&:count_select_1_plus_2).each do |instance_share| 52 | expect(instance_share).to be_within(expected_share * MARGIN_OF_ERROR).of(expected_share) 53 | end 54 | 55 | expect(processes.primary.count_select_1_plus_2).to eq(0) 56 | end 57 | end 58 | 59 | context "any" do 60 | it "routes queries to all instances" do 61 | conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user")) 62 | conn.async_exec("SET SERVER ROLE to 'any'") 63 | 64 | expected_share = QUERY_COUNT / processes.all_databases.count 65 | failed_count = 0 66 | 67 | QUERY_COUNT.times do 68 | conn.async_exec("SELECT 1 + 2") 69 | rescue 70 | failed_count += 1 71 | end 72 | 73 | expect(failed_count).to eq(0) 74 | 75 | processes.all_databases.map(&:count_select_1_plus_2).each do |instance_share| 76 | expect(instance_share).to be_within(expected_share * MARGIN_OF_ERROR).of(expected_share) 77 | end 78 | end 79 | end 80 | end 81 | end 82 | -------------------------------------------------------------------------------- /tests/ruby/spec_helper.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'pg' 4 | require_relative 'helpers/pgcat_helper' 5 | 6 | QUERY_COUNT = 300 7 | MARGIN_OF_ERROR = 0.35 8 | 9 | def with_captured_stdout_stderr 10 
| sout = STDOUT.clone 11 | serr = STDERR.clone 12 | STDOUT.reopen("/tmp/out.txt", "w+") 13 | STDERR.reopen("/tmp/err.txt", "w+") 14 | STDOUT.sync = true 15 | STDERR.sync = true 16 | yield 17 | return File.read('/tmp/out.txt'), File.read('/tmp/err.txt') 18 | ensure 19 | STDOUT.reopen(sout) 20 | STDERR.reopen(serr) 21 | end 22 | 23 | def clients_connected_to_pool(pool_index: 0, processes:) 24 | admin_conn = PG::connect(processes.pgcat.admin_connection_string) 25 | results = admin_conn.async_exec("SHOW POOLS")[pool_index] 26 | admin_conn.close 27 | results['cl_idle'].to_i + results['cl_active'].to_i + results['cl_waiting'].to_i 28 | end 29 | -------------------------------------------------------------------------------- /tests/ruby/tests.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | require 'pg' 3 | require 'active_record' 4 | 5 | # Uncomment these two to see all queries. 6 | # ActiveRecord.verbose_query_logs = true 7 | # ActiveRecord::Base.logger = Logger.new(STDOUT) 8 | 9 | ActiveRecord::Base.establish_connection( 10 | adapter: 'postgresql', 11 | host: '127.0.0.1', 12 | port: 6432, 13 | username: 'sharding_user', 14 | password: 'sharding_user', 15 | database: 'sharded_db', 16 | application_name: 'testing_pgcat', 17 | prepared_statements: false, # Transaction mode 18 | advisory_locks: false # Same 19 | ) 20 | 21 | class TestSafeTable < ActiveRecord::Base 22 | self.table_name = 'test_safe_table' 23 | end 24 | 25 | class ShouldNeverHappenException < RuntimeError 26 | end 27 | 28 | class CreateSafeShardedTable < ActiveRecord::Migration[7.0] 29 | # Disable transactions or things will fly out of order! 30 | disable_ddl_transaction! 31 | 32 | SHARDS = 3 33 | 34 | def up 35 | SHARDS.times do |x| 36 | # This will make this migration reversible! 37 | connection.execute "SET SHARD TO '#{x.to_i}'" 38 | connection.execute "SET SERVER ROLE TO 'primary'" 39 | 40 | connection.execute <<-SQL 41 | CREATE TABLE test_safe_table ( 42 | id BIGINT PRIMARY KEY, 43 | name VARCHAR, 44 | description TEXT 45 | ) PARTITION BY HASH (id); 46 | 47 | CREATE TABLE test_safe_table_data PARTITION OF test_safe_table 48 | FOR VALUES WITH (MODULUS #{SHARDS.to_i}, REMAINDER #{x.to_i}); 49 | SQL 50 | end 51 | end 52 | 53 | def down 54 | SHARDS.times do |x| 55 | connection.execute "SET SHARD TO '#{x.to_i}'" 56 | connection.execute "SET SERVER ROLE TO 'primary'" 57 | connection.execute 'DROP TABLE test_safe_table CASCADE' 58 | end 59 | end 60 | end 61 | 62 | SHARDS = 3 63 | 64 | 2.times do 65 | begin 66 | CreateSafeShardedTable.migrate(:down) 67 | rescue Exception 68 | puts "Tables don't exist yet" 69 | end 70 | 71 | CreateSafeShardedTable.migrate(:up) 72 | 73 | SHARDS.times do |x| 74 | TestSafeTable.connection.execute "SET SHARD TO '#{x.to_i}'" 75 | TestSafeTable.connection.execute "SET SERVER ROLE TO 'primary'" 76 | TestSafeTable.connection.execute "TRUNCATE #{TestSafeTable.table_name}" 77 | end 78 | 79 | # Equivalent to Makara's stick_to_master! except it sticks until it's changed.
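# The chosen server role is sticky: it applies to every subsequent query on this session until another SET SERVER ROLE changes it.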
80 | TestSafeTable.connection.execute "SET SERVER ROLE TO 'primary'" 81 | 82 | 200.times do |x| 83 | x += 1 # Postgres ids start at 1 84 | TestSafeTable.connection.execute "SET SHARDING KEY TO '#{x.to_i}'" 85 | TestSafeTable.create(id: x, name: "something_special_#{x.to_i}", description: "It's a surprise!") 86 | end 87 | 88 | TestSafeTable.connection.execute "SET SERVER ROLE TO 'replica'" 89 | 90 | 100.times do |x| 91 | x += 1 # 0 confuses our sharding function 92 | TestSafeTable.connection.execute "SET SHARDING KEY TO '#{x.to_i}'" 93 | TestSafeTable.find_by_id(x).id 94 | end 95 | 96 | # Will use the query parser to direct reads to replicas 97 | TestSafeTable.connection.execute "SET SERVER ROLE TO 'auto'" 98 | 99 | 100.times do |x| 100 | x += 101 101 | TestSafeTable.connection.execute "SET SHARDING KEY TO '#{x.to_i}'" 102 | TestSafeTable.find_by_id(x).id 103 | end 104 | end 105 | 106 | # Test wrong shard 107 | TestSafeTable.connection.execute "SET SHARD TO '1'" 108 | begin 109 | TestSafeTable.create(id: 5, name: 'test', description: 'test description') 110 | raise ShouldNeverHappenException, 'Uh oh' 111 | rescue ActiveRecord::StatementInvalid 112 | puts 'OK' 113 | end 114 | -------------------------------------------------------------------------------- /tests/rust/.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | -------------------------------------------------------------------------------- /tests/rust/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rust" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | sqlx = { version = "0.6.2", features = [ "runtime-tokio-rustls", "postgres", "json", "tls", "migrate", "time", "uuid", "ipnetwork"] } 10 | tokio = { version = "1", features = ["full"] } 11 | -------------------------------------------------------------------------------- /tests/rust/src/main.rs: -------------------------------------------------------------------------------- 1 | #[tokio::main] 2 | async fn main() { 3 | test_prepared_statements().await; 4 | } 5 | 6 | async fn test_prepared_statements() { 7 | let pool = sqlx::postgres::PgPoolOptions::new() 8 | .max_connections(5) 9 | .connect("postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db") 10 | .await 11 | .unwrap(); 12 | 13 | let mut handles = Vec::new(); 14 | 15 | for _ in 0..5 { 16 | let pool = pool.clone(); 17 | let handle = tokio::task::spawn(async move { 18 | for i in 0..1000 { 19 | match sqlx::query(&format!("SELECT {:?}", i % 5)).fetch_all(&pool).await { 20 | Ok(_) => (), 21 | Err(err) => { 22 | panic!("prepared statement error: {}", err); 23 | } 24 | } 25 | } 26 | }); 27 | 28 | handles.push(handle); 29 | } 30 | 31 | for handle in handles { 32 | handle.await.unwrap(); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /tests/sharding/README.md: -------------------------------------------------------------------------------- 1 | # Sharding tests 2 | 3 | This helps us test the sharding algorithm we implemented. 4 | 5 | 6 | ## Setup 7 | 8 | We set up 3 Postgres DBs, `shard0`, `shard1`, and `shard2`. In each database, we create a partitioned table called `data`.
The table is partitioned by hash, and each database will only have _one_ partition: `shard0` will satisfy `modulus 3, remainder 0`, `shard1` will satisfy `modulus 3, remainder 1`, etc. 9 | 10 | To set this up, you can just run: 11 | 12 | ```bash 13 | psql -f query_routing_setup.sql 14 | ``` 15 | 16 | ## Run the tests 17 | 18 | Start up PgCat by running `cargo run --release` in the root of the repo. In a different tab, run this: 19 | 20 | ```bash 21 | psql -h 127.0.0.1 -p 6432 -f query_routing_test_insert.sql 22 | psql -h 127.0.0.1 -p 6432 -f query_routing_test_select.sql 23 | ``` 24 | 25 | Note that no errors should take place. If our sharding logic were incorrect, we would get some errors 26 | about unsatisfiable partition bounds. We don't, because the pooler picked the correct databases 27 | given the sharding keys. 28 | 29 | Finally, you can validate the result again by running: 30 | 31 | ```bash 32 | psql -f query_routing_test_validate.sql 33 | ``` 34 | 35 | ## That's it! -------------------------------------------------------------------------------- /tests/sharding/partition_hash_test_setup.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS shards CASCADE; 2 | 3 | CREATE TABLE shards ( 4 | id BIGINT, 5 | value VARCHAR 6 | ) PARTITION BY HASH (id); 7 | 8 | -- DROP TABLE IF EXISTS shard_0; 9 | CREATE TABLE shard_0 PARTITION OF shards FOR VALUES WITH (MODULUS 5, REMAINDER 0); 10 | -- DROP TABLE IF EXISTS shard_1; 11 | CREATE TABLE shard_1 PARTITION OF shards FOR VALUES WITH (MODULUS 5, REMAINDER 1); 12 | -- DROP TABLE IF EXISTS shard_2; 13 | CREATE TABLE shard_2 PARTITION OF shards FOR VALUES WITH (MODULUS 5, REMAINDER 2); 14 | -- DROP TABLE IF EXISTS shard_3; 15 | CREATE TABLE shard_3 PARTITION OF shards FOR VALUES WITH (MODULUS 5, REMAINDER 3); 16 | -- DROP TABLE IF EXISTS shard_4; 17 | CREATE TABLE shard_4 PARTITION OF shards FOR VALUES WITH (MODULUS 5, REMAINDER 4); 18 | 19 | 20 | INSERT INTO shards SELECT generate_series(1, 500), 'value'; 21 | 22 | SELECT * FROM shard_0 ORDER BY id LIMIT 10; 23 | SELECT * FROM shard_1 ORDER BY id LIMIT 10; 24 | SELECT * FROM shard_2 ORDER BY id LIMIT 10; 25 | SELECT * FROM shard_3 ORDER BY id LIMIT 10; 26 | SELECT * FROM shard_4 ORDER BY id LIMIT 10; 27 | -------------------------------------------------------------------------------- /tests/sharding/query_routing.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Set up all the shards. 5 | # sudo service postgresql restart 6 | 7 | echo "Giving Postgres 5 seconds to start up..."
8 | 9 | # sleep 5 10 | 11 | # psql -f query_routing_setup.sql 12 | 13 | psql -h 127.0.0.1 -p 6432 -f query_routing_test_insert.sql 14 | 15 | psql -h 127.0.0.1 -p 6432 -f query_routing_test_select.sql 16 | 17 | psql -e -h 127.0.0.1 -p 6432 -f query_routing_test_primary_replica.sql 18 | 19 | psql -f query_routing_test_validate.sql 20 | -------------------------------------------------------------------------------- /tests/sharding/query_routing_setup.sql: -------------------------------------------------------------------------------- 1 | DROP DATABASE IF EXISTS shard0; 2 | DROP DATABASE IF EXISTS shard1; 3 | DROP DATABASE IF EXISTS shard2; 4 | DROP DATABASE IF EXISTS some_db; 5 | 6 | CREATE DATABASE shard0; 7 | CREATE DATABASE shard1; 8 | CREATE DATABASE shard2; 9 | CREATE DATABASE some_db; 10 | 11 | \c shard0 12 | 13 | DROP TABLE IF EXISTS data CASCADE; 14 | 15 | CREATE TABLE data ( 16 | id BIGINT, 17 | value VARCHAR 18 | ) PARTITION BY HASH (id); 19 | 20 | CREATE TABLE data_shard_0 PARTITION OF data FOR VALUES WITH (MODULUS 3, REMAINDER 0); 21 | 22 | \c shard1 23 | 24 | DROP TABLE IF EXISTS data CASCADE; 25 | 26 | CREATE TABLE data ( 27 | id BIGINT, 28 | value VARCHAR 29 | ) PARTITION BY HASH (id); 30 | 31 | CREATE TABLE data_shard_1 PARTITION OF data FOR VALUES WITH (MODULUS 3, REMAINDER 1); 32 | 33 | 34 | \c shard2 35 | 36 | DROP TABLE IF EXISTS data CASCADE; 37 | 38 | CREATE TABLE data ( 39 | id BIGINT, 40 | value VARCHAR 41 | ) PARTITION BY HASH (id); 42 | 43 | CREATE TABLE data_shard_2 PARTITION OF data FOR VALUES WITH (MODULUS 3, REMAINDER 2); 44 | 45 | 46 | \c some_db 47 | 48 | DROP TABLE IF EXISTS data CASCADE; 49 | 50 | CREATE TABLE data ( 51 | id BIGINT, 52 | value VARCHAR 53 | ); 54 | 55 | DROP ROLE IF EXISTS sharding_user; 56 | DROP ROLE IF EXISTS other_user; 57 | DROP ROLE IF EXISTS simple_user; 58 | CREATE ROLE sharding_user ENCRYPTED PASSWORD 'sharding_user' LOGIN; 59 | CREATE ROLE other_user ENCRYPTED PASSWORD 'other_user' LOGIN; 60 | CREATE ROLE simple_user ENCRYPTED PASSWORD 'simple_user' LOGIN; 61 | 62 | GRANT CONNECT ON DATABASE shard0 TO sharding_user; 63 | GRANT CONNECT ON DATABASE shard1 TO sharding_user; 64 | GRANT CONNECT ON DATABASE shard2 TO sharding_user; 65 | 66 | GRANT CONNECT ON DATABASE shard0 TO other_user; 67 | GRANT CONNECT ON DATABASE shard1 TO other_user; 68 | GRANT CONNECT ON DATABASE shard2 TO other_user; 69 | 70 | GRANT CONNECT ON DATABASE some_db TO simple_user; 71 | 72 | \c shard0 73 | CREATE EXTENSION IF NOT EXISTS pg_stat_statements; 74 | GRANT EXECUTE ON FUNCTION pg_stat_statements_reset TO sharding_user; 75 | GRANT ALL ON SCHEMA public TO sharding_user; 76 | GRANT ALL ON TABLE data TO sharding_user; 77 | GRANT ALL ON SCHEMA public TO other_user; 78 | GRANT ALL ON TABLE data TO other_user; 79 | GRANT EXECUTE ON FUNCTION pg_stat_statements_reset TO other_user; 80 | 81 | \c shard1 82 | CREATE EXTENSION IF NOT EXISTS pg_stat_statements; 83 | GRANT EXECUTE ON FUNCTION pg_stat_statements_reset TO sharding_user; 84 | GRANT ALL ON SCHEMA public TO sharding_user; 85 | GRANT ALL ON TABLE data TO sharding_user; 86 | GRANT ALL ON SCHEMA public TO other_user; 87 | GRANT ALL ON TABLE data TO other_user; 88 | GRANT EXECUTE ON FUNCTION pg_stat_statements_reset TO other_user; 89 | 90 | 91 | \c shard2 92 | CREATE EXTENSION IF NOT EXISTS pg_stat_statements; 93 | GRANT EXECUTE ON FUNCTION pg_stat_statements_reset TO sharding_user; 94 | GRANT ALL ON SCHEMA public TO sharding_user; 95 | GRANT ALL ON TABLE data TO sharding_user; 96 | GRANT ALL ON SCHEMA public TO 
other_user; 97 | GRANT ALL ON TABLE data TO other_user; 98 | GRANT EXECUTE ON FUNCTION pg_stat_statements_reset TO other_user; 99 | 100 | \c some_db 101 | CREATE EXTENSION IF NOT EXISTS pg_stat_statements; 102 | GRANT EXECUTE ON FUNCTION pg_stat_statements_reset TO simple_user; 103 | GRANT ALL ON SCHEMA public TO simple_user; 104 | GRANT ALL ON TABLE data TO simple_user; 105 | -------------------------------------------------------------------------------- /tests/sharding/query_routing_test_insert.sql: -------------------------------------------------------------------------------- 1 | \set ON_ERROR_STOP on 2 | 3 | SET SHARDING KEY TO '1'; 4 | INSERT INTO data (id, value) VALUES (1, 'value_1'); 5 | 6 | SET SHARDING KEY TO '2'; 7 | INSERT INTO data (id, value) VALUES (2, 'value_1'); 8 | 9 | SET SHARDING KEY TO '3'; 10 | INSERT INTO data (id, value) VALUES (3, 'value_1'); 11 | 12 | SET SHARDING KEY TO '4'; 13 | INSERT INTO data (id, value) VALUES (4, 'value_1'); 14 | 15 | SET SHARDING KEY TO '5'; 16 | INSERT INTO data (id, value) VALUES (5, 'value_1'); 17 | 18 | SET SHARDING KEY TO '6'; 19 | INSERT INTO data (id, value) VALUES (6, 'value_1'); 20 | 21 | SET SHARDING KEY TO '7'; 22 | INSERT INTO data (id, value) VALUES (7, 'value_1'); 23 | 24 | SET SHARDING KEY TO '8'; 25 | INSERT INTO data (id, value) VALUES (8, 'value_1'); 26 | 27 | SET SHARDING KEY TO '9'; 28 | INSERT INTO data (id, value) VALUES (9, 'value_1'); 29 | 30 | SET SHARDING KEY TO '10'; 31 | INSERT INTO data (id, value) VALUES (10, 'value_1'); 32 | 33 | SET SHARDING KEY TO '11'; 34 | INSERT INTO data (id, value) VALUES (11, 'value_1'); 35 | 36 | SET SHARDING KEY TO '12'; 37 | INSERT INTO data (id, value) VALUES (12, 'value_1'); 38 | 39 | SET SHARDING KEY TO '13'; 40 | INSERT INTO data (id, value) VALUES (13, 'value_1'); 41 | 42 | SET SHARDING KEY TO '14'; 43 | INSERT INTO data (id, value) VALUES (14, 'value_1'); 44 | 45 | SET SHARDING KEY TO '15'; 46 | INSERT INTO data (id, value) VALUES (15, 'value_1'); 47 | 48 | SET SHARDING KEY TO '16'; 49 | INSERT INTO data (id, value) VALUES (16, 'value_1'); 50 | 51 | set sharding key to '17'; 52 | INSERT INTO data (id, value) VALUES (17, 'value_1'); 53 | 54 | SeT SHaRDInG KeY to '18'; 55 | INSERT INTO data (id, value) VALUES (18, 'value_1'); 56 | -------------------------------------------------------------------------------- /tests/sharding/query_routing_test_primary_replica.sql: -------------------------------------------------------------------------------- 1 | \set ON_ERROR_STOP on 2 | 3 | SET SERVER ROLE TO 'primary'; 4 | SET SHARDING KEY TO '1'; 5 | INSERT INTO data (id, value) VALUES (1, 'value_1'); 6 | 7 | SET SERVER ROLE TO 'replica'; 8 | SET SHARDING KEY TO '1'; 9 | SELECT * FROM data WHERE id = 1; 10 | 11 | --- 12 | 13 | SET SERVER ROLE TO 'primary'; 14 | SET SHARDING KEY TO '2'; 15 | INSERT INTO data (id, value) VALUES (2, 'value_1'); 16 | 17 | SET SERVER ROLE TO 'replica'; 18 | SET SHARDING KEY TO '2'; 19 | SELECT * FROM data WHERE id = 2; 20 | 21 | --- 22 | 23 | SET SERVER ROLE TO 'primary'; 24 | SET SHARDING KEY TO '3'; 25 | INSERT INTO data (id, value) VALUES (3, 'value_1'); 26 | 27 | SET SERVER ROLE TO 'replica'; 28 | SET SHARDING KEY TO '3'; 29 | SELECT * FROM data WHERE id = 3; 30 | 31 | --- 32 | 33 | SET SERVER ROLE TO 'primary'; 34 | SET SHARDING KEY TO '4'; 35 | INSERT INTO data (id, value) VALUES (4, 'value_1'); 36 | 37 | SET SERVER ROLE TO 'replica'; 38 | SET SHARDING KEY TO '4'; 39 | SELECT * FROM data WHERE id = 4; 40 | 41 | --- 42 | 43 | SET SERVER ROLE TO 
'primary'; 44 | SET SHARDING KEY TO '5'; 45 | INSERT INTO data (id, value) VALUES (5, 'value_1'); 46 | 47 | SET SERVER ROLE TO 'replica'; 48 | SET SHARDING KEY TO '5'; 49 | SELECT * FROM data WHERE id = 5; 50 | 51 | --- 52 | 53 | SET SERVER ROLE TO 'primary'; 54 | SET SHARDING KEY TO '6'; 55 | INSERT INTO data (id, value) VALUES (6, 'value_1'); 56 | 57 | SET SERVER ROLE TO 'replica'; 58 | SET SHARDING KEY TO '6'; 59 | SELECT * FROM data WHERE id = 6; 60 | 61 | --- 62 | 63 | SET SERVER ROLE TO 'primary'; 64 | SET SHARDING KEY TO '7'; 65 | INSERT INTO data (id, value) VALUES (7, 'value_1'); 66 | 67 | SET SERVER ROLE TO 'replica'; 68 | SET SHARDING KEY TO '7'; 69 | SELECT * FROM data WHERE id = 7; 70 | 71 | --- 72 | 73 | SET SERVER ROLE TO 'primary'; 74 | SET SHARDING KEY TO '8'; 75 | INSERT INTO data (id, value) VALUES (8, 'value_1'); 76 | 77 | SET SERVER ROLE TO 'replica'; 78 | SET SHARDING KEY TO '8'; 79 | SELECT * FROM data WHERE id = 8; 80 | 81 | --- 82 | 83 | SET SERVER ROLE TO 'primary'; 84 | SET SHARDING KEY TO '9'; 85 | INSERT INTO data (id, value) VALUES (9, 'value_1'); 86 | 87 | SET SERVER ROLE TO 'replica'; 88 | SET SHARDING KEY TO '9'; 89 | SELECT * FROM data WHERE id = 9; 90 | 91 | --- 92 | 93 | \set ON_ERROR_STOP on 94 | 95 | SET SERVER ROLE TO 'primary'; 96 | SET SHARDING KEY TO '10'; 97 | INSERT INTO data (id, value) VALUES (10, 'value_1'); 98 | 99 | SET SERVER ROLE TO 'replica'; 100 | SET SHARDING KEY TO '10'; 101 | SELECT * FROM data WHERE id = 10; 102 | 103 | --- 104 | 105 | SET SERVER ROLE TO 'primary'; 106 | SET SHARDING KEY TO '11'; 107 | INSERT INTO data (id, value) VALUES (11, 'value_1'); 108 | 109 | SET SERVER ROLE TO 'replica'; 110 | SET SHARDING KEY TO '11'; 111 | SELECT * FROM data WHERE id = 11; 112 | 113 | --- 114 | 115 | SET SERVER ROLE TO 'primary'; 116 | SET SHARDING KEY TO '12'; 117 | INSERT INTO data (id, value) VALUES (12, 'value_1'); 118 | 119 | SET SERVER ROLE TO 'replica'; 120 | SET SHARDING KEY TO '12'; 121 | SELECT * FROM data WHERE id = 12; 122 | 123 | --- 124 | 125 | SET SERVER ROLE TO 'primary'; 126 | SET SHARDING KEY TO '13'; 127 | INSERT INTO data (id, value) VALUES (13, 'value_1'); 128 | 129 | SET SERVER ROLE TO 'replica'; 130 | SET SHARDING KEY TO '13'; 131 | SELECT * FROM data WHERE id = 13; 132 | 133 | --- 134 | 135 | SET SERVER ROLE TO 'primary'; 136 | SET SHARDING KEY TO '14'; 137 | INSERT INTO data (id, value) VALUES (14, 'value_1'); 138 | 139 | SET SERVER ROLE TO 'replica'; 140 | SET SHARDING KEY TO '14'; 141 | SELECT * FROM data WHERE id = 14; 142 | 143 | --- 144 | 145 | SET SERVER ROLE TO 'primary'; 146 | SELECT 1; 147 | 148 | SET SERVER ROLE TO 'replica'; 149 | SELECT 1; 150 | 151 | set server role to 'replica'; 152 | SeT SeRver Role TO 'PrImARY'; 153 | select 1; 154 | 155 | SET PRIMARY READS TO 'on'; 156 | SELECT 1; 157 | 158 | SET PRIMARY READS TO 'off'; 159 | SELECT 1; 160 | 161 | SET PRIMARY READS TO 'default'; 162 | SELECT 1; 163 | -------------------------------------------------------------------------------- /tests/sharding/query_routing_test_select.sql: -------------------------------------------------------------------------------- 1 | \set ON_ERROR_STOP on 2 | 3 | SET SHARDING KEY TO '1'; 4 | SELECT * FROM data WHERE id = 1; 5 | 6 | SET SHARDING KEY TO '2'; 7 | SELECT * FROM data WHERE id = 2; 8 | 9 | SET SHARDING KEY TO '3'; 10 | SELECT * FROM data WHERE id = 3; 11 | 12 | SET SHARDING KEY TO '4'; 13 | SELECT * FROM data WHERE id = 4; 14 | 15 | SET SHARDING KEY TO '5'; 16 | SELECT * FROM data WHERE id = 5; 17 | 18 | SET 
SHARDING KEY TO '6'; 19 | SELECT * FROM data WHERE id = 6; 20 | 21 | SET SHARDING KEY TO '7'; 22 | SELECT * FROM data WHERE id = 7; 23 | 24 | SET SHARDING KEY TO '8'; 25 | SELECT * FROM data WHERE id = 8; 26 | 27 | SET SHARDING KEY TO '9'; 28 | SELECT * FROM data WHERE id = 9; 29 | 30 | SET SHARDING KEY TO '10'; 31 | SELECT * FROM data WHERE id = 10; 32 | 33 | SET SHARDING KEY TO '11'; 34 | SELECT * FROM data WHERE id = 11; 35 | 36 | SET SHARDING KEY TO '12'; 37 | SELECT * FROM data WHERE id = 12; 38 | 39 | SET SHARDING KEY TO '13'; 40 | SELECT * FROM data WHERE id = 13; 41 | 42 | SET SHARDING KEY TO '14'; 43 | SELECT * FROM data WHERE id = 14; 44 | 45 | SET SHARDING KEY TO '15'; 46 | SELECT * FROM data WHERE id = 15; 47 | 48 | SET SHARDING KEY TO '16'; 49 | SELECT * FROM data WHERE id = 16; 50 | -------------------------------------------------------------------------------- /tests/sharding/query_routing_test_validate.sql: -------------------------------------------------------------------------------- 1 | \c shard0 2 | 3 | SELECT * FROM data; 4 | 5 | \c shard1 6 | 7 | SELECT * FROM data; 8 | 9 | \c shard2 10 | 11 | SELECT * FROM data; 12 | -------------------------------------------------------------------------------- /utilities/deb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Build an Ubuntu deb. 4 | # 5 | script_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 6 | deb_dir="/tmp/pgcat-build" 7 | export PACKAGE_VERSION=${1:-"1.1.1"} 8 | if [[ $(arch) == "x86_64" ]]; then 9 | export ARCH=amd64 10 | else 11 | export ARCH=arm64 12 | fi 13 | 14 | cd "$script_dir/.." 15 | cargo build --release 16 | 17 | rm -rf "$deb_dir" 18 | mkdir -p "$deb_dir/DEBIAN" 19 | mkdir -p "$deb_dir/usr/bin" 20 | mkdir -p "$deb_dir/etc/systemd/system" 21 | 22 | cp target/release/pgcat "$deb_dir/usr/bin/pgcat" 23 | chmod +x "$deb_dir/usr/bin/pgcat" 24 | 25 | cp pgcat.toml "$deb_dir/etc/pgcat.example.toml" 26 | cp pgcat.service "$deb_dir/etc/systemd/system/pgcat.service" 27 | 28 | (cat control | envsubst) > "$deb_dir/DEBIAN/control" 29 | cp postinst "$deb_dir/DEBIAN/postinst" 30 | cp postrm "$deb_dir/DEBIAN/postrm" 31 | cp prerm "$deb_dir/DEBIAN/prerm" 32 | 33 | chmod +x ${deb_dir}/DEBIAN/post* 34 | chmod +x ${deb_dir}/DEBIAN/pre* 35 | 36 | dpkg-deb \ 37 | --root-owner-group \ 38 | -z1 \ 39 | --build "$deb_dir" \ 40 | pgcat-${PACKAGE_VERSION}-ubuntu22.04-${ARCH}.deb 41 | -------------------------------------------------------------------------------- /utilities/generate_config_docs.py: -------------------------------------------------------------------------------- 1 | import re 2 | import tomli 3 | 4 | class DocGenerator: 5 | def __init__(self, filename): 6 | self.doc = [] 7 | self.current_section = "" 8 | self.current_comment = [] 9 | self.current_field_name = "" 10 | self.current_field_value = [] 11 | self.current_field_unset = False 12 | self.filename = filename 13 | 14 | def write(self): 15 | with open("../CONFIG.md", "w") as text_file: 16 | text_file.write("# PgCat Configurations \n") 17 | for entry in self.doc: 18 | if entry["name"] == "__section__": 19 | text_file.write("## `" + entry["section"] + "` Section" + "\n") 20 | text_file.write("\n") 21 | continue 22 | text_file.write("### " + entry["name"]+ "\n") 23 | text_file.write("```"+ "\n") 24 | text_file.write("path: " + entry["fqdn"]+ "\n") 25 | text_file.write("default: " + entry["defaults"].strip()+ "\n") 26 | if entry["example"] is not None: 27 | 
text_file.write("example: " + entry["example"].strip()+ "\n") 28 | text_file.write("```"+ "\n") 29 | text_file.write("\n") 30 | text_file.write(entry["comment"]+ "\n") 31 | text_file.write("\n") 32 | 33 | def save_entry(self): 34 | if len(self.current_field_name) == 0: 35 | return 36 | if len(self.current_comment) == 0: 37 | return 38 | self.current_section = self.current_section.replace("sharded_db", "") 39 | self.current_section = self.current_section.replace("simple_db", "") 40 | self.current_section = self.current_section.replace("users.0", "users.") 41 | self.current_section = self.current_section.replace("users.1", "users.") 42 | self.current_section = self.current_section.replace("shards.0", "shards.") 43 | self.current_section = self.current_section.replace("shards.1", "shards.") 44 | self.doc.append( 45 | { 46 | "name": self.current_field_name, 47 | "fqdn": self.current_section + "." + self.current_field_name, 48 | "section": self.current_section, 49 | "comment": "\n".join(self.current_comment), 50 | "defaults": self.current_field_value if not self.current_field_unset else "", 51 | "example": self.current_field_value if self.current_field_unset else None 52 | } 53 | ) 54 | self.current_comment = [] 55 | self.current_field_name = "" 56 | self.current_field_value = [] 57 | def parse(self): 58 | with open("../pgcat.toml", "r") as f: 59 | for line in f.readlines(): 60 | line = line.strip() 61 | if len(line) == 0: 62 | self.save_entry() 63 | 64 | if line.startswith("["): 65 | self.current_section = line[1:-1] 66 | self.current_field_name = "__section__" 67 | self.current_field_unset = False 68 | self.save_entry() 69 | 70 | elif line.startswith("#"): 71 | results = re.search("^#\s*([A-Za-z0-9_]+)\s*=(.+)$", line) 72 | if results is not None: 73 | self.current_field_name = results.group(1) 74 | self.current_field_value = results.group(2) 75 | self.current_field_unset = True 76 | self.save_entry() 77 | else: 78 | self.current_comment.append(line[1:].strip()) 79 | else: 80 | results = re.search("^\s*([A-Za-z0-9_]+)\s*=(.+)$", line) 81 | if results is None: 82 | continue 83 | self.current_field_name = results.group(1) 84 | self.current_field_value = results.group(2) 85 | self.current_field_unset = False 86 | self.save_entry() 87 | self.save_entry() 88 | return self 89 | 90 | 91 | DocGenerator("../pgcat.toml").parse().write() 92 | 93 | -------------------------------------------------------------------------------- /utilities/requirements.txt: -------------------------------------------------------------------------------- 1 | tomli 2 | --------------------------------------------------------------------------------