├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── go.yml │ └── release.yml ├── .gitignore ├── .golangci.yml ├── .goreleaser.yaml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── SECURITY.md ├── api ├── api-bundle.yml ├── error │ ├── error400.yml │ ├── error403.yml │ ├── error404.yml │ └── error500.yml ├── index.html ├── kuvasz-streamer.yml ├── schema │ ├── db.json │ ├── dbs.json │ ├── error.json │ ├── map.json │ ├── maps.json │ ├── tbl.json │ ├── tbls.json │ ├── url.json │ └── urls.json ├── styles.min.css └── web-components.min.js ├── docs ├── .gitignore ├── 010-use-cases.md ├── 020-installation.md ├── 030-getting-started.md ├── 040-streaming-modes.md ├── 045-running-modes.md ├── 050-configuration.md ├── 060-postgres-configuration.md ├── 065-metrics.md ├── 070-maintenance.md ├── 080-schema-modification.md ├── 090-architecture.md ├── 100-implementation.md ├── 404.html ├── Gemfile ├── Gemfile.lock ├── _config.yml ├── _sass │ └── color_schemes │ │ └── wider.scss ├── assets │ ├── images │ │ ├── architecture.png │ │ ├── consolidation.png │ │ ├── crash.png │ │ ├── favicon.ico │ │ ├── full_sync.png │ │ ├── initialsync.png │ │ ├── logo.png │ │ ├── multitenant.png │ │ ├── optimize.png │ │ ├── streaming.png │ │ └── upgrade.png │ └── mermaid │ │ ├── jquery.min.js │ │ └── mermaid.min.js ├── index.md └── loadtest.md ├── go.mod ├── go.sum ├── go.work ├── go.work.sum ├── package ├── etc │ ├── kuvasz │ │ ├── kuvasz-streamer.toml │ │ └── map.yaml │ └── rsyslog.d │ │ └── kuvasz-streamer.conf ├── kuvasz-streamer.service ├── postinstall.sh └── postremove.sh ├── streamer ├── api.go ├── config.go ├── configdb.go ├── full_sync.go ├── handle_auth.go ├── handle_db.go ├── handle_map.go ├── handle_tbl.go ├── handle_url.go ├── kuvasz-streamer.go ├── log.go ├── map.go ├── metadata.go ├── metrics.go ├── migrate.go ├── migrations │ └── 0001_initial.sql ├── process_clone.go ├── process_history.go ├── process_message.go ├── publication.go ├── replicate_database.go └── worker.go ├── test ├── conf │ ├── kuvasz-streamer-lite.toml │ ├── kuvasz-streamer-rate.toml │ ├── kuvasz-streamer-sqlite.toml │ ├── kuvasz-streamer.toml │ ├── map.yaml │ └── rate.yaml ├── database │ ├── dest.sql │ └── source.sql ├── detailed_testsuite │ └── 50-datatypes.robot ├── docker-compose.yml ├── kuvasz-streamer-gold.db ├── kuvasz-streamer-lite.db ├── load │ ├── conf │ │ ├── kuvasz-streamer.toml │ │ └── map.yaml │ ├── init │ ├── log │ │ └── .gitkeep │ ├── pgbench.sql │ ├── restart │ ├── run │ └── truncate.sql ├── log │ └── .gitkeep ├── map.sql ├── run └── testsuite │ ├── 00-common.robot │ ├── 10-sync.robot │ ├── 20-clone.robot │ ├── 30-append.robot │ ├── 40-history.robot │ ├── 50-datatypes.robot │ ├── 60-Toast.robot │ ├── 70-partitions.robot │ ├── 71-schema.robot │ ├── 80-api-db.robot │ ├── 81-api-url.robot │ ├── 82-api-tbl.robot │ └── 83-api-map.robot └── web ├── .eslintrc.js ├── .gitignore ├── index.html ├── package.json ├── prettier.config.js ├── public ├── favicon.ico └── manifest.json ├── src ├── app-bar.tsx ├── app.tsx ├── auth-provider.ts ├── common.tsx ├── data-provider.ts ├── db.tsx ├── in-memory-jwt.js ├── index.tsx ├── layout.tsx ├── logo.tsx ├── map.tsx ├── soft-theme.ts ├── tbl.tsx ├── url.tsx ├── users.json └── vite-env.d.ts ├── tsconfig.json ├── vite.config.ts └── yarn.lock /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a 
report to help us improve 4 | title: "[BUG]" 5 | labels: bug 6 | assignees: kuvasz-io 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Setup source database 16 | 2. Configure 17 | 3. Run streamer 18 | 4. Check destination database 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Logs** 24 | - Source database logs: 25 | - Destination database logs: 26 | - Kuvasz-streamer logs: 27 | 28 | **Environment** 29 | - OS: 30 | - Source database version: 31 | - Destination database version: 32 | 33 | **Additional context** 34 | Add any other context about the problem here. 35 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "[FR]" 5 | labels: enhancement 6 | assignees: kuvasz-io 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | jobs: 10 | build: 11 | runs-on: self-hosted 12 | steps: 13 | - uses: actions/checkout@v4 14 | - name: Web 15 | run: make web 16 | - name: Vet 17 | run: make check 18 | - name: Build 19 | run: make build 20 | - name: Run tests 21 | run: make test 22 | - name: Build and publish docs 23 | run: make docs 24 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - "*" 7 | 8 | permissions: 9 | contents: write 10 | packages: write 11 | 12 | jobs: 13 | goreleaser: 14 | runs-on: self-hosted 15 | steps: 16 | - name: Checkout 17 | uses: actions/checkout@v4 18 | with: 19 | fetch-depth: 0 20 | - name: Docker Login 21 | uses: docker/login-action@v1 22 | with: 23 | registry: ghcr.io 24 | username: ${{ github.repository_owner }} 25 | password: ${{ secrets.GITHUB_TOKEN }} 26 | - name: Release 27 | run: goreleaser release 28 | env: 29 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 30 | NFPM_DEFAULT_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} 31 | - name: Update RPM repository 32 | run: make rpmrepo 33 | - name: Update APT repository 34 | env: 35 | GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} 36 | run: make aptrepo 37 | 38 | 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | kuvasz-streamer 2 | .vscode 3 | .DS_Store 4 | streamer/admin/ 5 | 
test/kuvasz-streamer.db 6 | test/log/*.xml 7 | test/log/*.log 8 | test/log/*.html 9 | test/*.xml 10 | test/*.log 11 | test/*.html 12 | dist/ 13 | go.work 14 | go.work.sum -------------------------------------------------------------------------------- /.goreleaser.yaml: -------------------------------------------------------------------------------- 1 | version: 1 2 | 3 | before: 4 | hooks: 5 | - make web 6 | 7 | gomod: 8 | proxy: true 9 | env: 10 | - GOPROXY=https://proxy.golang.org,direct 11 | - GOSUMDB=sum.golang.org 12 | - GOPRIVATE=github.com/kuvasz-io/kuvasz-streamer 13 | 14 | mod: readonly 15 | gobinary: go 16 | 17 | builds: 18 | - env: 19 | - CGO_ENABLED=0 20 | goos: 21 | - linux 22 | - windows 23 | - darwin 24 | - freebsd 25 | goarch: 26 | - amd64 27 | - arm64 28 | 29 | main: ./streamer/ 30 | 31 | archives: 32 | - format: tar.gz 33 | name_template: >- 34 | {{ .ProjectName }}_ 35 | {{- title .Os }}_ 36 | {{- if eq .Arch "amd64" }}x86_64 37 | {{- else if eq .Arch "386" }}i386 38 | {{- else }}{{ .Arch }}{{ end }} 39 | {{- if .Arm }}v{{ .Arm }}{{ end }} 40 | format_overrides: 41 | - goos: windows 42 | format: zip 43 | 44 | nfpms: 45 | - id: default 46 | vendor: Kuvasz.io 47 | homepage: https://streamer.kuvasz.io/ 48 | maintainer: Kuvasz 49 | description: |- 50 | Kuvasz-Streamer is a Postgres-to-Postgres 51 | data consolidation and change data capture project. 52 | license: AGPL-3.0 53 | formats: 54 | - deb 55 | - rpm 56 | umask: 0o002 57 | provides: 58 | - kuvasz-streamer 59 | contents: 60 | - src: package/kuvasz-streamer.service 61 | dst: /usr/lib/systemd/system/kuvasz-streamer.service 62 | - src: package/etc/ 63 | dst: /etc 64 | type: tree 65 | scripts: 66 | postinstall: package/postinstall.sh 67 | postremove: package/postremove.sh 68 | rpm: 69 | signature: 70 | key_file: "{{ .Env.HOME }}/private.pgp" 71 | deb: 72 | signature: 73 | key_file: "{{ .Env.HOME }}/private.pgp" 74 | 75 | dockers: 76 | - image_templates: ["ghcr.io/kuvasz-io/{{ .ProjectName }}:{{ .Version }}-amd64"] 77 | dockerfile: Dockerfile 78 | use: buildx 79 | build_flag_templates: 80 | - --platform=linux/amd64 81 | - --label=org.opencontainers.image.title={{ .ProjectName }} 82 | - --label=org.opencontainers.image.description={{ .ProjectName }} 83 | - --label=org.opencontainers.image.url=https://github.com/kuvasz-io/{{ .ProjectName }} 84 | - --label=org.opencontainers.image.source=https://github.com/kuvasz-io/{{ .ProjectName }} 85 | - --label=org.opencontainers.image.version={{ .Version }} 86 | - --label=org.opencontainers.image.created={{ time "2006-01-02T15:04:05Z07:00" }} 87 | - --label=org.opencontainers.image.revision={{ .FullCommit }} 88 | - --label=org.opencontainers.image.licenses=AGPL-3.0 89 | - image_templates: ["ghcr.io/kuvasz-io/{{ .ProjectName }}:{{ .Version }}-arm64v8"] 90 | goarch: arm64 91 | dockerfile: Dockerfile 92 | use: buildx 93 | build_flag_templates: 94 | - --platform=linux/arm64/v8 95 | - --label=org.opencontainers.image.title={{ .ProjectName }} 96 | - --label=org.opencontainers.image.description={{ .ProjectName }} 97 | - --label=org.opencontainers.image.url=https://github.com/kuvasz-io/{{ .ProjectName }} 98 | - --label=org.opencontainers.image.source=https://github.com/kuvasz-io/{{ .ProjectName }} 99 | - --label=org.opencontainers.image.version={{ .Version }} 100 | - --label=org.opencontainers.image.created={{ time "2006-01-02T15:04:05Z07:00" }} 101 | - --label=org.opencontainers.image.revision={{ .FullCommit }} 102 | - --label=org.opencontainers.image.licenses=MIT 103 | docker_manifests: 104 | 
- name_template: ghcr.io/kuvasz-io/{{ .ProjectName }}:{{ .Version }} 105 | image_templates: 106 | - ghcr.io/kuvasz-io/{{ .ProjectName }}:{{ .Version }}-amd64 107 | - ghcr.io/kuvasz-io/{{ .ProjectName }}:{{ .Version }}-arm64v8 108 | - name_template: ghcr.io/kuvasz-io/{{ .ProjectName }}:latest 109 | image_templates: 110 | - ghcr.io/kuvasz-io/{{ .ProjectName }}:{{ .Version }}-amd64 111 | - ghcr.io/kuvasz-io/{{ .ProjectName }}:{{ .Version }}-arm64v8 112 | 113 | changelog: 114 | disable: true 115 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 
58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | abuse@kuvasz.io. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## Contributing 2 | 3 | Contributions are welcome, and they are greatly appreciated! Every little helps, and credit will always be given. 
4 | 5 | When contributing to this repository, please first discuss the change you wish to make on the GitHub Discussions board before making a change. 6 | 7 | Please note we have a code of conduct (here)[https://github.com/kuvasz-io/kuvasz-streamer/blob/main/CODE_OF_CONDUCT.md], please follow it in all your interactions with the project. 8 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | 3 | LABEL maintainer="kuvasz.io " 4 | 5 | COPY ./kuvasz-streamer / 6 | 7 | ENTRYPOINT ["/kuvasz-streamer"] 8 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | BINARY := kuvasz-streamer 2 | GITBRANCH := $(shell git branch | grep \* | cut -d ' ' -f2) 3 | CI_COMMIT_REF_NAME ?= ${GITBRANCH} 4 | HASH := $(shell git rev-parse --short HEAD) 5 | COUNTREF := $(shell git rev-list HEAD | wc -l | tr -d ' ') 6 | VERSION := ${CI_COMMIT_REF_NAME}-${COUNTREF}-${HASH} 7 | BUILD := $(shell date +%Y%m%d%H%M%S) 8 | CONTAINER := ${REGISTRY}/${BINARY}:${VERSION} 9 | CONTAINER-CI := ${REGISTRY}/${BINARY}:ci 10 | CONTAINER-LATEST := ${REGISTRY}/${BINARY}:${CI_COMMIT_REF_NAME} 11 | LDFLAGS += -X ${BINARY}.Version=${VERSION} 12 | LDFLAGS += -X ${BINARY}.Build=${BUILD} 13 | 14 | all: web check build vulncheck 15 | 16 | web: 17 | cd web; yarn install; yarn build --outDir ../streamer/admin 18 | 19 | check: 20 | staticcheck -checks=all ./... 21 | go vet ./... 22 | golangci-lint run 23 | govulncheck ./... 24 | 25 | build: 26 | go build -o ${BINARY} -ldflags="${LDFLAGS}" ./streamer/*.go 27 | 28 | vulncheck: 29 | govulncheck -mode=binary kuvasz-streamer 30 | 31 | release: 32 | goreleaser release --clean --snapshot 33 | 34 | rpmrepo: 35 | cp dist/*.rpm /var/www/caddy/rpm 36 | /var/www/caddy/rpm; createrepo_c -v /var/www/caddy/rpm 37 | 38 | aptrepo: 39 | aptly repo add kuvasz dist/*.deb 40 | aptly publish update --passphrase="${GPG_PASSPHRASE}" --batch=true stable filesystem:caddy: 41 | 42 | test: 43 | cd test; ./run 44 | 45 | docs: 46 | cd docs; bundle exec jekyll build 47 | rm -rf /var/www/caddy/streamer/* 48 | cp -r docs/_site/* /var/www/caddy/streamer 49 | 50 | clean: 51 | rm -rf ${BINARY} streamer/admin web/dist dist 52 | 53 | .PHONY: web check build vulncheck release rpmrepo aptrepo test docs clean 54 | 55 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kuvasz-Streamer 2 | 3 | Kuvasz-streamer is an open source change data capture (CDC) project that focuses exclusively on Postgres. It is tightly integrated with Postgres Logical Replication to provide high performance, low latency replication. 4 | 5 | ## Features 6 | 7 | ### Lightweight 8 | 9 | Kuvasz-streamer is a lightweight service written in Go that has no dependencies and no queuing. Run it as a system service or in a Docker container. It can run in a full declarative mode where the configuration map is stored in a read-only YAML file and no files are written to disk. This mode is suitable for a CI/CD pipeline based configuration and a Kubernetes deployment. An interactive, database-backed mode is supported where the web interface can be used to modify the mapping configuration at runtime. 
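For illustration, a minimal declarative mapping file (the same shape used in the Getting Started guide) maps one source database and one table to its destination:

```yaml
- database: source
  urls:
    - url: postgres://postgres:postgres@source/source?replication=database&application_name=repl_source
      sid: source            # source identifier stored alongside replicated rows
  tables:
    employee:
      target: emp            # destination table name
      type: append           # streaming mode: clone, append or history
```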
10 | 11 | ### High-performance 12 | 13 | Kuvasz-streamer uses the Postgres COPY protocol to perform the initial sync and the logical replication protocol afterwards. 14 | 15 | It opens multiple connections to the destination database and load-shares among them. 16 | 17 | It batches updates into separate transactions to significantly increase performance. 18 | 19 | And in order not to overload a production database server, it also supports global rate-limiting. 20 | 21 | Kuvasz-streamer was [benchmarked](https://kuvasz.io/kuvasz-streamer-load-test/) at 10K tps with less than 1 second latency. 22 | 23 | ### Batteries included 24 | 25 | Kuvasz-streamer takes the pain out of managing publications and replication slots: 26 | 27 | - It creates missing publications and replication slots on startup 28 | - It adds and removes configured tables from publications automatically 29 | - It performs a full sync whenever a new table is added 30 | 31 | It is also fully observable, providing Prometheus metrics and extensive logging. 32 | 33 | ### Flexible 34 | 35 | Multiple table streaming modes are supported: 36 | 37 | - Clone: replicate the source table as-is 38 | - Append-only: replicate the source table but don't delete any records 39 | - History: keep a full history of all changes with a timestamp 40 | 41 | ### Full Postgres support 42 | 43 | Full PostgreSQL support is guaranteed with an extensive test suite: 44 | 45 | - All recent PostgreSQL versions 46 | - from 12 to 17 47 | - All data types 48 | - Partitions 49 | - Schemas 50 | - Source tables can be in any database and in any schema 51 | - Destination tables are in a single database and a single schema 52 | 53 | ### API and web interface 54 | 55 | The service provides an optional API and a web interface to easily manage publications and mappings. 56 | 57 | ## Use cases 58 | 59 | Kuvasz-streamer can be used for data consolidation, major version upgrades and other cases. 60 | 61 | ### Microservice database consolidation 62 | 63 | In a microservices architecture, each service has its own database. Kuvasz-streamer consolidates the databases of all services into a single data warehouse. The schema in the data warehouse does not have to follow the same one as the original services. 64 | 65 | ### Multitenant database consolidation 66 | 67 | In a sensitive multi-tenant environment, each tenant may be assigned a separate database to ensure that no cross-pollination of data occurs. Kuvasz-streamer can then be used to consolidate all the data in a single table with a tenant identifier to ease reporting. 68 | 69 | ### Database performance optimization 70 | 71 | In a typical microservice architecture, historical data is kept to a minimum in order to provide quick query times and low latency to end users. However, historical data is important for AI/ML and reporting. `kuvasz-streamer` implements a no-delete strategy for selected tables that does not propagate `DELETE` operations. Example usage includes transaction tables and audit history tables. 72 | 73 | ### Postgres major version upgrade 74 | 75 | Upgrading major versions of Postgres is a time-consuming task that requires substantial downtime. Kuvasz-streamer can be used to synchronize databases between different versions of Postgres and then perform a quick switchover. 76 | 77 | ## Documentation 78 | 79 | The documentation is available at https://streamer.kuvasz.io/ 80 | 81 | ## Installation 82 | 83 | Check the [Installation Guide](https://streamer.kuvasz.io/installation/) in the documentation.
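For example, on Debian or Ubuntu systems with the kuvasz.io APT repository already configured (the guide covers the repository setup and RPM-based systems as well), installation is a standard package install:

```bash
sudo apt-get update
sudo apt-get install kuvasz-streamer
```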
84 | 85 | ## Getting started 86 | 87 | Detailed instructions are available in the [Getting started](https://streamer.kuvasz.io/getting-started/) section of the documentation 88 | 89 | ## Discuss 90 | 91 | All ideas and discussions are welcome. We use the [GitHub Discussions](https://github.com/kuvasz-io/kuvasz-streamer/discussions) and [Mattermost](https://mattermost.kuvasz.io/signup_user_complete/?id=dxb6abuw3fgj5egbh7cz6gx3yy&md=link&sbr=fa) for that. 92 | 93 | ### Pull Request Process 94 | 95 | Add tests for your changes. 96 | Ensure the project builds and passes all tests. 97 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | We release patches for security vulnerabilities. Which versions are eligible for 6 | receiving such patches depends on the CVSS v3.0 Rating: 7 | 8 | | CVSS v3.0 | Supported Versions | 9 | | --------- | ----------------------------------------- | 10 | | 9.0-10.0 | Releases within the previous three months | 11 | | 4.0-8.9 | Most recent release | 12 | 13 | ## Reporting a Vulnerability 14 | 15 | Please report (suspected) security vulnerabilities to 16 | **[security@kuvasz.io](mailto:security@kuvasz.io)**. You will receive a response from 17 | us within 48 hours. If the issue is confirmed, we will release a patch as soon 18 | as possible depending on complexity but historically within a few days. 19 | -------------------------------------------------------------------------------- /api/error/error400.yml: -------------------------------------------------------------------------------- 1 | description: Bad Request 2 | content: 3 | application/json: 4 | schema: 5 | type: object 6 | properties: 7 | code: 8 | type: string 9 | description: Error code 10 | example: "0001" 11 | message: 12 | type: string 13 | description: Badly formatted request 14 | example: Invalid request, check parameters 15 | details: 16 | type: string 17 | description: Low level error to help debugging, present only in developer mode 18 | example: Unmarshal error 19 | required: 20 | - code 21 | - message 22 | -------------------------------------------------------------------------------- /api/error/error403.yml: -------------------------------------------------------------------------------- 1 | description: Forbidden 2 | content: 3 | application/json: 4 | schema: 5 | type: object 6 | properties: 7 | code: 8 | type: string 9 | description: Error code 10 | example: 0003 11 | message: 12 | type: string 13 | description: User is not authorized to access this resource. Check authorization permissions and passed token. 14 | example: User is not authorized to access this resource. 
15 | details: 16 | type: string 17 | description: Low level error to help debugging, present only in developer mode 18 | required: 19 | - code 20 | - message 21 | -------------------------------------------------------------------------------- /api/error/error404.yml: -------------------------------------------------------------------------------- 1 | description: Not found 2 | content: 3 | application/json: 4 | schema: 5 | type: object 6 | properties: 7 | code: 8 | type: string 9 | description: Error code 10 | example: NOT_FOUND 11 | message: 12 | type: string 13 | description: ID is not found in collection 14 | example: Database schema not found 15 | required: 16 | - code 17 | - message 18 | -------------------------------------------------------------------------------- /api/error/error500.yml: -------------------------------------------------------------------------------- 1 | description: Server error 2 | content: 3 | application/json: 4 | schema: 5 | type: object 6 | properties: 7 | code: 8 | type: string 9 | description: Error code 10 | example: 0000 11 | message: 12 | type: string 13 | description: Service is currently unable to process request. 14 | example: Server is currently unable to process request. Try later. 15 | details: 16 | type: string 17 | description: Low level error to help debugging, present only in developer mode 18 | example: No database connection 19 | required: 20 | - code 21 | - message 22 | -------------------------------------------------------------------------------- /api/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Juggle OpenAPI 7 | 8 | 9 | 10 | 11 | 12 | 13 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /api/schema/db.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "description": "Database schema", 4 | "properties": { 5 | "id": { 6 | "type": "integer", 7 | "description": "Database identifier", 8 | "example": 1 9 | }, 10 | "name": { 11 | "type": "string", 12 | "description": "Database name", 13 | "example": "wordpress" 14 | } 15 | }, 16 | "required": [ 17 | "id", "name" 18 | ] 19 | } -------------------------------------------------------------------------------- /api/schema/dbs.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "array", 3 | "items": { 4 | "$ref": "file:../api/schema/db.json" 5 | } 6 | } -------------------------------------------------------------------------------- /api/schema/error.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "code": { 5 | "type": "string", 6 | "description": "Error code", 7 | "example": "system_error" 8 | }, 9 | "error": { 10 | "type": "string", 11 | "description": "Localized error message", 12 | "example": "Invalid database ID" 13 | }, 14 | "info": { 15 | "type": "string", 16 | "description": "Low level error to help debugging, present only in developer mode", 17 | "example": "pq: invalid attribute length: name" 18 | } 19 | }, 20 | "required": [ 21 | "code", 22 | "error" 23 | ] 24 | } -------------------------------------------------------------------------------- /api/schema/map.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "id": { 5 | "type": "integer", 6 | "description": "Map entry 
identifier", 7 | "example": 1 8 | }, 9 | "db_id": { 10 | "type": "integer", 11 | "description": "Database identifier", 12 | "example": 1 13 | }, 14 | "db_name": { 15 | "type": "string", 16 | "description": "Database name", 17 | "example": "wordpress" 18 | }, 19 | "schema": { 20 | "type": "string", 21 | "description": "schema name", 22 | "example": "public" 23 | }, 24 | "name": { 25 | "type": "string", 26 | "description": "table name", 27 | "example": "accounts" 28 | }, 29 | "type": { 30 | "type": "string", 31 | "description": "Table type: clone, append, history", 32 | "example": "clone" 33 | }, 34 | "target": { 35 | "type": "string", 36 | "description": "Target table name", 37 | "example": "admin_accounts" 38 | }, 39 | "partitions": { 40 | "type": ["array","null"], 41 | "description": "Array of partition names", 42 | "items": { 43 | "type": "string" 44 | } 45 | }, 46 | "partitions_regex": { 47 | "type": ["string","null"], 48 | "description": "Regular expression matching partition names", 49 | "example": "^accounts_.*", 50 | "optional": true 51 | }, 52 | "source_columns": { 53 | "type": "object", 54 | "description":"Map of source column names", 55 | "properties": { 56 | "column_type": { 57 | "type": "string", 58 | "description": "Column type (text, int4, timestamp, ......)", 59 | "example": "text" 60 | }, 61 | "data_type_oid": { 62 | "type": "integer", 63 | "description": "Column postgres data type identifier", 64 | "example": 25 65 | }, 66 | "primary key": { 67 | "type":"boolean", 68 | "description": "Column is part of a primary key", 69 | "example": true 70 | } 71 | } 72 | }, 73 | "target_columns": { 74 | "type": "object", 75 | "description":"Map of source column names", 76 | "properties": { 77 | "column_type": { 78 | "type": "string", 79 | "description": "Column type (text, int4, timestamp, ......)", 80 | "example": "text" 81 | }, 82 | "data_type_oid": { 83 | "type": "integer", 84 | "description": "Column postgres data type identifier", 85 | "example": 25 86 | }, 87 | "primary key": { 88 | "type":"boolean", 89 | "description": "Column is part of a primary key", 90 | "example": true 91 | } 92 | } 93 | } 94 | }, 95 | "required": [ 96 | "id", "db_id", "db_name","name", "type", "target" 97 | ] 98 | } -------------------------------------------------------------------------------- /api/schema/maps.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "array", 3 | "items": { 4 | "$ref": "file:../api/schema/map.json" 5 | } 6 | } -------------------------------------------------------------------------------- /api/schema/tbl.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "id": { 5 | "type": "integer", 6 | "description": "Table identifier", 7 | "example": 1 8 | }, 9 | "db_id": { 10 | "type": "integer", 11 | "description": "Database identifier", 12 | "example": 1 13 | }, 14 | "db_name": { 15 | "type": "string", 16 | "description": "Database name", 17 | "example": "wordpress" 18 | }, 19 | "name": { 20 | "type": "string", 21 | "description": "table name", 22 | "example": "accounts" 23 | }, 24 | "type": { 25 | "type": "string", 26 | "description": "Table type: clone, append, history", 27 | "example": "clone" 28 | }, 29 | "target": { 30 | "type": "string", 31 | "description": "Target table name", 32 | "example": "admin_accounts" 33 | }, 34 | "partitions_regex": { 35 | "type": ["string","null"], 36 | "description": "Regular expression matching partition names", 
37 | "example": "^accounts_.*", 38 | "optional": true 39 | } 40 | }, 41 | "required": [ 42 | "id", "db_id", "name", "type" 43 | ] 44 | } -------------------------------------------------------------------------------- /api/schema/tbls.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "array", 3 | "items": { 4 | "$ref": "file:../api/schema/tbl.json" 5 | } 6 | } -------------------------------------------------------------------------------- /api/schema/url.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "id": { 5 | "type": "integer", 6 | "description": "URL identifier", 7 | "example": 1 8 | }, 9 | "db_id": { 10 | "type": "integer", 11 | "description": "Database identifier", 12 | "example": 1 13 | }, 14 | "db_name": { 15 | "type": "string", 16 | "description": "Database name", 17 | "example": "wordpress" 18 | }, 19 | "sid": { 20 | "type": "string", 21 | "description": "Source or tenant identifier for multi-tenant databases with the same schema", 22 | "example": "customer1" 23 | }, 24 | "url": { 25 | "type": "string", 26 | "description": "Postgres connection string", 27 | "example": "postgres://user:password@db.example.com/mydb" 28 | }, 29 | "up": { 30 | "type": "boolean", 31 | "description": "Database connection status", 32 | "example": true 33 | }, 34 | "error": { 35 | "type": "string", 36 | "description": "Last error message while connecting to database", 37 | "example": "Host unreachable" 38 | } 39 | }, 40 | "required": [ 41 | "id", "db_id", "sid", "url" 42 | ] 43 | } -------------------------------------------------------------------------------- /api/schema/urls.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "array", 3 | "items": { 4 | "$ref": "file:../api/schema/url.json" 5 | } 6 | } -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | _site 2 | .sass-cache 3 | .jekyll-cache 4 | .jekyll-metadata 5 | vendor 6 | log.html 7 | report.html -------------------------------------------------------------------------------- /docs/010-use-cases.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: Use cases 4 | permalink: /use-cases/ 5 | nav_order: 10 6 | --- 7 | # Use cases 8 | 9 | ## 1. Microservice database consolidation 10 | ![Consolidation](/assets/images/consolidation.png) 11 | 12 | In a microservices architecture, each service has its own database. This poses a number of problems for reporting: 13 | - Performing reporting queries on a production database will slow it down and make access times unpredictable. 14 | - Indexes required for reporting add an overhead on the insert/update/delete operations. 15 | - It is not possible to query across multiple databases. 16 | 17 | `kuvasz-streamer` consolidates all the databases of all services into a single data warehouse. Only the required tables and columns are replicated. 18 | 19 | The schema in the data warehouse does not have to follow the same one as the original services. 20 | 21 | ## 2. Multi-tenant database consolidation 22 | ![multinenant](/assets/images/multitenant.png) 23 | 24 | In a sensitive multi-tenant environment, each tenant is assigned a separate database to ensure that no cross-pollination of data occurs. 
However, all the tenant databases have the exact same schema. 25 | 26 | This poses a problem for cross-tenant reporting and customer support. `kuvasz-streamer` can be used to consolidate all the data in a single database. A newly added column `sid` identifies the source tenant database. 27 | 28 | ## 3. Database performance optimization 29 | ![optimize](/assets/images/optimize.png) 30 | In a high-performance system, historical data is kept to a minimum in order to provide quick query time and low latency to end users. A cleaner process usually deletes all data older than a certain number of weeks. If tables are partitioned, old paritions are dropped. 31 | 32 | However, historical data is important for, AI/ML, reporting and forensics. `kuvasz-streamer` can be configured to ignore the `DELETE` and `TRUNCATE` operations on some tables and only apply the `INSERT` and `UPDATE` operations. A separate cleaner running on the data warehouse database takes care of removing older historical data. 33 | 34 | Example usage includes transaction tables and audit history tables. 35 | 36 | ## 4. Postgres major version upgrade 37 | ![upgrade](/assets/images/upgrade.png) 38 | Upgrading major versions of Postgres is a time-consuming task that requires substantial downtime. `kuvasz-streamer` can be used to synchronize databases between different versions of Postgres and then performing a quick switchover. -------------------------------------------------------------------------------- /docs/020-installation.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: Installation 4 | permalink: /installation/ 5 | nav_order: 20 6 | --- 7 | # Installation 8 | 9 | ## Install on DEB based systems (Debian, Ubuntu, Kali, Raspbian, ...) 10 | 11 | ### Install kuvasz.io APT repository if it is not installed 12 | 13 | ```bash 14 | sudo mkdir -m 0755 -p /etc/apt/keyrings/ 15 | wget -O- https://apt.kuvasz.io/kuvasz.gpg | gpg --dearmor | sudo tee /etc/apt/keyrings/kuvasz.gpg > /dev/null 16 | sudo chmod 644 /etc/apt/keyrings/kuvasz.gpg 17 | echo "deb [signed-by=/etc/apt/keyrings/kuvasz.gpg] https://apt.kuvasz.io stable main" | sudo tee /etc/apt/sources.list.d/kuvasz.list 18 | sudo chmod 644 /etc/apt/sources.list.d/kuvasz.list 19 | ``` 20 | 21 | ### Install `kuvasz-streamer` 22 | 23 | ```bash 24 | sudo apt-get update 25 | sudo apt-get install kuvasz-streamer 26 | ``` 27 | 28 | ## Install on RPM based systems (RHEL/OEL/RockyLinux/...) 29 | 30 | ### Install kuvasz.io RPM repository if it is not already installed 31 | 32 | ```bash 33 | sudo cat < /etc/yum.repos.d/kuvasz.repo 34 | [kuvasz] 35 | name=Kuvasz.io 36 | baseurl=https://rpm.kuvasz.io 37 | enabled=1 38 | gpgcheck=1 39 | gpgkey=https://rpm.kuvasz.io/RPM-GPG-KEY-kuvasz 40 | sslverify=1 41 | sslcacert=/etc/pki/tls/certs/ca-bundle.crt 42 | EOF 43 | ``` 44 | 45 | ### Install `kuvasz-streamer` 46 | 47 | ```bash 48 | sudo dnf install -y kuvasz-streamer 49 | ``` 50 | 51 | ## Install manually 52 | 53 | 1. Navigate to the [Releases Page](https://github.com/kuvasz-io/kuvasz-streamer/releases). 54 | 1. Scroll down to the Assets section under the version that you want to install. 55 | 1. Download the .tar,gz or .zip version needed. 56 | 1. Unzip the package contents. 57 | 1. Create the necessary config and map files 58 | 1. 
Run 59 | 60 | ## Build from source 61 | 62 | Building from source assumes you are on Ubuntu 22.04 LTS 63 | 64 | ### Install dependencies 65 | 66 | Minimal requirements are `Make` and `git`, but you will also need PostgreSQL client for testing. 67 | 68 | ```bash 69 | sudo apt install build-essential git postgresql postgresql-contrib 70 | ``` 71 | 72 | ### Install web tools 73 | 74 | Install `node` and `yarn` to build the web administration interface. 75 | 76 | ```bash 77 | sudo snap install node --channel=20/stable --classic 78 | ``` 79 | 80 | ### Install Go and tools 81 | 82 | `kuvasz-streamer` requires Go 1.23 or higher. Install Go and GoReleaser using snaps, then install `staticcheck` and `govulncheck` from source and `golangci-lint` binary from its repository. Finally, add the local Go bin directory to the PATH. 83 | 84 | ```bash 85 | sudo snap install go --channel=1.23/stable --classic 86 | sudo snap install goreleaser --classic 87 | go install honnef.co/go/tools/cmd/staticcheck@latest 88 | go install golang.org/x/vuln/cmd/govulncheck@latest 89 | curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.61.0 90 | export PATH=${PATH}:$(go env GOPATH)/bin 91 | ``` 92 | 93 | ### Clone repository 94 | 95 | Clone repo from GitHub 96 | 97 | ```bash 98 | git clone https://github.com/kuvasz-io/kuvasz-streamer.git 99 | cd kuvasz-streamer 100 | ``` 101 | 102 | ### Build 103 | 104 | This step will download all dependencies and build the web interface and the binary for the underlying architecture 105 | 106 | ```bash 107 | make web 108 | make build 109 | ``` 110 | 111 | Run code checks 112 | 113 | This will run `staticcheck` and `golangci-lint` and `go vet` on the code to ensure it is clean. 114 | 115 | ```bash 116 | make check 117 | ``` 118 | 119 | Build packages 120 | 121 | This will build RPMs, DEBs and tarballs for all supported architectures. 122 | Create a GPG key for signing the packages then export it to a file before running the `goreleaser` command. 123 | 124 | ```bash 125 | gpg --generate-key 126 | gpg --output ${HOME}/private.pgp --armor --export-secret-key 127 | export NFPM_DEFAULT_PASSPHRASE= 128 | make release 129 | ``` 130 | 131 | ## Run test suite 132 | 133 | The test suite relies on Docker to set up instances of all supported version of PostgreSQL 134 | and on Robot Framework to run end-to-end tests for all the supported features. 135 | 136 | ### Install Docker 137 | 138 | First, install Docker following the instructions [here](https://docs.docker.com/engine/install/). 
139 | Then start it with 140 | 141 | ```bash 142 | sudo systemctl enable --now docker 143 | ``` 144 | 145 | ### Install pip 146 | 147 | Install the `pip` package manager and Postgres driver 148 | 149 | ```bash 150 | sudo apt install python3-pip 151 | ``` 152 | 153 | ### Install Robot Framework 154 | 155 | Then use `pip` to install Robot Framework and its dependencies 156 | 157 | ```bash 158 | pip3 install psycopg2-binary robotframework robotframework-databaselibrary 159 | ``` 160 | 161 | ### Run the test suite 162 | 163 | ```bash 164 | make test 165 | ``` 166 | -------------------------------------------------------------------------------- /docs/030-getting-started.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: Getting started 4 | permalink: /getting-started/ 5 | nav_order: 30 6 | --- 7 | # Getting started 8 | 9 | This guide runs a source and destination instance in Docker and `kuvasz-streamer` as a system service. It assumes running under Ubuntu 22.04 LTS 10 | 11 | ## Make sure Docker and Postgres are installed on the host 12 | 13 | ```bash 14 | sudo apt install docker.io postgresql postgresql-contrib 15 | ``` 16 | 17 | ## Start source database on port 6015 and create source schema 18 | 19 | Run the following in a first window 20 | 21 | ```bash 22 | sudo docker pull postgres:15 23 | sudo docker run -i -t --rm --name source \ 24 | -p 6015:5432 \ 25 | -e POSTGRES_PASSWORD=postgres \ 26 | postgres:15 -c wal_level=logical \ 27 | -c log_connections=on \ 28 | -c log_min_duration_statement=0 29 | ``` 30 | 31 | ## Start destination database on port 6016 and create destination schema 32 | 33 | Run this in a second window 34 | 35 | ```bash 36 | sudo docker pull postgres:16 37 | sudo docker run -i -t --rm --name dest \ 38 | -p 6016:5432 \ 39 | -e POSTGRES_PASSWORD=postgres \ 40 | postgres:16 \ 41 | -c log_connections=on \ 42 | -c log_min_duration_statement=0 43 | ``` 44 | 45 | ## Configure streamer 46 | 47 | In a third window, prepare the schemas in source and destination databases. 
48 | 49 | ```bash 50 | psql postgres://postgres:postgres@127.0.0.1:6015/postgres -c "create database source" 51 | psql postgres://postgres:postgres@127.0.0.1:6015/source -c "create table employee(id serial, name text, dob date, salary numeric)" 52 | psql postgres://postgres:postgres@127.0.0.1:6016/postgres -c "create database dest" 53 | psql postgres://postgres:postgres@127.0.0.1:6016/dest -c "create table emp(sid text, id int, name text, dob date)" 54 | ``` 55 | 56 | Then create streamer config file with minimal configuration 57 | 58 | ```bash 59 | cat < kuvasz-streamer.toml 60 | [database] 61 | url = "postgres://postgres:postgres@dest/dest?application_name=kuvasz-streamer" 62 | [app] 63 | map_file = "/etc/kuvasz/map.yaml" 64 | EOF 65 | ``` 66 | 67 | Create map file 68 | 69 | ```bash 70 | cat < map.yaml 71 | - database: source 72 | urls: 73 | - url: postgres://postgres:postgres@source/source?replication=database&application_name=repl_source 74 | sid: source 75 | tables: 76 | employee: 77 | target: emp 78 | type: append 79 | EOF 80 | ``` 81 | 82 | Start the streamer as a container 83 | 84 | ```bash 85 | sudo docker run -i -t --rm --name kuvasz-streamer \ 86 | --link source \ 87 | --link dest \ 88 | -v ./kuvasz-streamer.toml:/etc/kuvasz/kuvasz-streamer.toml \ 89 | -v ./map.yaml:/etc/kuvasz/map.yaml ghcr.io/kuvasz-io/kuvasz-streamer \ 90 | /kuvasz-streamer 91 | ``` 92 | 93 | ## Test 94 | 95 | In a fourth window, insert a record in the source database 96 | 97 | ```bash 98 | psql postgres://postgres:postgres@127.0.0.1:6015/source \ 99 | -c "insert into employee(name, dob, salary) values('tata', '1970-01-02', 2000)" 100 | ``` 101 | 102 | Now check it has been replicated to the destination database 103 | 104 | ```bash 105 | psql postgres://postgres:postgres@127.0.0.1:6016/dest \ 106 | -c "select * from emp" 107 | ``` 108 | -------------------------------------------------------------------------------- /docs/040-streaming-modes.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: Streaming modes 4 | permalink: /streaming-modes/ 5 | nav_order: 40 6 | --- 7 | 8 | # Streaming modes 9 | 10 | The streaming mode can be defined per table and affects how replication operations are applied on the destination. This is one of the main differences with normal Postgres logical replication where only exact copies are supported. 11 | 12 | ## type = `clone` 13 | These are table that need to be identical between source and destination and where historical data is not important. Example: product types, colors. 14 | 15 | - INSERT 16 | ```sql 17 | INSERT INTO destination(sid, ...) 18 | VALUES (SID, ...) 19 | ``` 20 | If key already exists, log error 21 | - UPDATE 22 | ```sql 23 | UPDATE destination 24 | SET ...=... 25 | WHERE sid=SID and PK=... 26 | ``` 27 | If key does not exist: insert row and log error. 28 | - DELETE 29 | ```sql 30 | DELETE FROM destination 31 | WHERE sid=SID AND PK=... 32 | ``` 33 | If key does not exist: log error. 34 | 35 | ## type = `append` 36 | In high-performance systems, it is important to keep a small number of historical events in the live service table and keep a much larger history in a data warfehouse. Examples: audit events, notifications, transactions. 37 | 38 | These tables behave the same as `clone` tables with the exception that `DELETE` and `TRUNCATE` are ignored. 39 | 40 | ## type = `history` 41 | History tables implement Slowly Changing Dimensions (SCD) type 2. 
They are useful to keep a complete history of all changes. Examples include changes in the salary field of an employee. History tables should be used carefully as they generate a lot of rows and the destination table may grow out of control. 42 | 43 | More information can be found in this [Wikipedia article](https://en.wikipedia.org/wiki/Slowly_changing_dimension). 44 | 45 | - INSERT 46 | ```sql 47 | INSERT INTO destination(sid, ..., kvsz_start, kvsz_end, kvsz_deleted) 48 | VALUES(SID, ..., '1900-01-01', '9999-01-01', false) 49 | ``` 50 | - If key already exists, log error 51 | - UPDATE 52 | ```sql 53 | UPDATE destination 54 | SET kvsz_end=now() 55 | WHERE sid=SID AND kvsz_end='9999-01-01' AND PK=... 56 | ``` 57 | ```sql 58 | INSERT INTO destination(sid, ..., kvsz_start, kvsz_end, kvsz_deleted) 59 | VALUES(SID, ..., now(), '9999-01-01', false) 60 | ``` 61 | If key does not exist, just insert the row and log error. 62 | - DELETE 63 | ```sql 64 | UPDATE destination 65 | SET kvsz_end=now(), kvsz_deleted=true 66 | WHERE sid=SID AND kvsz_end='9999-01-01' AND PK=... 67 | ``` 68 | If key does not exist, log error. 69 | - SELECT latest values 70 | ```sql 71 | SELECT * 72 | FROM destination 73 | WHERE sid=SID and id=ID and kvsz_end='9999-01-01' ``` 74 | - SELECT historical values 75 | ```sql 76 | SELECT * 77 | FROM destination 78 | WHERE sid=SID and id=ID and '2023-01-28' between kvsz_start and kvsz_end 79 | ``` 80 | 81 | ## History table example 82 | 83 | ### Add record 2020-01-01, salary=1000 84 | 85 | |sid|id|first_name|last_name|salary|kvsz_start|kvsz_end|kvsz_deleted| 86 | |---|--|----------|---------|------|----------|--------|------------| 87 | |1|1|John|Doe|1000|1900-01-01|9999-01-01|false 88 | 89 | 90 | ### Update record on 2023-01-01, salary=1200 91 | 92 | |sid|id|first_name|last_name|salary|kvsz_start|kvsz_end|kvsz_deleted| 93 | |---|--|----------|---------|------|----------|--------|------------| 94 | |1|1|John|Doe|1000|1900-01-01|2023-01-01|false 95 | |1|1|John|Doe|1200|2023-01-01|9999-01-01|false 96 | 97 | ### Update record on 2024-01-01, salary=2000 98 | 99 | |sid|id|first_name|last_name|salary|kvsz_start|kvsz_end|kvsz_deleted| 100 | |---|--|----------|---------|------|----------|--------|------------| 101 | |1|1|John|Doe|1000|1900-01-01|2023-01-01|false 102 | |1|1|John|Doe|1200|2023-01-01|2024-01-01|false 103 | |1|1|John|Doe|2000|2024-01-01|9999-01-01|false 104 | 105 | ### Delete record on 2024-06-01 106 | 107 | |sid|id|first_name|last_name|salary|kvsz_start|kvsz_end|kvsz_deleted| 108 | |---|--|----------|---------|------|----------|--------|------------| 109 | |1|1|John|Doe|1000|1900-01-01|2023-01-01|false 110 | |1|1|John|Doe|1200|2023-01-01|2024-01-01|false 111 | |1|1|John|Doe|2000|2024-01-01|2024-06-01|true 112 | -------------------------------------------------------------------------------- /docs/045-running-modes.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: Running modes 4 | permalink: /running-modes/ 5 | nav_order: 45 6 | --- 7 | 8 | # Running modes 9 | 10 | `kuvasz-streamer` supports two running modes, each suitable for a different environment. 11 | 12 | ## Declarative mode 13 | In this mode, the mapping configuration (databases, URLs, mappings) is statically configured in a YAML file. Any modification of the file requires a restart of the service. 14 | 15 | This mode is suitable in Kubernetes clusters where the streamer reads its configuration from a file generated by GitOps CI/CD pipelines.
It does not require any mounted ephemeral or persistent storage. The web administration and APIs runs in read-only mode. 16 | 17 | This mode is enabled when no database is specified in the configuration, ie when `app.map_database` is empty. 18 | 19 | ## Database mode 20 | In database mode, the streamer requires a persistent read/write storage for an SQLite database containing its mapping configuration. This allows the administrator to add and remove databases and mappings at runtime and call an API to refresh the configuration. 21 | 22 | This mode is suitable when running as a system service and experimentation with various mappings is desired. It is enabled by specifying the SQLite database path. All schema migrations are handled transparently by the service. 23 | 24 | 25 | -------------------------------------------------------------------------------- /docs/050-configuration.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: Configuration 4 | permalink: /configuration/ 5 | nav_order: 50 6 | --- 7 | # Configuration 8 | 9 | ## Service configuration 10 | 11 | `kuvasz-streamer` supports three configuration sources with the following order of priority. 12 | 13 | 1. Configuration file with TOML syntax. If the file name is not specified with the 14 | `--conf` command line switch, it searches for the following three files in order: 15 | - `kuvasz-streamer.toml` 16 | - `./conf/kuvasz-streamer.toml` 17 | - `/etc/kuvasz/kuvasz-streamer.toml` 18 | 19 | 2. Environment variables starting with the format `KUVASZ_section_parameter=value` 20 | 21 | 3. Command line arguments in the form `--section.parameters=value` 22 | 23 | |Section|Parameter|Type|Default|Description| 24 | |-------|---------|----|-------|-----------| 25 | |`server`|`name`|String|`kuvasz-streamer`|Server name to use in log shipping| 26 | |`server`|`address`|String|:8000|Server bind address| 27 | |`server`|`max_goroutines`|Integer|100|Number of concurrent API calls to process| 28 | |`server`|`read_timeout`|Integer|30|Maximum time (in seconds) allowed to send the whole request| 29 | |`server`|`read_header_timeout`|30|Integer|Maximum time (in seconds) allowed to send the header| 30 | |`server`|`write_timeout`|Integer|30|Maximum time (in seconds) allowed to write the whole response| 31 | |`server`|`idle_timeout`|Integer|30|Maximum time (in seconds) allowed between two requests on the same connection| 32 | |`server`|`max_header_bytes`|Integer|1000|Maximum size (in bytes) of the headers| 33 | |`maintenance`|`pprof`|String||Pprof bind adddress, typically `127.0.0.1:6060` when enabled| 34 | |`maintenance`|`start_delay`|Integer|0|Testing only: delay between full sync and replication start| 35 | |`database`|`url`|String||Destination database URL| 36 | |`database`|`schema`|String|`public`|Destination database schema to use| 37 | |`cors`|`allowed_origins`|Array of strings|Origin sites to allow, Use * for testing| 38 | |`cors`|`allow_methods`|String|`GET,POST,PATCH,PUT,DELETE`|Comma separated list of allowed methods, should not be changed| 39 | |`cors`|`allow_headers`|String|`Authorization,User-Agent,If-Modified-Since,Cache-Control,Content-Type,X-Total-Count`|Comma separated list of allowed headers, should not be changed| 40 | |`cors`|`allow_credentials`|Boolean|true|Switch to allow Authorization header| 41 | |`cors`|`max_age`|Integer|86400|Maximum time to use the CORS response in seconds| 42 | |`auth`|`admin_password`|String|`hash(admin)`|Web administrator password. 
Compatible with `mkpasswd` output. | 43 | |`auth`|`jwt_key`|String|`Y3OYHx7Y1KsRJPzJKqHGWfEaHsPbmwwSpPrXcND95Pw=`|JWT signing key. Generate a cryptographycally secure key with `openssl rand -base64 32`| 44 | |`auth`|`ttl`|Integer|300|Token validity period in seconds| 45 | |`app`|`map_file`|String|`map.yaml`|Table mapping file| 46 | |`app`|`map_database`|String||Table mapping file| 47 | |`app`|`num_workers`|Integer|2|Number of workers writing to the destination database| 48 | |`app`|`commit_delay`|Float|1.0|Delay in seconds between commits on the destination database| 49 | |`app`|`default_schema`|String|`public`|Default schema in source database| 50 | |`app`|`sync_rate`|Float|1_000_000_000|Number of rows/second to read globally when doing a full sync in order not to overload the source database| 51 | |`app`|`sync_burst`|Integer|1000|Number of rows to burst in case of delays in writing rows in the destination| 52 | 53 | 54 | ## Mapping file 55 | 56 | The mapping file is a YAML formatted file that maps the source databases and tables to the destinations. For each source database schema, create a top-level key with an identifier. Then list all the URLs to access engines with this schema and for each engine, specify a source ID `sid` to differentiate in the destination database. 57 | 58 | ```yaml 59 | # Top level key for all databases with the same schema 60 | - database: db1 61 | urls: 62 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6012/db1?replication=database&application_name=repl_db1 63 | sid: i1 # identifier in the destination database 64 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6013/db1?replication=database&application_name=repl_db1 65 | sid: i2 66 | # List all tables to be replicated 67 | tables: 68 | t1: 69 | t2: 70 | target: rt2 # Table name in destination database 71 | t3: 72 | type: append # Specify table type append 73 | t4: 74 | type: history # specify table type history 75 | - database: db2 76 | urls: 77 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6012/db2?replication=database&application_name=repl_db2 78 | sid: 12 79 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6013/db2?replication=database&application_name=repl_db2 80 | sid: 13 81 | tables: 82 | s1: 83 | ``` -------------------------------------------------------------------------------- /docs/060-postgres-configuration.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: Postgres configuration 4 | permalink: /postgres-configuration/ 5 | nav_order: 60 6 | --- 7 | # Postgres Configuration 8 | 9 | ## Postgres server configuration 10 | 11 | - Configure replication slots in `postgresql.conf` 12 | 13 | ```ini 14 | max_replication_slots = 10 15 | max_wal_senders = 10` -- there should be one slot for each replicated database plus one slot for each secondary server 16 | wal_level = logical 17 | ``` 18 | 19 | - Configure replication host in `pg_hba.conf` depending on where `kuvasz-streamer` is running. 
20 | 21 | ```text 22 | host replication all 0.0.0.0/0 scram-sha-256 23 | ``` 24 | 25 | - Create a replication user exclusively for `kuvasz-streamer` 26 | 27 | ```sql 28 | CREATE ROLE "kuvasz-streamer" WITH REPLICATION LOGIN PASSWORD 'streamer'; 29 | ``` 30 | 31 | ## Destination Schema 32 | 33 | The following constraints apply to the destination schema: 34 | 35 | - Target tables can have a subset of the source table's columns 36 | - Columns must have the same names and the same data types 37 | - The target table primary key should be the same as the source primary key -------------------------------------------------------------------------------- /docs/065-metrics.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: Metrics 4 | permalink: /metrics/ 5 | nav_order: 65 6 | --- 7 | # Metrics 8 | 9 | `kuvasz-streamer` maintains Prometheus metrics available on the `/metrics` endpoint. 10 | 11 | |Metric|Type|Labels|Description| 12 | |------|----|------|-----------| 13 | |`streamer_operations_total`|Counter|`database`, `sid`, `table`, `operation`, `result`|Total number of INSERT/UPDATE/DELETE operations| 14 | |`streamer_operations_seconds`|Histogram|`database`, `sid`, `table`, `operation`, `result`|Duration of INSERT/UPDATE/DELETE operations| 15 | |`streamer_sync_total_rows`|Counter|`database`, `sid`, `table`|Total number of rows synced| 16 | |`streamer_sync_total_bytes`|Counter|`database`, `sid`, `table`|Total number of bytes synced| 17 | |`streamer_jobs_total`|Counter|`channel`|Total number of jobs received per channel| 18 | |`url_heartbeat`|Gauge|`database`,`sid`|Timestamp of last known activity| 19 | -------------------------------------------------------------------------------- /docs/070-maintenance.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: Maintenance 4 | permalink: /maintenance/ 5 | nav_order: 70 6 | --- 7 | # Maintenance 8 | 9 | The best way to monitor the replication state is to use `kuvasz-agent` and the associated Postgres Grafana dashboard. 10 | 11 | - To check the replication slots 12 | ```sql 13 | SELECT * 14 | FROM pg_replication_slots; 15 | ``` 16 | 17 | - To check the replication status 18 | ```sql 19 | SELECT client_addr, state, sent_lsn, write_lsn, flush_lsn, replay_lsn 20 | FROM pg_stat_replication; 21 | ``` 22 | -------------------------------------------------------------------------------- /docs/080-schema-modification.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: Schema Modification 4 | permalink: /schema-modification/ 5 | nav_order: 80 6 | --- 7 | # Schema Modification 8 | 9 | ## Adding columns 10 | 11 | If a column is added in a source database, it is ignored until it is added in the destination database. There is no automatic synchronization of columns. In most data consolidation scenarios, only a subset of the source columns is required. 12 | 13 | ## Deleting columns 14 | 15 | Columns should not be deleted from source tables. If they are deleted for any reason, they are ignored in the destination table and the destination column's default value is used. If the destination column does not allow NULLs and no default value is defined, the insert/update will fail. 16 | 17 | ## Changing column types 18 | 19 | If a source column's type is changed, the destination column type should be changed to match.
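For illustration, here is a minimal sketch of applying the same type change to the source and destination in lockstep using `pgx`. The table name, column name, new type and connection URLs are illustrative assumptions, not values used by the streamer itself.

```go
package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()

	// Hypothetical example: widen an integer column to bigint.
	const ddl = `ALTER TABLE t1 ALTER COLUMN amount TYPE bigint;`

	// Apply the identical change on both ends so replicated values
	// keep matching the destination column definition.
	for _, url := range []string{
		"postgres://kuvasz:kuvasz@source-host:5432/db1", // source database (assumed URL)
		"postgres://kuvasz:kuvasz@dest-host:5432/dest",  // destination database (assumed URL)
	} {
		conn, err := pgx.Connect(ctx, url)
		if err != nil {
			log.Fatal(err)
		}
		if _, err := conn.Exec(ctx, ddl); err != nil {
			log.Fatal(err)
		}
		conn.Close(ctx)
	}
}
```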
20 | 21 | -------------------------------------------------------------------------------- /docs/090-architecture.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: Architecture 4 | permalink: /architecture/ 5 | nav_order: 90 6 | mermaid: true 7 | --- 8 | # Architecture 9 | 10 | `kuvasz-streamer` opens a logical replication connection to each of the source databases and a number of connections to the destination database corresponding to the number of workers. 11 | 12 | Depending on the running mode, it also reads its mapping configuration either from a static YAML file or from an SQLite database. 13 | 14 | ![Architecture](/assets/images/architecture.png){: width="50%"} 15 | 16 | ## Initial synchronization 17 | 18 | On startup, `kuvasz-streamer` checks the state of the publications and replication slots. If they don't exist, it creates them and initiates a full sync. If the slot exists but some tables have been added, a full sync is performed for these tables only. 19 | 20 | ![Architecture](/assets/images/initialsync.png) 21 | 22 | A separate goroutine is created for each source to handle the initial sync process. Source tables are synchronized sequentially within that source. Parallelizing this would substantially increase the load on the source server, but it may be considered in the future. 23 | 24 | ## Streaming mode 25 | 26 | After it has finished initial synchronization, Kuvasz-streamer enters streaming mode. In this mode, Kuvasz-streamer listens to the Postgres logical replication slot and processes the logical replication records as they arrive. 27 | 28 | ![Streaming](/assets/images/streaming.png) 29 | 30 | For each source database, `kuvasz-streamer` creates a single dedicated Reader goroutine. This goroutine opens a Postgres replication connection, reads the replication messages, and sends status updates back to the source. 31 | 32 | For each worker, `kuvasz-streamer` creates a dedicated worker goroutine that opens a regular connection to the destination. This worker applies changes on the destination database. Having multiple workers allows write queries to be parallelized, enhancing performance. 33 | 34 | When a replication message (XlogData) is received, `kuvasz-streamer` computes the SQL statement to apply on the destination. It then selects a worker based on a hash of the source table, creates an operation (OP) and sends it to that worker. This mechanism ensures that all changes to a given table are processed in the order they were received. 35 | 36 | A worker creates a transaction and uses it as a container for all received messages. After a configurable timeout, usually 1 second, the transaction is committed and the committed LSN is recorded in a shared map for use by the Reader goroutines. 37 | 38 | The Reader goroutines periodically calculate the committed LSN and send a Standby Status Update message to the source. This ensures that the confirmed messages are removed from the replication slot. The committed LSN is computed so as to guarantee that all operations from a particular source have been applied on all worker connections. 39 | 40 | A simple example: 41 | 42 | ```mermaid 43 | sequenceDiagram 44 | participant S1 as Source 1 45 | participant S2 as Source N 46 | participant R1 as Reader 1 47 | participant R2 as Reader N 48 | participant W1 as Worker 1 49 | participant W2 as Worker 2 50 | participant D1 as Destination
Connection 1 51 | participant D2 as Destination
Connection 2 52 | W1->>D1: BEGIN 53 | W2->>D2: BEGIN 54 | S1->>R1: XlogData T1.insert LSN=1 55 | R1->>W2: OP: INSERT INTO T1(...) 56 | W2->>D2: INSERT INTO T1(...) 57 | Note over W2: S1.WrittenLSN=1 58 | S2->>R2: XlogData T3.insert LSN=55 59 | R2->>W1: OP: INSERT INTO T3(...) 60 | W1->>D1: INSERT INTO T3(...) 61 | Note over W1: S2.WrittenLSN=55 62 | S1->>R1: XlogData T2.update LSN=2 63 | R1->>W1: OP: UPDATE T2 SET ... 64 | W1->>D1: UPDATE T2 SET ... 65 | Note over W1: S1.WrittenLSN=2 66 | S1->>R1: XlogData T1.insert LSN=3 67 | R1->>W2: OP: INSERT INTO T1(...) 68 | W2->>D2: INSERT INTO T1(...) 69 | Note over W2: S1.WrittenLSN=3 70 | W1->>D1: COMMIT 71 | Note over W1: S1.CommittedLSN=2
S2.CommittedLSN=0 72 | W2->>D2: COMMIT 73 | Note over W2: S1.CommittedLSN=3
S2.CommittedLSN=55 74 | R1->>S1: StandbyStatusUpdate CommittedLSN=3 75 | R2->>S2: StandbyStatusUpdate CommittedLSN=55 76 | ``` 77 | -------------------------------------------------------------------------------- /docs/100-implementation.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: Implementation details 4 | permalink: /implementation/ 5 | nav_order: 100 6 | --- 7 | # Implementation details 8 | 9 | ## Postgres replication protocol use-cases 10 | 11 | The Postgres replication protocol provides the necessary information to propagate each update. This table summarizes the various cases used to build a WHERE clause. 12 | 13 | |Operation|Indicator|Values|OldValues|Where|Notes| 14 | |---------|---------|------|---------|-----|-----| 15 | |UPDATE|K|Modified values|Old primary key|PK=OldValues.PK|Primary key was modified| 16 | |UPDATE|O|Modified values|Full row|columns=OldValues|Replica identity full| 17 | |UPDATE|00|Modified values including PK||PK=Values.PK|Primary key was not modified| 18 | |DELETE|K|Primary key||PK=Values.PK|Primary key was deleted| 19 | |DELETE|O|Full row||columns=Values|Replica identity full| 20 | |DELETE|00||||Should never happen| 21 | 22 | ## References 23 | 24 | - Articles 25 | - [Fastware](https://www.postgresql.fastware.com/blog/inside-logical-replication-in-postgresql) 26 | - [Dolthub](https://www.dolthub.com/blog/2024-03-08-postgres-logical-replication/) 27 | - Postgres Documentation 28 | - [Logical replication](https://www.postgresql.org/docs/current/logical-replication.html) 29 | - [Logical decoding](https://www.postgresql.org/docs/current/logicaldecoding.html) 30 | - Protocol 31 | - [Streaming Replication](https://www.postgresql.org/docs/current/protocol-replication.html) 32 | - [Logical Replication](https://www.postgresql.org/docs/current/protocol-logical-replication.html) 33 | - Go pgx and tools 34 | - [jackc/pgx](https://github.com/jackc/pgx) 35 | - [jackc/pglogrepl](https://github.com/jackc/pglogrepl) 36 | -------------------------------------------------------------------------------- /docs/404.html: -------------------------------------------------------------------------------- 1 | --- 2 | permalink: /404.html 3 | layout: default 4 | --- 5 | 6 | 19 | 20 |
21 | 404 22 | Page not found :( 24 | The requested page could not be found. 25 |
26 | -------------------------------------------------------------------------------- /docs/Gemfile: -------------------------------------------------------------------------------- 1 | source "https://rubygems.org" 2 | # Hello! This is where you manage which Jekyll version is used to run. 3 | # When you want to use a different version, change it below, save the 4 | # file and run `bundle install`. Run Jekyll with `bundle exec`, like so: 5 | # 6 | # bundle exec jekyll serve 7 | # 8 | # This will help ensure the proper Jekyll version is running. 9 | # Happy Jekylling! 10 | gem "jekyll", "~> 4.3.3" 11 | # This is the default theme for new Jekyll sites. You may change this to anything you like. 12 | gem "minima", "~> 2.5" 13 | # If you want to use GitHub Pages, remove the "gem "jekyll"" above and 14 | # uncomment the line below. To upgrade, run `bundle update github-pages`. 15 | # gem "github-pages", group: :jekyll_plugins 16 | # If you have any plugins, put them here! 17 | group :jekyll_plugins do 18 | gem "jekyll-feed", "~> 0.12" 19 | end 20 | 21 | # Windows and JRuby does not include zoneinfo files, so bundle the tzinfo-data gem 22 | # and associated library. 23 | platforms :mingw, :x64_mingw, :mswin, :jruby do 24 | gem "tzinfo", ">= 1", "< 3" 25 | gem "tzinfo-data" 26 | end 27 | 28 | # Performance-booster for watching directories on Windows 29 | gem "wdm", "~> 0.1.1", :platforms => [:mingw, :x64_mingw, :mswin] 30 | 31 | # Lock `http_parser.rb` gem to `v0.6.x` on JRuby builds since newer versions of the gem 32 | # do not have a Java counterpart. 33 | gem "http_parser.rb", "~> 0.6.0", :platforms => [:jruby] 34 | 35 | gem "just-the-docs" -------------------------------------------------------------------------------- /docs/Gemfile.lock: -------------------------------------------------------------------------------- 1 | GEM 2 | remote: https://rubygems.org/ 3 | specs: 4 | addressable (2.8.7) 5 | public_suffix (>= 2.0.2, < 7.0) 6 | colorator (1.1.0) 7 | concurrent-ruby (1.3.4) 8 | em-websocket (0.5.3) 9 | eventmachine (>= 0.12.9) 10 | http_parser.rb (~> 0) 11 | eventmachine (1.2.7) 12 | ffi (1.17.0) 13 | ffi (1.17.0-arm64-darwin) 14 | ffi (1.17.0-x86_64-darwin) 15 | forwardable-extended (2.6.0) 16 | google-protobuf (3.25.5) 17 | google-protobuf (3.25.5-arm64-darwin) 18 | google-protobuf (3.25.5-x86_64-darwin) 19 | google-protobuf (3.25.5-x86_64-linux) 20 | http_parser.rb (0.8.0) 21 | i18n (1.14.6) 22 | concurrent-ruby (~> 1.0) 23 | jekyll (4.3.4) 24 | addressable (~> 2.4) 25 | colorator (~> 1.0) 26 | em-websocket (~> 0.5) 27 | i18n (~> 1.0) 28 | jekyll-sass-converter (>= 2.0, < 4.0) 29 | jekyll-watch (~> 2.0) 30 | kramdown (~> 2.3, >= 2.3.1) 31 | kramdown-parser-gfm (~> 1.0) 32 | liquid (~> 4.0) 33 | mercenary (>= 0.3.6, < 0.5) 34 | pathutil (~> 0.9) 35 | rouge (>= 3.0, < 5.0) 36 | safe_yaml (~> 1.0) 37 | terminal-table (>= 1.8, < 4.0) 38 | webrick (~> 1.7) 39 | jekyll-feed (0.17.0) 40 | jekyll (>= 3.7, < 5.0) 41 | jekyll-include-cache (0.2.1) 42 | jekyll (>= 3.7, < 5.0) 43 | jekyll-sass-converter (3.0.0) 44 | sass-embedded (~> 1.54) 45 | jekyll-seo-tag (2.8.0) 46 | jekyll (>= 3.8, < 5.0) 47 | jekyll-watch (2.2.1) 48 | listen (~> 3.0) 49 | just-the-docs (0.10.0) 50 | jekyll (>= 3.8.5) 51 | jekyll-include-cache 52 | jekyll-seo-tag (>= 2.0) 53 | rake (>= 12.3.1) 54 | kramdown (2.4.0) 55 | rexml 56 | kramdown-parser-gfm (1.1.0) 57 | kramdown (~> 2.0) 58 | liquid (4.0.4) 59 | listen (3.9.0) 60 | rb-fsevent (~> 0.10, >= 0.10.3) 61 | rb-inotify (~> 0.9, >= 0.9.10) 62 | mercenary (0.4.0) 63 | minima 
(2.5.2) 64 | jekyll (>= 3.5, < 5.0) 65 | jekyll-feed (~> 0.9) 66 | jekyll-seo-tag (~> 2.1) 67 | pathutil (0.16.2) 68 | forwardable-extended (~> 2.6) 69 | public_suffix (6.0.1) 70 | rake (13.2.1) 71 | rb-fsevent (0.11.2) 72 | rb-inotify (0.11.1) 73 | ffi (~> 1.0) 74 | rexml (3.3.9) 75 | rouge (4.4.0) 76 | safe_yaml (1.0.5) 77 | sass-embedded (1.69.5) 78 | google-protobuf (~> 3.23) 79 | rake (>= 13.0.0) 80 | sass-embedded (1.69.5-arm64-darwin) 81 | google-protobuf (~> 3.23) 82 | sass-embedded (1.69.5-x86_64-darwin) 83 | google-protobuf (~> 3.23) 84 | terminal-table (3.0.2) 85 | unicode-display_width (>= 1.1.1, < 3) 86 | unicode-display_width (2.6.0) 87 | webrick (1.9.0) 88 | 89 | PLATFORMS 90 | arm64-darwin 91 | ruby 92 | x86_64-darwin 93 | x86_64-linux 94 | 95 | DEPENDENCIES 96 | http_parser.rb (~> 0.6.0) 97 | jekyll (~> 4.3.3) 98 | jekyll-feed (~> 0.12) 99 | just-the-docs 100 | minima (~> 2.5) 101 | tzinfo (>= 1, < 3) 102 | tzinfo-data 103 | wdm (~> 0.1.1) 104 | 105 | BUNDLED WITH 106 | 2.5.6 107 | -------------------------------------------------------------------------------- /docs/_config.yml: -------------------------------------------------------------------------------- 1 | title: Kuvasz-streamer 2 | email: kuvasz@kuvasz.io 3 | description: >- 4 | Kuvasz-streamer is an open source change data capture (CDC) 5 | project that focuses exclusively on Postgres. It is tightly integrated 6 | with Postgres Logical Replication to provide high performance, 7 | low latency replication. 8 | baseurl: "/" # the subpath of your site, e.g. /blog 9 | url: "https://streamer.kuvasz.io" 10 | github_username: kuvasz-io 11 | theme: just-the-docs 12 | color_scheme: wider 13 | plugins: 14 | - jekyll-feed 15 | ga_tracking: G-R854TNLS2V 16 | logo: /assets/images/logo.png 17 | favicon_ico: /assets/images/favicon.ico 18 | footer_content: "Copyright © 2024-2025 George Yazbek. Distributed by an AGPL license." 
19 | mermaid: 20 | # Version of mermaid library 21 | # Pick an available version from https://cdn.jsdelivr.net/npm/mermaid/ 22 | version: "11.4.1" 23 | 24 | nav_external_links: 25 | - title: Functional test report 26 | url: /report.html 27 | - title: Load test report 28 | url: https://kuvasz.io/kuvasz-streamer-load-test/ 29 | - title: Github 30 | url: https://github.com/kuvasz-io/kuvasz-streamer 31 | - title: Kuvasz Tech blog 32 | url: https://kuvasz.io -------------------------------------------------------------------------------- /docs/_sass/color_schemes/wider.scss: -------------------------------------------------------------------------------- 1 | $content-width: 62.6rem; 2 | -------------------------------------------------------------------------------- /docs/assets/images/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/docs/assets/images/architecture.png -------------------------------------------------------------------------------- /docs/assets/images/consolidation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/docs/assets/images/consolidation.png -------------------------------------------------------------------------------- /docs/assets/images/crash.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/docs/assets/images/crash.png -------------------------------------------------------------------------------- /docs/assets/images/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/docs/assets/images/favicon.ico -------------------------------------------------------------------------------- /docs/assets/images/full_sync.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/docs/assets/images/full_sync.png -------------------------------------------------------------------------------- /docs/assets/images/initialsync.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/docs/assets/images/initialsync.png -------------------------------------------------------------------------------- /docs/assets/images/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/docs/assets/images/logo.png -------------------------------------------------------------------------------- /docs/assets/images/multitenant.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/docs/assets/images/multitenant.png -------------------------------------------------------------------------------- /docs/assets/images/optimize.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/docs/assets/images/optimize.png -------------------------------------------------------------------------------- /docs/assets/images/streaming.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/docs/assets/images/streaming.png -------------------------------------------------------------------------------- /docs/assets/images/upgrade.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/docs/assets/images/upgrade.png -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: home 3 | --- 4 | Kuvasz-streamer is an open source change data capture (CDC) project that focuses exclusively on Postgres. It is tightly integrated with Postgres Logical Replication to provide high-performance, low-latency replication. 5 | 6 | ## Features 7 | 8 | ### Lightweight 9 | 10 | Kuvasz-streamer is a lightweight service written in Go that has zero dependencies and no queuing. Run it as a system service or in a Docker container. It can run in a fully declarative mode where the configuration map is stored in a read-only YAML file and no files are written to disk. This mode is suitable for CI/CD pipeline-based configuration and Kubernetes deployments. An interactive, database-backed mode is also supported, where the web interface can be used to modify the mapping configuration at runtime. 11 | 12 | ### High-performance, low latency 13 | 14 | Kuvasz-streamer uses the following mechanisms for performance: 15 | 16 | - Postgres COPY protocol to perform the initial sync, then the logical replication protocol. 17 | - Multiple parallel connections to the destination database with load sharing. 18 | - Batch updates into periodic transactions. 19 | - Single multi-threaded process with no queuing. 20 | - Rate-limiting on the source connections to avoid source server overload. 21 | 22 | Kuvasz-streamer was [benchmarked](https://kuvasz.io/kuvasz-streamer-load-test/) at 10K tps with less than 1 second latency. 23 | 24 | ### High guarantees 25 | 26 | Kuvasz-streamer guarantees: 27 | 28 | - In-order delivery: changes are applied in the strict order they are received. Although multiple writers are used in parallel, all writes to a specific table go to the same writer. 29 | - At-least-once delivery semantics: changes committed on the destination database are relayed back to the source in a status update message. In case of a crash in the streamer or in the destination database, unconfirmed messages are re-applied. Having the same primary keys on the destination and the source guarantees that any update is applied only once. 30 | 31 | ### Batteries included 32 | 33 | Kuvasz-streamer takes the pain out of managing publications and replication slots: 34 | 35 | - It creates missing publications and replication slots on startup 36 | - It adds and removes configured tables from publications automatically 37 | - It performs a full sync whenever a new table is added 38 | 39 | It is also fully observable, providing [Prometheus metrics]({% link 065-metrics.md %}) and extensive logging.
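As a rough illustration of the slot management described above, the sketch below creates a missing publication and logical replication slot using the same libraries the project depends on (`jackc/pgconn` and `jackc/pglogrepl`). The publication name, slot name, table list and URL are assumptions for the example, not the streamer's actual identifiers.

```go
package main

import (
	"context"
	"log"

	"github.com/jackc/pglogrepl"
	"github.com/jackc/pgx/v5/pgconn"
)

func main() {
	ctx := context.Background()

	// A replication connection (replication=database) is needed to issue
	// CREATE_REPLICATION_SLOT; regular SQL still works on it.
	conn, err := pgconn.Connect(ctx,
		"postgres://kuvasz:kuvasz@127.0.0.1:5432/db1?replication=database")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// Create the publication covering the replicated tables; real code would
	// ignore an "already exists" error.
	if _, err := conn.Exec(ctx, "CREATE PUBLICATION kuvasz FOR TABLE t1, t2;").ReadAll(); err != nil {
		log.Printf("create publication: %v", err)
	}

	// Create the logical replication slot using the pgoutput plugin.
	if _, err := pglogrepl.CreateReplicationSlot(ctx, conn, "kuvasz", "pgoutput",
		pglogrepl.CreateReplicationSlotOptions{}); err != nil {
		log.Printf("create slot: %v", err)
	}
}
```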
40 | 41 | ### Rich streaming modes 42 | 43 | Multiple table [streaming modes]({% link 040-streaming-modes.md %}) are supported 44 | 45 | - Clone: replicate the source table as-is 46 | - Append-only: replicate the source table but don't delete any records 47 | - History: Keep a full history of all changes with a timestamp 48 | 49 | ### Full Postgres support 50 | 51 | Full PostgreSQL support is guaranteed with an extensive test suite: 52 | 53 | - All recent PostgreSQL versions (12 to 17) 54 | - All data types 55 | - Partitions 56 | - Schemas 57 | - Source tables can be in any database and in any schema 58 | - Destination tables are in a single database and a single schema 59 | 60 | ### API and web interface 61 | 62 | The service provides an optional API and a web interface to easily manage publications and mapping. 63 | 64 | ## Use cases 65 | 66 | Kuvasz-streamer can be [used](/use-cases/) for data consolidation, major version upgrades and other cases. 67 | -------------------------------------------------------------------------------- /docs/loadtest.md: -------------------------------------------------------------------------------- 1 | # DB Version: 16 2 | # OS Type: linux 3 | # DB Type: oltp 4 | # Total Memory (RAM): 64 GB 5 | # CPUs num: 16 6 | # Data Storage: ssd 7 | 8 | max_connections = 300 9 | shared_buffers = 16GB 10 | effective_cache_size = 48GB 11 | maintenance_work_mem = 2GB 12 | checkpoint_completion_target = 0.9 13 | wal_buffers = 16MB 14 | default_statistics_target = 100 15 | random_page_cost = 1.1 16 | effective_io_concurrency = 200 17 | work_mem = 13981kB 18 | huge_pages = try 19 | min_wal_size = 2GB 20 | max_wal_size = 8GB 21 | max_worker_processes = 16 22 | max_parallel_workers_per_gather = 4 23 | max_parallel_workers = 16 24 | max_parallel_maintenance_workers = 4 25 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/kuvasz-io/kuvasz-streamer 2 | 3 | go 1.23.0 4 | 5 | require ( 6 | github.com/deckarep/golang-set/v2 v2.7.0 7 | github.com/golang-jwt/jwt v3.2.2+incompatible 8 | github.com/gorilla/mux v1.8.1 9 | github.com/jackc/pglogrepl v0.0.0-20240307033717-828fbfe908e9 10 | github.com/jackc/pgx/v5 v5.7.1 11 | github.com/knadh/koanf/parsers/toml v0.1.0 12 | github.com/knadh/koanf/providers/env v1.0.0 13 | github.com/knadh/koanf/providers/file v1.1.2 14 | github.com/knadh/koanf/providers/posflag v0.1.0 15 | github.com/knadh/koanf/v2 v2.1.2 16 | github.com/lmittmann/tint v1.0.5 17 | github.com/mattn/go-isatty v0.0.20 18 | github.com/mattn/go-sqlite3 v1.14.24 19 | github.com/pressly/goose/v3 v3.23.0 20 | github.com/prometheus/client_golang v1.20.5 21 | github.com/prometheus/client_model v0.6.1 22 | github.com/spf13/pflag v1.0.5 23 | golang.org/x/crypto v0.31.0 24 | golang.org/x/time v0.8.0 25 | gopkg.in/yaml.v2 v2.4.0 26 | ) 27 | 28 | require ( 29 | github.com/beorn7/perks v1.0.1 // indirect 30 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 31 | github.com/fsnotify/fsnotify v1.8.0 // indirect 32 | github.com/go-viper/mapstructure/v2 v2.2.1 // indirect 33 | github.com/jackc/pgio v1.0.0 // indirect 34 | github.com/jackc/pgpassfile v1.0.0 // indirect 35 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect 36 | github.com/jackc/puddle/v2 v2.2.2 // indirect 37 | github.com/klauspost/compress v1.17.11 // indirect 38 | github.com/knadh/koanf/maps v0.1.1 // indirect 39 | github.com/mfridman/interpolate v0.0.2 // 
indirect 40 | github.com/mitchellh/copystructure v1.2.0 // indirect 41 | github.com/mitchellh/reflectwalk v1.0.2 // indirect 42 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 43 | github.com/pelletier/go-toml v1.9.5 // indirect 44 | github.com/prometheus/common v0.61.0 // indirect 45 | github.com/prometheus/procfs v0.15.1 // indirect 46 | github.com/sethvargo/go-retry v0.3.0 // indirect 47 | go.uber.org/multierr v1.11.0 // indirect 48 | golang.org/x/sync v0.10.0 // indirect 49 | golang.org/x/sys v0.28.0 // indirect 50 | golang.org/x/text v0.21.0 // indirect 51 | google.golang.org/protobuf v1.35.2 // indirect 52 | ) 53 | -------------------------------------------------------------------------------- /go.work: -------------------------------------------------------------------------------- 1 | go 1.23.0 2 | 3 | toolchain go1.23.4 4 | 5 | use . 6 | -------------------------------------------------------------------------------- /package/etc/kuvasz/kuvasz-streamer.toml: -------------------------------------------------------------------------------- 1 | [server] 2 | name = "kuvasz-streamer" 3 | address = ":8000" 4 | pprof = "" 5 | 6 | [logs] 7 | level="debug" 8 | format="text" 9 | source=false 10 | 11 | [database] 12 | url = "postgres://kuvasz:kuvasz@127.0.0.1/dest?application_name=kuvasz-streamer" 13 | 14 | [app] 15 | map_file = "/etc/kuvasz/map.yaml" 16 | 17 | -------------------------------------------------------------------------------- /package/etc/kuvasz/map.yaml: -------------------------------------------------------------------------------- 1 | - database: db1 2 | urls: 3 | - url: postgres://kuvasz:kuvasz@127.0.0.1:5432/db1 4 | sid: local 5 | tables: 6 | tbl1: 7 | -------------------------------------------------------------------------------- /package/etc/rsyslog.d/kuvasz-streamer.conf: -------------------------------------------------------------------------------- 1 | template(name="CleanFormat" type="list") { 2 | property(name="msg" droplastlf="on" ) 3 | constant(value="\n") 4 | } 5 | 6 | if $programname == 'kuvasz-streamer' then /var/log/kuvasz-streamer.log;CleanFormat 7 | & stop 8 | 9 | -------------------------------------------------------------------------------- /package/kuvasz-streamer.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kuvasz Streamer 3 | Documentation=https://streamer.kuvasz.io 4 | Wants=network-online.target 5 | After=network-online.target 6 | 7 | [Service] 8 | User=kuvasz 9 | Group=kuvasz 10 | Type=simple 11 | ExecStart=/usr/bin/kuvasz-streamer 12 | LimitNOFILE=10000 13 | Restart=always 14 | RestartSec=3 15 | StandardOutput=syslog 16 | StandardError=syslog 17 | SyslogIdentifier=kuvasz-streamer 18 | 19 | [Install] 20 | WantedBy=multi-user.target 21 | 22 | -------------------------------------------------------------------------------- /package/postinstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if ! getent group "kuvasz" > /dev/null 2>&1 ; then 3 | groupadd -r "kuvasz" 4 | fi 5 | if ! 
getent passwd "kuvasz" > /dev/null 2>&1 ; then 6 | useradd -r -g kuvasz -d /var/lib/kuvasz -s /sbin/nologin -c "kuvasz user" kuvasz 7 | fi 8 | touch /var/log/kuvasz-streamer.log 9 | chown syslog:adm /var/log/kuvasz-streamer.log 10 | if test -d /run/systemd/system; then 11 | systemctl restart rsyslog 12 | systemctl daemon-reload 13 | fi -------------------------------------------------------------------------------- /package/postremove.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if test -d /run/systemd/system; then 3 | systemctl daemon-reload 4 | fi 5 | -------------------------------------------------------------------------------- /streamer/configdb.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "net/url" 7 | "os" 8 | "strings" 9 | ) 10 | 11 | type SQLModifier struct { 12 | SortField string 13 | SortAsc bool 14 | } 15 | 16 | func ValuesToModifier(values url.Values, columns map[string]string) SQLModifier { 17 | m := SQLModifier{} 18 | sortArray, ok := values["sort"] 19 | if !ok { 20 | log.Debug("No sort key") 21 | return m 22 | } 23 | // use only first sort key 24 | s := strings.Trim(sortArray[0], "[]\"") 25 | a := strings.Split(s, "\",\"") 26 | if len(a) != 2 { 27 | return m 28 | } 29 | switch strings.ToLower(a[1]) { 30 | case "asc": 31 | m.SortAsc = true 32 | case "desc": 33 | m.SortAsc = false 34 | default: 35 | return m 36 | } 37 | translated, ok := columns[a[0]] 38 | if !ok { 39 | return m 40 | } 41 | m.SortField = translated 42 | return m 43 | } 44 | 45 | func BuildQuery(base string, m SQLModifier) string { 46 | var query, order string 47 | 48 | query = base 49 | 50 | if m.SortField == "" { 51 | return query 52 | } 53 | if m.SortAsc { 54 | order = "ASC" 55 | } else { 56 | order = "DESC" 57 | } 58 | query = fmt.Sprintf("%s ORDER BY %s %s", query, m.SortField, order) 59 | log.Debug("Built query", "query", query, "modifier", m) 60 | return query 61 | } 62 | 63 | func SetupConfigDB() { 64 | var err error 65 | ConfigDB, err = sql.Open("sqlite3", config.App.MapDatabase) 66 | if err != nil { 67 | log.Error("Can't open map database", "database", config.App.MapFile, "error", err) 68 | os.Exit(1) 69 | } 70 | } 71 | 72 | func CloseConfigDB() { 73 | if ConfigDB != nil { 74 | ConfigDB.Close() 75 | } 76 | ConfigDB = nil 77 | } 78 | -------------------------------------------------------------------------------- /streamer/full_sync.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "log/slog" 8 | "slices" 9 | "strings" 10 | "time" 11 | 12 | "github.com/jackc/pgx/v5/pgconn" 13 | "github.com/prometheus/client_golang/prometheus" 14 | ) 15 | 16 | type ( 17 | syncChannel struct { 18 | log *slog.Logger 19 | SyncDataChannel chan []byte 20 | CommandChannel chan string 21 | rowsTotal prometheus.Counter 22 | bytesTotal prometheus.Counter 23 | } 24 | ) 25 | 26 | var size int64 27 | 28 | func (s syncChannel) Read(p []byte) (int, error) { 29 | select { 30 | case command := <-s.CommandChannel: 31 | log.Debug("received command", "command", command) 32 | return 0, io.EOF 33 | case row := <-s.SyncDataChannel: 34 | n := copy(p, row) 35 | return n, nil 36 | } 37 | } 38 | 39 | func (s syncChannel) Write(p []byte) (int, error) { 40 | err := lim.Wait(context.Background()) 41 | if err != nil { 42 | return 0, fmt.Errorf("cannot wait for token, error=%w", 
err) 43 | } 44 | row := slices.Clone(p) 45 | size += int64(len(row)) 46 | s.rowsTotal.Inc() 47 | s.bytesTotal.Add(float64(len(row))) 48 | s.SyncDataChannel <- row 49 | return len(p), nil 50 | } 51 | 52 | func writeDestination(log *slog.Logger, tableName string, hasSID bool, columns string, s *syncChannel) { 53 | ctx, cancel := context.WithTimeout(context.Background(), time.Second*3600) 54 | defer cancel() 55 | conn, err := DestConnectionPool.Acquire(ctx) 56 | if err != nil { 57 | log.Error("cannot acquire connection to destination database", "error", err) 58 | return 59 | } 60 | var tag pgconn.CommandTag 61 | if hasSID { 62 | tag, err = conn.Conn().PgConn().CopyFrom(ctx, s, fmt.Sprintf("COPY %s(sid, %s) FROM STDIN;", tableName, columns)) 63 | } else { 64 | tag, err = conn.Conn().PgConn().CopyFrom(ctx, s, fmt.Sprintf("COPY %s(%s) FROM STDIN;", tableName, columns)) 65 | } 66 | if err != nil { 67 | log.Error("cannot COPY FROM", "table", tableName, "error", err) 68 | return 69 | } 70 | log.Debug("COPY FROM", "tag", tag) 71 | conn.Release() 72 | } 73 | 74 | func syncTable(log *slog.Logger, 75 | db string, 76 | sid string, 77 | sourceTableName string, 78 | destTableName string, 79 | sourceConnection *pgconn.PgConn) error { 80 | log = log.With("sourceTable", sourceTableName, "destTable", destTableName) 81 | ctx := context.Background() 82 | hasSID := false 83 | 84 | log.Debug("Starting full sync") 85 | // Prepare channels between reader and writer 86 | syncDataChannel := make(chan []byte) 87 | commandChannel := make(chan string) 88 | s := &syncChannel{ 89 | log: log, 90 | CommandChannel: commandChannel, 91 | SyncDataChannel: syncDataChannel, 92 | rowsTotal: syncRowsTotal.WithLabelValues(db, sid, sourceTableName), 93 | bytesTotal: syncBytesTotal.WithLabelValues(db, sid, sourceTableName), 94 | } 95 | 96 | // Prepare column list 97 | columns := "" 98 | for c := range DestTables[destTableName].Columns { 99 | if strings.HasPrefix(c, "kvsz_") { 100 | continue 101 | } 102 | if c == "sid" { 103 | hasSID = true 104 | continue 105 | } 106 | if columns == "" { 107 | columns = c 108 | } else { 109 | columns = fmt.Sprintf("%s, %s", columns, c) 110 | } 111 | } 112 | log.Debug("Target columns", "columns", columns) 113 | 114 | // Start writer 115 | go writeDestination(log, destTableName, hasSID, columns, s) 116 | 117 | // Start reader 118 | var copyStatement string 119 | if hasSID { 120 | copyStatement = fmt.Sprintf("COPY (SELECT '%s', %s FROM %s) TO STDOUT;", sid, columns, sourceTableName) 121 | } else { 122 | copyStatement = fmt.Sprintf("COPY (SELECT %s FROM %s) TO STDOUT;", columns, sourceTableName) 123 | } 124 | t0 := time.Now() 125 | size = 0 126 | tag, err := sourceConnection.CopyTo(ctx, s, copyStatement) 127 | if err != nil { 128 | log.Error("cannot read source table", "error", err) 129 | return fmt.Errorf("cannot perform full sync, error reading source=%s, dest=%s, error=%w", sourceTableName, destTableName, err) 130 | } 131 | log.Info("Finished full sync", 132 | "tag", tag, 133 | "duration", time.Since(t0), "size", 134 | size, "throughput", 135 | (float64(size) / (time.Since(t0).Seconds()) / 1024 / 1024)) 136 | 137 | // Stop writer 138 | commandChannel <- "stop" 139 | return nil 140 | } 141 | 142 | func syncAllTables( 143 | log *slog.Logger, 144 | db string, 145 | sid string, 146 | sourceTables SourceTables, 147 | sourceConnection *pgconn.PgConn) error { 148 | log.Info("Starting full sync for all tables", "sourceTables", sourceTables) 149 | for sourceTableName := range sourceTables { 150 | _, 
destTableName, err := sourceTables.GetTable(sourceTableName) 151 | if err != nil { 152 | return err 153 | } 154 | log.Info("Syncing", "sourceTable", sourceTableName, "destTable", destTableName) 155 | _ = syncTable(log, db, sid, sourceTableName, destTableName, sourceConnection) 156 | } 157 | return nil 158 | } 159 | 160 | func syncNewTables( 161 | log *slog.Logger, 162 | db string, 163 | sid string, 164 | sourceTables SourceTables, 165 | newTables []string, 166 | sourceConnection *pgconn.PgConn) error { 167 | log.Info("Starting full sync for new tables", "sourceTables", sourceTables) 168 | for i := range newTables { 169 | _, destTableName, err := sourceTables.GetTable(newTables[i]) 170 | if err != nil { 171 | return err 172 | } 173 | log.Info("Syncing", "sourceTable", newTables[i], "destTable", destTableName) 174 | _ = syncTable(log, db, sid, newTables[i], destTableName, sourceConnection) 175 | } 176 | return nil 177 | } 178 | -------------------------------------------------------------------------------- /streamer/handle_auth.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | "time" 10 | 11 | "github.com/golang-jwt/jwt" 12 | "golang.org/x/crypto/bcrypt" 13 | ) 14 | 15 | type loginRequest struct { 16 | Username string `json:"username"` 17 | Password string `json:"password"` 18 | } 19 | 20 | type loginResult struct { 21 | Token string `json:"token"` 22 | } 23 | 24 | func generateToken(username string) (string, error) { 25 | role := "admin" 26 | if config.App.MapDatabase == "" { 27 | role = "viewer" 28 | } 29 | token := jwt.NewWithClaims(jwt.SigningMethodHS256, 30 | jwt.MapClaims{ 31 | "sub": username, 32 | "nbf": time.Now().Add(-5 * time.Minute).Unix(), 33 | "exp": time.Now().Add(time.Duration(config.Auth.TTL) * time.Second).Unix(), 34 | "role": role, 35 | }) 36 | 37 | tokenString, err := token.SignedString([]byte(config.Auth.JWTKey)) 38 | if err != nil { 39 | return "", fmt.Errorf("cannot generate token: %w", err) 40 | } 41 | return tokenString, nil 42 | } 43 | 44 | func validateToken(tokenString string) (string, error) { 45 | token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { 46 | if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { 47 | return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) 48 | } 49 | return config.Auth.JWTKey, nil 50 | }) 51 | if err != nil { 52 | return "", fmt.Errorf("cannot parse token: %w", err) 53 | } 54 | 55 | if claims, ok := token.Claims.(jwt.MapClaims); ok { 56 | s, assertOK := claims["role"].(string) 57 | if assertOK { 58 | return s, nil 59 | } 60 | } 61 | return "", errors.New("cannot get claims map") 62 | } 63 | 64 | func loginHandler(w http.ResponseWriter, r *http.Request) { 65 | var item loginRequest 66 | req := PrepareReq(w, r) 67 | 68 | body, err := io.ReadAll(r.Body) 69 | if err != nil { 70 | log.Error("cannot read body", "error", err) 71 | req.ReturnError(w, http.StatusInternalServerError, "0000", "Cannot read request", err) 72 | return 73 | } 74 | err = json.Unmarshal(body, &item) 75 | if err != nil { 76 | log.Error("could not decode login", "error", err) 77 | req.ReturnError(w, http.StatusBadRequest, "invalid_request", "JSON parse error", err) 78 | return 79 | } 80 | if item.Username != "admin" { 81 | log.Error("invalid user", "username", item.Username) 82 | req.ReturnError(w, http.StatusBadRequest, "invalid_request", "Invalid user, only user admin is 
supported in this model", nil) 83 | return 84 | } 85 | err = bcrypt.CompareHashAndPassword([]byte(config.Auth.AdminPassword), []byte(item.Password)) 86 | if err != nil { 87 | log.Error("cannot compare bcrypt hash", "error", err) 88 | req.ReturnError(w, http.StatusForbidden, "invalid_password", "Invalid password", nil) 89 | return 90 | } 91 | tokenString, err := generateToken(item.Username) 92 | if err != nil { 93 | req.ReturnError(w, http.StatusInternalServerError, "cannot_generate_token", "Cannot generate JWT", err) 94 | return 95 | } 96 | req.ReturnOK(w, r, loginResult{Token: tokenString}, 1) 97 | } 98 | 99 | func refreshTokenHandler(w http.ResponseWriter, r *http.Request) { 100 | req := PrepareReq(w, r) 101 | tokenString, err := generateToken("admin") 102 | if err != nil { 103 | req.ReturnError(w, http.StatusInternalServerError, "cannot_generate_token", "Cannot generate JWT", err) 104 | return 105 | } 106 | req.ReturnOK(w, r, loginResult{Token: tokenString}, 1) 107 | } 108 | 109 | func logoutHandler(w http.ResponseWriter, r *http.Request) { 110 | req := PrepareReq(w, r) 111 | 112 | req.ReturnOK(w, r, nil, 0) 113 | } 114 | -------------------------------------------------------------------------------- /streamer/handle_db.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "database/sql" 5 | "encoding/json" 6 | "errors" 7 | "io" 8 | "net/http" 9 | ) 10 | 11 | type db struct { 12 | ID int64 `json:"id"` 13 | Name string `json:"name"` 14 | } 15 | 16 | var dbColumns = map[string]string{ 17 | "id": "db_id", 18 | "db_id": "db_id", 19 | "name": "name", 20 | } 21 | 22 | func dbGetOneHandler(w http.ResponseWriter, r *http.Request) { 23 | var item db 24 | req := PrepareReq(w, r) 25 | 26 | id, err := ExtractID(r) 27 | if err != nil { 28 | req.ReturnError(w, http.StatusBadRequest, "invalid_id", "Invalid ID", err) 29 | return 30 | } 31 | 32 | if config.App.MapDatabase == "" { 33 | for i := range dbmap { 34 | if dbmap[i].ID == id { 35 | item.ID = dbmap[i].ID 36 | item.Name = dbmap[i].Name 37 | req.ReturnOK(w, r, item, 1) 38 | return 39 | } 40 | } 41 | req.ReturnError(w, http.StatusNotFound, "not_found", "can't find database", err) 42 | return 43 | } 44 | err = ConfigDB.QueryRow(`SELECT db_id, name FROM db WHERE db_id = ?`, id).Scan(&item.ID, &item.Name) 45 | if errors.Is(err, sql.ErrNoRows) { 46 | req.ReturnError(w, http.StatusNotFound, "not_found", "can't find database", err) 47 | return 48 | } 49 | if err != nil { 50 | req.ReturnError(w, http.StatusInternalServerError, "SYSTEM", "can't read database schema list", err) 51 | return 52 | } 53 | req.ReturnOK(w, r, item, 1) 54 | } 55 | 56 | func dbGetManyHandler(w http.ResponseWriter, r *http.Request) { 57 | var dbs []db 58 | 59 | req := PrepareReq(w, r) 60 | 61 | // declarative mode 62 | if config.App.MapDatabase == "" { 63 | for i := range dbmap { 64 | item := db{ 65 | ID: dbmap[i].ID, 66 | Name: dbmap[i].Name, 67 | } 68 | dbs = append(dbs, item) 69 | } 70 | req.ReturnOK(w, r, dbs, len(dbs)) 71 | return 72 | } 73 | // database mode 74 | m := ValuesToModifier(r.URL.Query(), dbColumns) 75 | query := BuildQuery(`SELECT db_id, name FROM db`, m) 76 | log.Debug("running query", "query", query, "modifier", m, "values", r.URL.Query()) 77 | rows, err := ConfigDB.Query(query) 78 | if err != nil { 79 | req.ReturnError(w, http.StatusInternalServerError, "SYSTEM", "can't read database schema list", err) 80 | return 81 | } 82 | defer rows.Close() 83 | for rows.Next() { 84 | var item db 85 | err 
:= rows.Scan(&item.ID, &item.Name) 86 | if err != nil { 87 | req.ReturnError(w, http.StatusInternalServerError, "SYSTEM", "can't scan item", err) 88 | return 89 | } 90 | dbs = append(dbs, item) 91 | } 92 | if err = rows.Err(); err != nil { 93 | req.ReturnError(w, http.StatusInternalServerError, "SYSTEM", "can't scan database item", err) 94 | } 95 | req.ReturnOK(w, r, dbs, len(dbs)) 96 | } 97 | 98 | func dbPostOneHandler(w http.ResponseWriter, r *http.Request) { 99 | var item db 100 | req := PrepareReq(w, r) 101 | 102 | body, err := io.ReadAll(r.Body) 103 | if err != nil { 104 | log.Error("cannot read body", "error", err) 105 | req.ReturnError(w, http.StatusInternalServerError, "0000", "Cannot read request", err) 106 | return 107 | } 108 | err = json.Unmarshal(body, &item) 109 | if err != nil { 110 | log.Error("could not decode db", "error", err) 111 | req.ReturnError(w, http.StatusBadRequest, "invalid_request", "JSON parse error", err) 112 | return 113 | } 114 | // err = app.Validate.Struct(item) 115 | if item.Name == "" { 116 | req.ReturnError(w, http.StatusBadRequest, "invalid_request", "Missing name or schema", nil) 117 | return 118 | } 119 | log.Debug("Creating db", "item", item) 120 | 121 | result, err := ConfigDB.Exec( 122 | `INSERT INTO db(name) VALUES (?)`, item.Name) 123 | if err != nil { 124 | req.ReturnError(w, http.StatusBadRequest, "0003", "Database error", err) 125 | return 126 | } 127 | item.ID, _ = result.LastInsertId() 128 | log.Debug("Created db", "item", item) 129 | req.ReturnOK(w, r, item, 1) 130 | } 131 | 132 | func dbDeleteOneHandler(w http.ResponseWriter, r *http.Request) { 133 | req := PrepareReq(w, r) 134 | 135 | id, err := ExtractID(r) 136 | if err != nil { 137 | log.Error("invalid id") 138 | req.ReturnError(w, http.StatusBadRequest, "invalid_id", "Invalid ID", err) 139 | return 140 | } 141 | 142 | result, err := ConfigDB.Exec(`DELETE FROM db WHERE db_id = ?`, id) 143 | if err != nil { 144 | log.Error("Cannot delete database schema", "id", id, "error", err) 145 | req.ReturnError(w, http.StatusInternalServerError, "SYSTEM", "can't delete database schema", err) 146 | return 147 | } 148 | ra, _ := result.RowsAffected() 149 | if ra == 0 { 150 | req.ReturnError(w, http.StatusNotFound, "NOT_FOUND", "database schema not found", nil) 151 | return 152 | } 153 | req.ReturnOK(w, r, nil, 0) 154 | } 155 | 156 | func dbPutOneHandler(w http.ResponseWriter, r *http.Request) { 157 | var item db 158 | req := PrepareReq(w, r) 159 | 160 | id, err := ExtractID(r) 161 | if err != nil { 162 | log.Error("invalid id") 163 | req.ReturnError(w, http.StatusBadRequest, "invalid_id", "Invalid ID", err) 164 | return 165 | } 166 | 167 | body, err := io.ReadAll(r.Body) 168 | if err != nil { 169 | log.Error("cannot read body", "error", err) 170 | req.ReturnError(w, http.StatusInternalServerError, "0000", "Cannot read request", err) 171 | return 172 | } 173 | err = json.Unmarshal(body, &item) 174 | if err != nil { 175 | log.Error("could not decode db", "error", err) 176 | req.ReturnError(w, http.StatusBadRequest, "0003", "JSON parse error", err) 177 | return 178 | } 179 | if item.Name == "" { 180 | req.ReturnError(w, http.StatusBadRequest, "invalid_request", "missing mandatory fields: name, schema", nil) 181 | return 182 | } 183 | log.Debug("Updating db", "item", item) 184 | // err = app.Validate.Struct(item) 185 | 186 | result, err := ConfigDB.Exec(`UPDATE db set name=? 
where db_id=?`, item.Name, id) 187 | if err != nil { 188 | req.ReturnError(w, http.StatusBadRequest, "0003", "Database error", err) 189 | return 190 | } 191 | ra, _ := result.RowsAffected() 192 | if ra == 0 { 193 | req.ReturnError(w, http.StatusNotFound, "NOT_FOUND", "database schema not found", nil) 194 | return 195 | } 196 | 197 | req.ReturnOK(w, r, item, 1) 198 | } 199 | -------------------------------------------------------------------------------- /streamer/handle_map.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | ) 8 | 9 | func mapGetOneHandler(w http.ResponseWriter, r *http.Request) { 10 | req := PrepareReq(w, r) 11 | 12 | // extract id 13 | id, err := ExtractID(r) 14 | if err != nil { 15 | req.ReturnError(w, http.StatusBadRequest, "invalid_id", "Invalid ID", err) 16 | return 17 | } 18 | 19 | // Find id 20 | if len(MappingTable) == 0 { 21 | err := RefreshMappingTable() 22 | if err != nil { 23 | req.ReturnError(w, http.StatusInternalServerError, "internal_error", "Error refreshing mapping table", err) 24 | return 25 | } 26 | } 27 | if id < 0 || id >= int64(len(MappingTable)) { 28 | req.ReturnError(w, http.StatusNotFound, "invalid_id", "Invalid ID", err) 29 | return 30 | } 31 | log := log.With("id", id) 32 | result := FindTableByID(id) 33 | if result.DBName == "" { 34 | req.ReturnError(w, http.StatusNotFound, "not_found", "Not found", nil) 35 | return 36 | } 37 | log.Debug("Got one map", "result", result) 38 | req.ReturnOK(w, r, result, 1) 39 | } 40 | 41 | func mapGetManyHandler(w http.ResponseWriter, r *http.Request) { 42 | req := PrepareReq(w, r) 43 | if len(MappingTable) == 0 { 44 | err := RefreshMappingTable() 45 | if err != nil { 46 | req.ReturnError(w, http.StatusInternalServerError, "internal_error", "Error refreshing mapping table", err) 47 | return 48 | } 49 | } 50 | req.ReturnOK(w, r, MappingTable, len(MappingTable)) 51 | } 52 | 53 | func mapCreateTableHandler(w http.ResponseWriter, r *http.Request) { 54 | req := PrepareReq(w, r) 55 | 56 | // extract id 57 | id, err := ExtractID(r) 58 | if err != nil || id < 0 || id >= int64(len(MappingTable)) { 59 | req.ReturnError(w, http.StatusNotFound, "invalid_id", "Invalid ID", err) 60 | return 61 | } 62 | t := MappingTable[id] 63 | if t.Present { 64 | req.ReturnError( 65 | w, 66 | http.StatusBadRequest, 67 | "conflict", 68 | "destination table already present", 69 | fmt.Errorf("destination table %s already present", t.Name)) 70 | return 71 | } 72 | q := "CREATE TABLE " + t.Name + "(sid text" 73 | for k, v := range t.SourceColumns { 74 | q += ", " + k + " " + v.ColumnType 75 | } 76 | q += ");" 77 | _, err = DestConnectionPool.Exec(context.Background(), q) 78 | if err != nil { 79 | req.ReturnError(w, http.StatusInternalServerError, "cannot create table", q, err) 80 | return 81 | } 82 | err = RefreshMappingTable() 83 | if err != nil { 84 | req.ReturnError(w, http.StatusInternalServerError, "internal_error", "Error refreshing mapping table", err) 85 | return 86 | } 87 | req.ReturnOK(w, r, t, 1) 88 | } 89 | 90 | func mapCloneTableHandler(w http.ResponseWriter, r *http.Request) { 91 | req := PrepareReq(w, r) 92 | 93 | // extract id 94 | id, err := ExtractID(r) 95 | if err != nil || id < 0 || id >= int64(len(MappingTable)) { 96 | req.ReturnError(w, http.StatusNotFound, "invalid_id", "Invalid ID", err) 97 | return 98 | } 99 | t := MappingTable[id] 100 | log := log.With("handler", "MapReplicateTableHandler", "id", id) 101 | 102 | 
// extract type 103 | tabletype := r.URL.Query().Get("type") 104 | if tabletype == "" { 105 | tabletype = TableTypeClone 106 | } 107 | if tabletype != TableTypeClone && tabletype != TableTypeAppend && tabletype != TableTypeHistory { 108 | req.ReturnError(w, http.StatusBadRequest, "invalid_type", "Invalid type", nil) 109 | return 110 | } 111 | 112 | // extract target name 113 | target := r.URL.Query().Get("target") 114 | if target == "" { 115 | target = t.Name 116 | } 117 | fullTargetName := joinSchema(config.Database.Schema, target) 118 | 119 | // extract regex 120 | regex := r.URL.Query().Get("partitions_regex") 121 | log.Debug("params", "id", id, "type", tabletype, "target", target, "regex", regex) 122 | 123 | // check the table is not partitioned 124 | if regex == "" && len(t.Partitions) > 0 { 125 | req.ReturnError(w, http.StatusBadRequest, "cannot_clone_partitioned_table", "Missing partitions regex", nil) 126 | return 127 | } 128 | 129 | log.Debug("Replicating table", "name", t.Name, "type", tabletype, "target", target, "regex", regex, "DestTables", DestTables) 130 | 131 | // Create table if not present 132 | if _, ok := DestTables[fullTargetName]; !ok { 133 | q := "CREATE TABLE " + target + "(" 134 | first := true 135 | for k, v := range t.SourceColumns { 136 | if first { 137 | first = false 138 | } else { 139 | q += ", " 140 | } 141 | q += k + " " + v.ColumnType 142 | } 143 | q += ");" 144 | log.Debug("Creating table", "name", t.Name, "columns", t.SourceColumns, "q", q) 145 | _, err = DestConnectionPool.Exec(context.Background(), q) 146 | if err != nil { 147 | req.ReturnError(w, http.StatusInternalServerError, "cannot create table", q, err) 148 | return 149 | } 150 | } 151 | 152 | // Now add it to config 153 | log.Debug("Adding entry to tbl", "db_id", t.DBId, "name", t.Name, "target", target, "regex", regex) 154 | _, err = ConfigDB.Exec( 155 | `INSERT INTO tbl(db_id, schema, name, type, target, partitions_regex) VALUES (?, ?, ?, ?, ?, ?)`, 156 | t.DBId, t.Schema, t.Name, tabletype, target, regex) 157 | if err != nil { 158 | req.ReturnError(w, http.StatusBadRequest, "0003", "cannot add entry to tbl", err) 159 | return 160 | } 161 | 162 | // Now refresh mapping table 163 | err = RefreshMappingTable() 164 | if err != nil { 165 | req.ReturnError(w, http.StatusInternalServerError, "internal_error", "Error refreshing mapping table", err) 166 | return 167 | } 168 | 169 | req.ReturnOK(w, r, t, 1) 170 | } 171 | 172 | func mapRefreshHandler(w http.ResponseWriter, r *http.Request) { 173 | req := PrepareReq(w, r) 174 | err := RefreshMappingTable() 175 | if err != nil { 176 | req.ReturnError(w, http.StatusInternalServerError, "internal_error", "Error refreshing mapping table", err) 177 | return 178 | } 179 | req.ReturnOK(w, r, nil, 0) 180 | } 181 | -------------------------------------------------------------------------------- /streamer/kuvasz-streamer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "embed" 7 | "fmt" 8 | "net/http" 9 | _ "net/http/pprof" //nolint:gosec // suppress linter error 10 | "os" 11 | "sync" 12 | 13 | "github.com/jackc/pgx/v5/pgxpool" 14 | _ "github.com/mattn/go-sqlite3" 15 | "golang.org/x/time/rate" 16 | ) 17 | 18 | const ( 19 | // table types. 
20 | TableTypeAppend = "append" 21 | TableTypeHistory = "history" 22 | TableTypeClone = "clone" 23 | StatusStarting = "starting" 24 | StatusActive = "active" 25 | StatusStopping = "stopping" 26 | ) 27 | 28 | var ( 29 | Package string 30 | Version string 31 | Build string 32 | ConfigDB *sql.DB 33 | DestConnectionPool *pgxpool.Pool 34 | DestTables PGTables 35 | dbmap DBMap 36 | URLError = make(map[string]string) 37 | RootChannel chan string 38 | wg sync.WaitGroup 39 | Status = StatusStarting 40 | lim *rate.Limiter 41 | 42 | //go:embed migrations/*.sql 43 | embedMigrations embed.FS 44 | 45 | //go:embed admin 46 | webDist embed.FS 47 | ) 48 | 49 | func SetStatus(s string) { 50 | log.Info("Setting status to", "status", s) 51 | Status = s 52 | } 53 | 54 | func main() { 55 | Configure( 56 | []string{"./conf/kuvasz-streamer.toml", "/etc/kuvasz/kuvasz-streamer.toml"}, 57 | "KUVASZ", 58 | ) 59 | SetupLogs(config.Logs) 60 | log.Debug("Starting...") 61 | 62 | // Start pprof if configured 63 | if config.Maintenance.Pprof != "" { 64 | go func() { 65 | //nolint:forbidigo,gosec // pprof requires this 66 | fmt.Println(http.ListenAndServe(config.Maintenance.Pprof, nil)) 67 | }() 68 | } 69 | 70 | // Start destination processing worker routines 71 | StartWorkers(config.App.NumWorkers) 72 | 73 | // Start API Server 74 | go APIServer(log) 75 | 76 | // Start rate limiter 77 | lim = rate.NewLimiter(config.App.SyncRate, config.App.SyncBurst) 78 | _ = lim.Wait(context.Background()) // REMOVE ME 79 | // Start main loop 80 | RootChannel = make(chan string) 81 | for { 82 | SetStatus(StatusStarting) 83 | err := SetupDestination() 84 | if err != nil { 85 | log.Error("Error setting up destination", "err", err) 86 | os.Exit(1) 87 | } 88 | ReadMap() 89 | dbmap.CompileRegexes() 90 | // Create root context allowing cancellation of all goroutines 91 | rootContext, rootCancel := context.WithCancel(context.Background()) 92 | 93 | // Loop through config and replicate databases 94 | log.Info("Start processing source databases") 95 | for _, database := range dbmap { 96 | for i, url := range database.Urls { 97 | log.Info("Starting replication thread", "db-sid", database.Name+"-"+url.SID, "url", url.URL) 98 | wg.Add(1) 99 | go DoReplicateDatabase(rootContext, database, &database.Urls[i]) 100 | } 101 | } 102 | SetStatus(StatusActive) 103 | <-RootChannel 104 | rootCancel() 105 | SetStatus(StatusStopping) 106 | // wait until all workers exit 107 | log.Debug("Waiting for workers to exit") 108 | wg.Wait() 109 | CloseDestination() 110 | CloseConfigDB() 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /streamer/log.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "log/slog" 7 | "os" 8 | "strings" 9 | 10 | "github.com/lmittmann/tint" 11 | "github.com/mattn/go-isatty" 12 | ) 13 | 14 | type ( 15 | LogsConfig struct { 16 | Level string `koanf:"level"` 17 | Source bool `koanf:"source"` 18 | Format string `koanf:"format"` 19 | } 20 | Logger struct { 21 | slogLogger *slog.Logger 22 | } 23 | ) 24 | 25 | var ( 26 | log *slog.Logger 27 | level *slog.LevelVar 28 | defaultLogsConfig = LogsConfig{ 29 | Level: "debug", 30 | Source: true, 31 | Format: "text", 32 | } 33 | ) 34 | 35 | func GetLogger(l *slog.Logger) *Logger { 36 | return &Logger{slogLogger: l} 37 | } 38 | 39 | func (l *Logger) Fatalf(format string, v ...any) { 40 | l.slogLogger.Debug("fatal", "error", fmt.Sprintf(format, v...)) 41 | 
os.Exit(1) 42 | } 43 | func (l *Logger) Printf(format string, v ...any) { 44 | l.slogLogger.Debug("", "msg", fmt.Sprintf(format, v...)) 45 | } 46 | 47 | func parseLevel(level string) (slog.Level, error) { 48 | l := strings.ToLower(level) 49 | switch { 50 | case l == "err" || l == "error": 51 | return slog.LevelError, nil 52 | case l == "warn" || l == "warning": 53 | return slog.LevelWarn, nil 54 | case l == "info": 55 | return slog.LevelInfo, nil 56 | case l == "debug": 57 | return slog.LevelDebug, nil 58 | } 59 | return slog.LevelDebug, errors.New("can't parse log level") 60 | } 61 | 62 | func SetupLogs(config LogsConfig) { 63 | var l slog.Level 64 | var h slog.Handler 65 | var err error 66 | 67 | if l, err = parseLevel(config.Level); err != nil { 68 | //nolint:forbidigo // Allow printing usage 69 | fmt.Printf("Can't read log level, defaulting to debug\n") 70 | } 71 | level = new(slog.LevelVar) 72 | // if config.Format == "text" { 73 | options := tint.Options{ 74 | Level: level, 75 | TimeFormat: "15:04:05.000", 76 | NoColor: !isatty.IsTerminal(os.Stdout.Fd()), 77 | } 78 | h = tint.NewHandler(os.Stdout, &options) 79 | // } 80 | log = slog.New(h) 81 | level.Set(l) 82 | } 83 | 84 | func UpdateLogs(config LogsConfig) { 85 | var l slog.Level 86 | var err error 87 | 88 | if l, err = parseLevel(config.Level); err != nil { 89 | log.Error("Can't parse log level, not changing it.", "newlevel", config.Level) 90 | return 91 | } 92 | level.Set(l) 93 | } 94 | -------------------------------------------------------------------------------- /streamer/metadata.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "log/slog" 8 | "strings" 9 | 10 | "github.com/jackc/pgx/v5" 11 | "github.com/jackc/pgx/v5/pgxpool" 12 | ) 13 | 14 | type ( 15 | PGColumn struct { 16 | Name string `json:"name"` 17 | ColumnType string `json:"column_type"` 18 | DataTypeOID uint32 `json:"data_type_oid"` 19 | PrimaryKey bool `json:"primary_key"` 20 | } 21 | PGTable struct { 22 | Columns map[string]PGColumn 23 | Partitions []string 24 | } 25 | PGTables map[string]PGTable 26 | 27 | PGRelation struct { 28 | Namespace string 29 | RelationName string 30 | Columns []PGColumn 31 | } 32 | PGRelations map[uint32]PGRelation 33 | ) 34 | 35 | func splitSchema(t string) (string, string) { 36 | before, after, found := strings.Cut(t, ".") 37 | if found { 38 | return before, after 39 | } 40 | return config.App.DefaultSchema, t 41 | } 42 | 43 | func joinSchema(schema, table string) string { 44 | if schema == "" { 45 | return table 46 | } 47 | return schema + "." 
+ table 48 | } 49 | 50 | func getPrimaryKey(log *slog.Logger, database *pgx.Conn, tableName string) (map[string]bool, error) { 51 | result := make(map[string]bool) 52 | s, t := splitSchema(tableName) 53 | query := `SELECT a.attname 54 | FROM pg_index i 55 | JOIN pg_class c ON c.oid = i.indrelid 56 | JOIN pg_attribute a ON a.attrelid = c.oid AND a.attnum = any(i.indkey) 57 | JOIN pg_namespace n ON n.oid = c.relnamespace 58 | WHERE c.relname = $1 59 | AND n.nspname = $2 60 | AND i.indisprimary;` 61 | pkRows, err := database.Query(context.Background(), query, t, s) 62 | if err != nil { 63 | log.Error("cannot get primary keys", "error", err) 64 | return result, fmt.Errorf("cannot get primary keys, table=%s, error=%w", tableName, err) 65 | } 66 | defer pkRows.Close() 67 | 68 | for pkRows.Next() { 69 | var columnName string 70 | 71 | err = pkRows.Scan(&columnName) 72 | if err != nil { 73 | return result, fmt.Errorf("cannot map row constraints to values, table=%s, column=%s, error=%w", tableName, columnName, err) 74 | } 75 | result[columnName] = true 76 | } 77 | return result, nil 78 | } 79 | 80 | func GetTables(log *slog.Logger, database *pgx.Conn, schemaName string) (PGTables, error) { 81 | query := `WITH p as ( 82 | SELECT inhparent as table, array_agg (inhrelid::pg_catalog.regclass) as partitions 83 | FROM pg_catalog.pg_inherits 84 | GROUP BY 1) 85 | SELECT c.table_schema, c.table_name, c.column_name, c.udt_name, t.oid, p.partitions 86 | FROM information_schema.columns as c 87 | INNER JOIN pg_type as t ON c.udt_name=t.typname 88 | INNER JOIN pg_catalog.pg_class as pg ON pg.relname=c.table_name 89 | LEFT JOIN p on p.table=pg.oid 90 | WHERE c.table_catalog=current_database() 91 | and not pg.relispartition 92 | and pg.relkind in ('r', 'p') 93 | and c.table_schema like $1 94 | and c.table_schema not like 'pg_%' 95 | and c.table_schema <> 'information_schema';` 96 | 97 | pgTables := make(PGTables) 98 | if database == nil { 99 | return pgTables, errors.New("no connection to database") 100 | } 101 | log = log.With("schema", schemaName) 102 | log.Debug("Fetching tables and columns") 103 | rows, err := database.Query(context.Background(), query, schemaName) 104 | if err != nil { 105 | return pgTables, fmt.Errorf("cannot get column metadata from database, schema=%s, error=%w", schemaName, err) 106 | } 107 | defer rows.Close() 108 | 109 | for rows.Next() { 110 | var s, t string 111 | var pgColumn PGColumn 112 | var columnName string 113 | var partitions []string 114 | err = rows.Scan(&s, &t, &columnName, &pgColumn.ColumnType, &pgColumn.DataTypeOID, &partitions) 115 | if err != nil { 116 | return pgTables, fmt.Errorf("can't map row to values, schema=%s, error=%w", schemaName, err) 117 | } 118 | tableName := joinSchema(s, t) 119 | pgTable, ok := pgTables[tableName] 120 | if !ok { 121 | pgTable.Partitions = partitions 122 | pgTable.Columns = make(map[string]PGColumn) 123 | } 124 | pgTable.Columns[columnName] = pgColumn 125 | pgTables[tableName] = pgTable 126 | } 127 | if len(pgTables) == 0 { 128 | return pgTables, fmt.Errorf("empty destination metadata, check user rights and destination database schema, schema=%s", schemaName) 129 | } 130 | log.Debug("Got tables", "tables", pgTables) 131 | 132 | // Assign primary keys 133 | for tableName, pgTable := range pgTables { 134 | var pk map[string]bool 135 | pk, err = getPrimaryKey(log, database, tableName) 136 | if err != nil { 137 | return pgTables, err 138 | } 139 | for columnName, column := range pgTable.Columns { 140 | _, ok := pk[columnName] 141 | if ok { 
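// Flag the column as part of the primary key. PGColumn is held by value in the
// Columns map, so the updated copy has to be written back into the map on the
// following lines.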
142 | column.PrimaryKey = true 143 | pgTable.Columns[columnName] = column 144 | } 145 | } 146 | pgTables[tableName] = pgTable 147 | } 148 | log.Debug("Assigned PK", "tables", pgTables) 149 | return pgTables, nil 150 | } 151 | 152 | func SetupDestination() error { 153 | var err error 154 | 155 | // Connect to target database if not already connected 156 | DestConnectionPool, err = pgxpool.New(context.Background(), config.Database.URL) 157 | if err != nil { 158 | return fmt.Errorf("can't connect to target database, url=%s, error=%w", config.Database.URL, err) 159 | } 160 | log.Info("Connected to target database", "url", config.Database.URL) 161 | 162 | // Get destination metadata 163 | log.Info("Getting destination table metadata") 164 | conn, err := DestConnectionPool.Acquire(context.Background()) 165 | if err != nil { 166 | return fmt.Errorf("can't get destination table metadata: error=%w", err) 167 | } 168 | defer conn.Release() 169 | DestTables, err = GetTables(log, conn.Conn(), config.Database.Schema) 170 | if err != nil { 171 | return fmt.Errorf("can't get destination table metadata, error=%w", err) 172 | } 173 | return nil 174 | } 175 | 176 | func CloseDestination() { 177 | DestConnectionPool.Close() 178 | } 179 | -------------------------------------------------------------------------------- /streamer/metrics.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/prometheus/client_golang/prometheus" 7 | "github.com/prometheus/client_golang/prometheus/promauto" 8 | dto "github.com/prometheus/client_model/go" 9 | ) 10 | 11 | var ( 12 | requestsTotal = promauto.NewCounterVec( 13 | prometheus.CounterOpts{ 14 | Name: "streamer_operations_total", 15 | Help: "Total number of INSERT/UPDATE/DELETE operations.", 16 | }, []string{"database", "sid", "table", "operation", "result"}, 17 | ) 18 | 19 | requestDuration = promauto.NewSummaryVec( 20 | prometheus.SummaryOpts{ 21 | Name: "streamer_operations_seconds", 22 | Help: "Duration of INSERT/UPDATE/DELETE operations.", 23 | Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, 24 | MaxAge: 1 * time.Minute, 25 | }, []string{"database", "sid", "table", "operation", "result"}, 26 | ) 27 | 28 | syncRowsTotal = promauto.NewCounterVec( 29 | prometheus.CounterOpts{ 30 | Name: "streamer_sync_rows_total", 31 | Help: "Total number of rows synced.", 32 | }, []string{"database", "sid", "table"}, 33 | ) 34 | syncBytesTotal = promauto.NewCounterVec( 35 | prometheus.CounterOpts{ 36 | Name: "streamer_sync_bytes_total", 37 | Help: "Total number of bytes synced.", 38 | }, []string{"database", "sid", "table"}, 39 | ) 40 | jobsTotal = promauto.NewCounterVec( 41 | prometheus.CounterOpts{ 42 | Name: "streamer_jobs_total", 43 | Help: "Total number of INSERT/UPDATE/DELETE operations.", 44 | }, []string{"channel"}, 45 | ) 46 | 47 | urlHeartbeat = promauto.NewGaugeVec( 48 | prometheus.GaugeOpts{ 49 | Name: "url_heartbeat", 50 | Help: "Timestamp of last known activity", 51 | }, []string{"database", "sid"}, 52 | ) 53 | ) 54 | 55 | func getMetricValue(col prometheus.Collector) float64 { 56 | c := make(chan prometheus.Metric, 1) // 1 for metric with no vector 57 | col.Collect(c) // collect current metric value into the channel 58 | m := dto.Metric{} 59 | _ = (<-c).Write(&m) // read metric value from the channel 60 | return m.GetGauge().GetValue() 61 | } 62 | 63 | func getStatus(database string, sid string) bool { 64 | g := urlHeartbeat.WithLabelValues(database, sid) 65 
| if g == nil { 66 | return false 67 | } 68 | m := getMetricValue(g) 69 | return float64(time.Now().Unix())-m < 10 70 | } 71 | -------------------------------------------------------------------------------- /streamer/migrate.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "database/sql" 5 | "embed" 6 | 7 | "github.com/pressly/goose/v3" 8 | ) 9 | 10 | func Migrate(embeddedMigrations embed.FS, directory string, db *sql.DB) { 11 | goose.SetBaseFS(embeddedMigrations) 12 | goose.SetLogger(GetLogger(log)) 13 | 14 | if err := goose.SetDialect("sqlite3"); err != nil { 15 | panic(err) 16 | } 17 | 18 | if err := goose.Up(db, directory); err != nil { 19 | panic(err) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /streamer/migrations/0001_initial.sql: -------------------------------------------------------------------------------- 1 | -- +goose Up 2 | create table db( 3 | db_id integer primary key, 4 | name text not null unique 5 | ); 6 | 7 | create table url( 8 | url_id integer primary key, 9 | db_id integer not null references db(db_id), 10 | url text not null, 11 | sid text not null, 12 | unique (db_id, sid) 13 | ); 14 | 15 | create table tbl( 16 | tbl_id integer primary key, 17 | db_id integer not null references db(db_id), 18 | schema text not null default 'public', 19 | name text not null, 20 | type text not null, 21 | target text not null, 22 | partitions_regex text null, 23 | unique (db_id, schema, name) 24 | ); 25 | 26 | pragma foreign_keys=ON; 27 | 28 | -------------------------------------------------------------------------------- /streamer/process_history.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "time" 8 | ) 9 | 10 | func (op operation) insertHistory(tableName string, startTime time.Time, values map[string]any) error { 11 | var query string 12 | args := make([]arg, 0) 13 | log := log.With("op", "insertHistory", "table", tableName) 14 | 15 | // Build argument list 16 | if op.destTableHasSID { 17 | args = append(args, arg{"sid", op.sid}) 18 | } 19 | args = append(args, arg{"kvsz_start", startTime}) 20 | args = append(args, arg{"kvsz_end", "9999-01-01 00:00:00"}) 21 | args = append(args, arg{"kvsz_deleted", false}) 22 | args, err := op.buildSetList(tableName, args, values) 23 | if err != nil { 24 | return err 25 | } 26 | log.Debug("Dump params", "values", values) 27 | 28 | // Build query 29 | queryParameters := make([]any, 0) 30 | attributes := args[0].Attribute 31 | valuesIndices := "$1" 32 | queryParameters = append(queryParameters, args[0].Value) 33 | for i := 1; i < len(args); i++ { 34 | attributes = fmt.Sprintf("%s, %s", attributes, args[i].Attribute) 35 | valuesIndices = fmt.Sprintf("%s, $%d", valuesIndices, i+1) 36 | queryParameters = append(queryParameters, args[i].Value) 37 | } 38 | query = fmt.Sprintf("INSERT INTO %s (%s) VALUES(%s)", tableName, attributes, valuesIndices) 39 | 40 | // Run query 41 | log.Debug("insert", "query", query) 42 | _, err = DestConnectionPool.Exec(context.Background(), query, queryParameters...) 
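// Note: the INSERT above runs directly on the shared destination pool; clone
// operations ("ic"/"uc"/"dc" in worker.go) are instead batched inside per-worker
// transactions and committed on the commit_delay timer.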
43 | if err != nil { 44 | log.Error("can't insert", "query", query, "error", err) 45 | requestsTotal.WithLabelValues(op.database, op.sid, op.sourceTable, "insert", "failure").Inc() 46 | return fmt.Errorf("insertHistory failed, error=%w", err) 47 | } 48 | requestsTotal.WithLabelValues(op.database, op.sid, op.sourceTable, "insert", "success").Inc() 49 | return nil 50 | } 51 | 52 | // Cases 53 | // 1. PK exists and is not updated => old = 0, oldValues=nil ==> where PK=PK and sid=SID. 54 | // 2. PK exists and is updated => old=K, oldValues=oldPK ==> where PK=oldPK and sid=SID. 55 | // 3. PK does not exist, replica full => old=O, oldValues=alloldValues ==> where allfields=alloldValues. 56 | func (op operation) updateHistory(tableName string, relation PGRelation, values map[string]any, old uint8, oldValues map[string]any) error { 57 | var i = 1 58 | log := op.log.With("op", "updateHistory", "table", tableName) 59 | 60 | t0 := time.Now() 61 | 62 | log.Debug("Dump params", "values", values, "oldvalues", oldValues, "old", old) 63 | 64 | // Update old record with kvsz_end=now 65 | query := fmt.Sprintf("UPDATE %s SET kvsz_end=$1", tableName) 66 | queryParameters := make([]any, 0) 67 | queryParameters = append(queryParameters, t0) 68 | i++ 69 | 70 | // Add WHERE clause 71 | if op.destTableHasSID { 72 | query = fmt.Sprintf("%s WHERE sid=$%d AND kvsz_end='9999-01-01'", query, i) 73 | queryParameters = append(queryParameters, op.sid) 74 | } else { 75 | query += " WHERE kvsz_end='9999-01-01'" 76 | } 77 | query, queryParameters = op.buildWhere(tableName, relation, nil, values, old, query, queryParameters) 78 | 79 | // Run query 80 | log.Debug("update", "query", query, "args", queryParameters) 81 | _, err := DestConnectionPool.Exec(context.Background(), query, queryParameters...) 82 | if err != nil { 83 | log.Error("can't update", "query", query, "error", err) 84 | requestsTotal.WithLabelValues(op.database, op.sid, op.sourceTable, "update", "failure").Inc() 85 | return fmt.Errorf("updateHistory failed: error=%w", err) 86 | } 87 | err = op.insertHistory(tableName, t0, values) 88 | return err 89 | } 90 | 91 | func (op operation) deleteHistory(tableName string, relation PGRelation, values map[string]any, old uint8) error { 92 | var query string 93 | log := log.With("op", "deleteHistory", "table", tableName) 94 | t0 := time.Now() 95 | 96 | // Build query 97 | queryParameters := make([]any, 0) 98 | if op.destTableHasSID { 99 | query = fmt.Sprintf("UPDATE %s set kvsz_deleted=true, kvsz_end=$2 WHERE sid=$1 AND kvsz_end='9999-01-01' ", tableName) 100 | queryParameters = append(queryParameters, op.sid) 101 | } else { 102 | query = fmt.Sprintf("UPDATE %s set kvsz_deleted=true, kvsz_end=$1 WHERE kvsz_end='9999-01-01' ", tableName) 103 | } 104 | queryParameters = append(queryParameters, t0) 105 | 106 | query, queryParameters = op.buildWhere(tableName, relation, nil, values, old, query, queryParameters) 107 | // Run query 108 | log.Debug("delete", 109 | "query", query, 110 | "queryParameters", queryParameters) 111 | rows, err := DestConnectionPool.Exec(context.Background(), query, queryParameters...)
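// A DELETE on a history table is a soft delete: the statement above closes the open
// version row (kvsz_end='9999-01-01') by setting kvsz_end to the current time and
// kvsz_deleted=true, so the row's full history remains queryable in the destination.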
112 | if err != nil { 113 | log.Error("can't update history table", 114 | "query", query, 115 | "queryParameters", queryParameters, 116 | "error", err) 117 | requestsTotal.WithLabelValues(op.database, op.sid, op.sourceTable, "delete", "failure").Inc() 118 | return fmt.Errorf("deleteHistory failed: error=%w", err) 119 | } 120 | if rows.RowsAffected() == 0 { 121 | log.Error("did not find row to delete, destination database was not in sync", 122 | "query", query, 123 | "queryParameters", queryParameters) 124 | requestsTotal.WithLabelValues(op.database, op.sid, op.sourceTable, "delete", "failure").Inc() 125 | return errors.New("deleteHistory failed: no affected rows") 126 | } 127 | requestsTotal.WithLabelValues(op.database, op.sid, op.sourceTable, "delete", "success").Inc() 128 | log.Debug("delete", "RowsAffected", rows.RowsAffected()) 129 | return nil 130 | } 131 | -------------------------------------------------------------------------------- /streamer/publication.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log/slog" 7 | "strings" 8 | 9 | mapset "github.com/deckarep/golang-set/v2" 10 | "github.com/jackc/pgx/v5" 11 | ) 12 | 13 | type Publications []string 14 | 15 | func findBaseTables(db string) []string { 16 | var p []string 17 | for i := range MappingTable { 18 | // Check table is being replicated and belongs to us 19 | if !MappingTable[i].Replicated || (MappingTable[i].DBName != db) { 20 | continue 21 | } 22 | // if this is a partitioned table, add all partitions 23 | if len(MappingTable[i].Partitions) > 0 { 24 | for j := range MappingTable[i].Partitions { 25 | p = append(p, joinSchema(MappingTable[i].Schema, MappingTable[i].Partitions[j])) 26 | } 27 | } else { 28 | p = append(p, joinSchema(MappingTable[i].Schema, MappingTable[i].Name)) 29 | } 30 | } 31 | return p 32 | } 33 | 34 | func makePublication(database SourceDatabase) string { 35 | log.Debug("Creating publication", "database", database, "MappingTable", MappingTable) 36 | if len(database.Tables) == 0 { 37 | return "" 38 | } 39 | p := findBaseTables(database.Name) 40 | if len(p) == 0 { 41 | return "" 42 | } 43 | return " for table " + strings.Join(p, ", ") 44 | } 45 | 46 | func SyncPublications(log *slog.Logger, conn *pgx.Conn, db SourceDatabase) ([]string, error) { 47 | var newTables []string 48 | ctx := context.Background() 49 | publishedTables := mapset.NewSet[string]() 50 | 51 | log.Debug("SyncPublications", "db", db) 52 | log.Debug("SyncPublications, step 1: Find published tables") 53 | // Fetch list of published tables 54 | rows, err := conn.Query( 55 | ctx, 56 | "SELECT schemaname,tablename FROM pg_publication_tables WHERE pubname = 'kuvasz_'||$1", 57 | db.Name) 58 | if err != nil { 59 | return newTables, fmt.Errorf("cannot query publication tables, error: %w", err) 60 | } 61 | defer rows.Close() 62 | var schema, table string 63 | for rows.Next() { 64 | err := rows.Scan(&schema, &table) 65 | if err != nil { 66 | return newTables, fmt.Errorf("cannot scan table name, error: %w", err) 67 | } 68 | fullName := joinSchema(schema, table) 69 | log.Debug("Found published table", "database", db.Name, "schema", schema, "table", table) 70 | publishedTables.Add(fullName) 71 | // remove from publication if not in MappingTable, checking for partitions 72 | } 73 | log.Debug("Published tables", "tables", publishedTables) 74 | log.Debug("Configured tables", "tables", db.Tables) 75 | log.Debug("SyncPublications, step 2: remove unconfigured 
tables") 76 | for _, n := range publishedTables.ToSlice() { 77 | if db.Tables.Find(n) == "" { 78 | log.Debug("Removing table from publication", "database", db.Name, "schema", schema, "table", table) 79 | _, err = conn.Exec(ctx, "ALTER PUBLICATION kuvasz_"+db.Name+" DROP TABLE "+n) 80 | if err != nil { 81 | return newTables, fmt.Errorf("cannot alter publication, error: %w", err) 82 | } 83 | } 84 | } 85 | 86 | // Now add tables missing from publication 87 | log.Debug("SyncPublications, step 3: add missing tables") 88 | p := findBaseTables(db.Name) 89 | log.Debug("Got base tables, scanning for missing ones", "basetables", p) 90 | for i := range p { 91 | if !publishedTables.Contains(p[i]) { 92 | log.Debug("Adding table to publication", "database", db.Name, "table", p[i]) 93 | _, err = conn.Exec(ctx, "ALTER PUBLICATION kuvasz_"+db.Name+" ADD TABLE "+p[i]) 94 | if err != nil { 95 | return newTables, fmt.Errorf("cannot alter publication, error: %w", err) 96 | } 97 | newTables = append(newTables, p[i]) 98 | } 99 | } 100 | return newTables, nil 101 | } 102 | -------------------------------------------------------------------------------- /streamer/worker.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "log/slog" 6 | "strconv" 7 | "sync" 8 | "time" 9 | 10 | "github.com/jackc/pglogrepl" 11 | "github.com/jackc/pgx/v5" 12 | "github.com/prometheus/client_golang/prometheus" 13 | ) 14 | 15 | type ( 16 | lsnStatus struct { 17 | ConfirmedLSN pglogrepl.LSN 18 | WrittenLSN pglogrepl.LSN 19 | CommittedLSN pglogrepl.LSN 20 | } 21 | sourceStatus struct { 22 | sync.Mutex 23 | m map[string]lsnStatus 24 | } 25 | 26 | Worker struct { 27 | log *slog.Logger 28 | workChannel chan operation 29 | jobsCounter prometheus.Counter 30 | tx pgx.Tx 31 | s *sourceStatus 32 | } 33 | ) 34 | 35 | var Workers []Worker 36 | 37 | func (s *sourceStatus) Write(dbsid string, lsn pglogrepl.LSN) { 38 | s.Lock() 39 | defer s.Unlock() 40 | if s.m == nil { 41 | s.m = make(map[string]lsnStatus) 42 | } 43 | status := s.m[dbsid] 44 | status.WrittenLSN = lsn 45 | s.m[dbsid] = status 46 | } 47 | 48 | func (s *sourceStatus) Commit() { 49 | s.Lock() 50 | defer s.Unlock() 51 | if s.m == nil { 52 | log.Error("commit with no write") 53 | return 54 | } 55 | for k, v := range s.m { 56 | v.CommittedLSN = v.WrittenLSN 57 | s.m[k] = v 58 | } 59 | } 60 | 61 | func (w Worker) work() { 62 | var op operation 63 | var err error 64 | log := w.log 65 | timer := time.NewTimer(time.Duration(config.App.CommitDelay) * time.Second) 66 | for { 67 | select { 68 | case <-timer.C: 69 | if w.tx == nil { 70 | timer.Reset(time.Duration(config.App.CommitDelay) * time.Second) 71 | continue 72 | } 73 | if err = w.tx.Commit(context.Background()); err != nil { 74 | log.Error("failed to commit transaction", "error", err) 75 | } 76 | w.s.Commit() 77 | log.Debug("committed transaction", "lsn", w.s) 78 | w.tx = nil 79 | timer.Reset(time.Duration(config.App.CommitDelay) * time.Second) 80 | case op = <-w.workChannel: 81 | w.jobsCounter.Inc() 82 | if w.tx == nil { 83 | w.tx, err = DestConnectionPool.Begin(context.Background()) 84 | if err != nil { 85 | log.Error("failed to begin transaction", "error", err) 86 | continue 87 | } 88 | } 89 | log.Debug("received operation", "op", op) 90 | switch op.opCode { 91 | case "ic": 92 | _ = op.insertClone(w.tx) 93 | case "uc": 94 | _ = op.updateClone(w.tx) 95 | case "dc": 96 | _ = op.deleteClone(w.tx) 97 | default: 98 | log.Error("unhandled opcode", "op", 
op.opCode) 99 | } 100 | w.s.Write(op.database+"-"+op.sid, op.lsn) 101 | log.Debug("Performed operation", "op", op, "lsn", w.s) 102 | } 103 | } 104 | } 105 | 106 | func SendWork(op operation) { 107 | Workers[op.id%int64(len(Workers))].workChannel <- op 108 | } 109 | 110 | func StartWorkers(numWorkers int) { 111 | Workers = make([]Worker, numWorkers) 112 | for i := 0; i < numWorkers; i++ { 113 | Workers[i].workChannel = make(chan operation) 114 | Workers[i].jobsCounter = jobsTotal.WithLabelValues(strconv.Itoa(i)) 115 | Workers[i].log = log.With("worker", i) 116 | Workers[i].s = &sourceStatus{m: make(map[string]lsnStatus)} 117 | Workers[i].tx = nil 118 | go Workers[i].work() 119 | } 120 | } 121 | 122 | func SetCommittedLSN(database, sid string, lsn pglogrepl.LSN) { 123 | dbsid := database + "-" + sid 124 | 125 | for i := range Workers { 126 | Workers[i].s.Lock() 127 | defer Workers[i].s.Unlock() 128 | 129 | status := Workers[i].s.m[dbsid] 130 | status.CommittedLSN = lsn 131 | Workers[i].s.m[dbsid] = status 132 | } 133 | } 134 | 135 | func GetCommittedLSN(database, sid string, sourceCommittedLSN pglogrepl.LSN) pglogrepl.LSN { 136 | dbsid := database + "-" + sid 137 | // log := log.With("db-sid", dbsid) 138 | lowestDirtyLSN := pglogrepl.LSN(0) 139 | highestCommittedLSN := pglogrepl.LSN(0) 140 | 141 | // step 1 find lowest dirty LSN 142 | for i := range Workers { 143 | if status, ok := Workers[i].s.m[dbsid]; ok { 144 | if status.WrittenLSN > status.CommittedLSN { // worker has written requests but not committed 145 | if status.WrittenLSN < lowestDirtyLSN || lowestDirtyLSN == 0 { 146 | lowestDirtyLSN = status.WrittenLSN 147 | } 148 | } 149 | } 150 | } 151 | // log.Debug("Found Lowest dirty LSN", "lowestDirtyLSN", lowestDirtyLSN) 152 | // step 2 find highest committed transaction in destination already committed in the source 153 | for i := range Workers { 154 | // log.Debug("Worker info", "i", i, "m", Workers[i].s.m[dbsid]) 155 | if status, ok := Workers[i].s.m[dbsid]; ok { 156 | if (status.CommittedLSN < lowestDirtyLSN || lowestDirtyLSN == 0) && 157 | status.CommittedLSN <= sourceCommittedLSN && 158 | status.CommittedLSN > highestCommittedLSN { 159 | highestCommittedLSN = status.CommittedLSN 160 | } 161 | } 162 | } 163 | // log.Debug("Found highest committed LSN", "highestCommittedLSN", highestCommittedLSN, "sourceCommittedLSN", sourceCommittedLSN) 164 | return highestCommittedLSN 165 | } 166 | -------------------------------------------------------------------------------- /test/conf/kuvasz-streamer-lite.toml: -------------------------------------------------------------------------------- 1 | [server] 2 | name = "kuvasz-streamer" 3 | address = ":8000" 4 | max_goroutines = 100 5 | read_timeout = 30 6 | read_header_timeout = 30 7 | write_timeout = 30 8 | idle_timeout = 30 9 | max_header_bytes = 1000 10 | 11 | [maintenance] 12 | pprof = "127.0.0.1:6060" 13 | start_delay = 4 14 | 15 | [logs] 16 | output="console" 17 | level="debug" 18 | format="text" 19 | source=false 20 | 21 | [database] 22 | url = "postgres://kuvasz:kuvasz@127.0.0.1/dest?application_name=kuvasz-streamer" 23 | schema = "public" 24 | 25 | [app] 26 | map_database = "kuvasz-streamer-lite.db" 27 | num_workers = 2 28 | commit_delay = 10.0 29 | -------------------------------------------------------------------------------- /test/conf/kuvasz-streamer-rate.toml: -------------------------------------------------------------------------------- 1 | [server] 2 | name = "kuvasz-streamer" 3 | address = ":8000" 4 | max_goroutines = 100 5 | 
read_timeout = 30 6 | read_header_timeout = 30 7 | write_timeout = 30 8 | idle_timeout = 30 9 | max_header_bytes = 1000 10 | 11 | [maintenance] 12 | pprof = "127.0.0.1:6060" 13 | start_delay = 5 14 | 15 | [logs] 16 | output="console" 17 | level="debug" 18 | format="text" 19 | source=false 20 | 21 | [database] 22 | url = "postgres://kuvasz:kuvasz@127.0.0.1:6012/dest?application_name=kuvasz-streamer" 23 | schema = "public" 24 | 25 | [app] 26 | map_file = "./conf/rate.yaml" 27 | num_workers = 2 28 | commit_delay = 1.0 29 | sync_rate = 100 30 | sync_burst = 1 -------------------------------------------------------------------------------- /test/conf/kuvasz-streamer-sqlite.toml: -------------------------------------------------------------------------------- 1 | [server] 2 | name = "kuvasz-streamer" 3 | address = ":8000" 4 | max_goroutines = 100 5 | read_timeout = 30 6 | read_header_timeout = 30 7 | write_timeout = 30 8 | idle_timeout = 30 9 | max_header_bytes = 1000 10 | 11 | [maintenance] 12 | pprof = "127.0.0.1:6060" 13 | start_delay = 4 14 | 15 | [logs] 16 | output="console" 17 | level="debug" 18 | format="text" 19 | source=false 20 | 21 | [database] 22 | url = "postgres://kuvasz:kuvasz@127.0.0.1:6012/dest?application_name=kuvasz-streamer" 23 | schema = "public" 24 | 25 | [app] 26 | map_database = "kuvasz-streamer.db" 27 | num_workers = 2 28 | commit_delay = 1.0 -------------------------------------------------------------------------------- /test/conf/kuvasz-streamer.toml: -------------------------------------------------------------------------------- 1 | [server] 2 | name = "kuvasz-streamer" 3 | address = ":8000" 4 | max_goroutines = 100 5 | read_timeout = 30 6 | read_header_timeout = 30 7 | write_timeout = 30 8 | idle_timeout = 30 9 | max_header_bytes = 1000 10 | 11 | [maintenance] 12 | pprof = "127.0.0.1:6060" 13 | start_delay = 5 14 | 15 | [logs] 16 | output="console" 17 | level="debug" 18 | format="text" 19 | source=false 20 | 21 | [database] 22 | url = "postgres://kuvasz:kuvasz@127.0.0.1:6012/dest?application_name=kuvasz-streamer" 23 | schema = "public" 24 | 25 | [app] 26 | map_file = "./conf/map.yaml" 27 | num_workers = 2 28 | commit_delay = 1.0 29 | 30 | [auth] 31 | admin_password = "$2b$05$KlJx0xWATjLt84bXrg6uZe/zU4TH3TvbPDLf6tOrzMUPEyN7AoEie" 32 | jwt_key = "Y3OYHx7Y1KsRJPzJKqHGWfEaHsPbmwwSpPrXcND95Pw=" 33 | ttl = 300 -------------------------------------------------------------------------------- /test/conf/map.yaml: -------------------------------------------------------------------------------- 1 | - database: db1 2 | urls: 3 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6012/db1 4 | sid: 12 5 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6013/db1 6 | sid: 13 7 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6014/db1 8 | sid: 14 9 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6015/db1 10 | sid: 15 11 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6016/db1 12 | sid: 16 13 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6017/db1 14 | sid: 17 15 | tables: 16 | t0: 17 | t1: 18 | t2: 19 | target: rt2 20 | t3: 21 | type: append 22 | t4: 23 | type: history 24 | t5: 25 | t6: 26 | t7: 27 | partitions_regex: "t7_.*" 28 | d0: 29 | 30 | - database: db2 31 | urls: 32 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6012/db2 33 | sid: 12 34 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6013/db2 35 | sid: 13 36 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6014/db2 37 | sid: 14 38 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6015/db2 39 | sid: 15 40 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6016/db2 41 | sid: 16 42 
| - url: postgres://kuvasz:kuvasz@127.0.0.1:6017/db2 43 | sid: 17 44 | tables: 45 | s1: 46 | -------------------------------------------------------------------------------- /test/conf/rate.yaml: -------------------------------------------------------------------------------- 1 | - database: rate 2 | urls: 3 | - url: postgres://kuvasz:kuvasz@127.0.0.1:6012/rate 4 | sid: 12 5 | tables: 6 | r0: 7 | -------------------------------------------------------------------------------- /test/database/dest.sql: -------------------------------------------------------------------------------- 1 | create database dest; 2 | \c dest 3 | CREATE TYPE complex AS ( 4 | r double precision, 5 | i double precision 6 | ); 7 | 8 | create table t0(sid text, id bigint, ts timestamptz, name text); 9 | create table t1(sid text, id int, name text, salary int, primary key (sid, id)); 10 | create table rt2(sid text, id int, name text, salary int, extra text); 11 | create table t3(sid text, id int, name text, salary int, primary key (sid, id)); 12 | create table t4(kvsz_id bigserial, sid text, id int, name text, salary int, 13 | kvsz_start timestamptz not null default '1900-01-01 00:00:00', 14 | kvsz_end timestamptz not null default '9999-01-01 00:00:00', 15 | kvsz_deleted boolean not null default false, 16 | primary key(sid, id, kvsz_id)); 17 | create table t5( 18 | sid text, 19 | f1 bigint, 20 | f2 bigserial, 21 | f3 bit, 22 | f4 bit varying, 23 | f5 boolean, 24 | f6 box, 25 | f7 bytea, 26 | f8 character, 27 | f9 character varying, 28 | f10 cidr, 29 | f11 circle, 30 | f12 date, 31 | f13 double precision, 32 | f14 inet, 33 | f15 integer, 34 | f16 interval, 35 | f17 json, 36 | f18 jsonb, 37 | f19 line, 38 | f20 lseg, 39 | f21 macaddr, 40 | f22 macaddr8, 41 | f23 money, 42 | f24 numeric, 43 | f25 path, 44 | f26 pg_lsn, 45 | f28 point, 46 | f29 polygon, 47 | f30 real, 48 | f31 smallint, 49 | f32 smallserial, 50 | f33 serial, 51 | f34 text, 52 | f35 time, 53 | f36 time with time zone, 54 | f37 timestamp, 55 | f38 timestamp with time zone, 56 | f39 tsquery, 57 | f40 tsvector, 58 | f42 uuid, 59 | f43 xml, 60 | f44 integer[], 61 | f45 complex 62 | ); 63 | create table t6(sid text, id int, name text, longvalue text); 64 | create table t7(sid text, id int, name text) partition by range(id); 65 | create table t7_0 partition of t7 for values from (0)to (19); 66 | create table t7_2 partition of t7 for values from (20) to (39); 67 | create table t8(sid text, id int, name text); 68 | create table pt8(sid text, id int, name text); 69 | 70 | -- Without sid 71 | create table d0(id bigint, ts timestamptz, name text); 72 | create table d1(id int primary key, name text, salary int); 73 | create table rd2(id int, name text, salary int, extra text); 74 | create table d3(id int primary key, name text, salary int); 75 | create table d4(kvsz_id bigserial, id int, name text, salary int, 76 | kvsz_start timestamptz not null default '1900-01-01 00:00:00', 77 | kvsz_end timestamptz not null default '9999-01-01 00:00:00', 78 | kvsz_deleted boolean not null default false, 79 | primary key(id, kvsz_id)); 80 | create table d6(id int, name text, longvalue text); 81 | create table d7(id int, name text) partition by range(id); 82 | create table d7_0 partition of d7 for values from (0)to (19); 83 | create table d7_2 partition of d7 for values from (20) to (39); 84 | create table d8(id int, name text); 85 | create table pd8(id int, name text); 86 | 87 | -- db2 88 | create table s1(sid text, id int, name text, salary int, garbage date); 89 | 
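-- Conventions in this destination schema, as exercised by the test suites:
--   * the leading sid column identifies the source instance when several source
--     databases (see test/conf/map.yaml) are consolidated into one destination table;
--     the d* tables are the equivalent targets without a sid column.
--   * rt2 and pt8 are renamed targets for source tables t2 and private.t8.
--   * history targets (t4, d4) carry the kvsz_id/kvsz_start/kvsz_end/kvsz_deleted
--     versioning columns used by process_history.go.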
-------------------------------------------------------------------------------- /test/database/source.sql: -------------------------------------------------------------------------------- 1 | create database db1; 2 | \c db1 3 | CREATE TYPE complex AS ( 4 | r double precision, 5 | i double precision 6 | ); 7 | create table t0(id bigserial, ts timestamptz default now(), name text); 8 | create table t1(id serial primary key, name text, salary int, garbage date); 9 | create table t2(id int, name text, salary int, extra text); 10 | alter table t2 replica identity full; 11 | create table t3(id serial primary key, name text, salary int, garbage date); 12 | create table t4(id serial primary key, name text, salary int, garbage date); 13 | create table t5( 14 | f1 bigint, 15 | f2 bigserial, 16 | f3 bit, 17 | f4 bit varying, 18 | f5 boolean, 19 | f6 box, 20 | f7 bytea, 21 | f8 character, 22 | f9 character varying, 23 | f10 cidr, 24 | f11 circle, 25 | f12 date, 26 | f13 double precision, 27 | f14 inet, 28 | f15 integer, 29 | f16 interval, 30 | f17 json, 31 | f18 jsonb, 32 | f19 line, 33 | f20 lseg, 34 | f21 macaddr, 35 | f22 macaddr8, 36 | f23 money, 37 | f24 numeric, 38 | f25 path, 39 | f26 pg_lsn, 40 | f28 point, 41 | f29 polygon, 42 | f30 real, 43 | f31 smallint, 44 | f32 smallserial, 45 | f33 serial, 46 | f34 text, 47 | f35 time, 48 | f36 time with time zone, 49 | f37 timestamp, 50 | f38 timestamp with time zone, 51 | f39 tsquery, 52 | f40 tsvector, 53 | f42 uuid, 54 | f43 xml, 55 | f44 integer[], 56 | f45 complex 57 | ); 58 | alter table t5 replica identity full; 59 | create table t6(id int primary key, name text, longvalue text); 60 | create table t7(id int primary key, name text) partition by range(id); 61 | create table t7_0 partition of t7 for values from (0) to (9); 62 | create table t7_1 partition of t7 for values from (10) to (19); 63 | create table t7_2 partition of t7 for values from (20) to (29); 64 | create table t7_3 partition of t7 for values from (30) to (39); 65 | create schema private; 66 | create table private.t8(id serial primary key, name text); 67 | create table public.t8(id serial primary key, name text); 68 | 69 | -- no sid 70 | create table d0(id bigserial, ts timestamptz default now(), name text); 71 | create table d1(id serial primary key, name text, salary int, garbage date); 72 | create table d2(id int, name text, salary int, extra text); 73 | alter table d2 replica identity full; 74 | create table d3(id serial primary key, name text, salary int, garbage date); 75 | create table d4(id serial primary key, name text, salary int, garbage date); 76 | create table d6(id int primary key, name text, longvalue text); 77 | create table d7(id int primary key, name text) partition by range(id); 78 | create table d7_0 partition of d7 for values from (0) to (9); 79 | create table d7_1 partition of d7 for values from (10) to (19); 80 | create table d7_2 partition of d7 for values from (20) to (29); 81 | create table d7_3 partition of d7 for values from (30) to (39); 82 | create table private.d8(id serial primary key, name text); 83 | create table public.d8(id serial primary key, name text); 84 | 85 | 86 | create database db2; 87 | \c db2 88 | create table s1(id serial primary key, name text, salary int, garbage date); 89 | -------------------------------------------------------------------------------- /test/detailed_testsuite/50-datatypes.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Resource 00-common.robot 3 | 
Suite Setup Setup empty record 4 | Suite Teardown Disconnect From All Databases 5 | Test Template Update field should propagate 6 | 7 | *** Keywords *** 8 | Setup empty record 9 | Connect To All Databases 10 | FOR ${PG} IN @{PGVERSIONS} 11 | Switch database ${PG} 12 | Execute SQL string insert into t5(f1) values(null) 13 | END 14 | 15 | Update field should propagate 16 | [Arguments] ${FIELD} ${VALUE} 17 | Statement should propagate 18 | ... update t5 set ${FIELD} = ${VALUE} 19 | ... Select ${FIELD} from t5 20 | ... Select ${FIELD} from t5 where sid='{}' 21 | 22 | *** Test cases *** 23 | bigint f1 -9023372036854770000 24 | bigserial f2 1 25 | bit f3 '1' 26 | bit varying f4 '110101' 27 | boolean f5 true 28 | box f6 '(1,1),(4,4)' 29 | bytea f7 '\xdeadbeef' 30 | character f8 'A' 31 | varchar f9 'ABCD' 32 | cidr f10 '192.168.0.1' 33 | circle f11 '(2,2),4' 34 | date f12 '2023-01-01' 35 | double precision f13 123.123456789012345 36 | inet f14 '192.168.0.0/16' 37 | integer f15 1000000 38 | interval f16 'P1DT5M' 39 | json f17 '{"name":"value"}' 40 | jsonb f18 '{"name":"value"}' 41 | line f19 '{1,2,3}' 42 | lseg f20 '(1,1),(5,5)' 43 | macaddr f21 '08:00:2b:01:02:03' 44 | macaddr8 f22 '08:00:2b:01:02:03:04:05' 45 | money f23 123.12 46 | numeric f24 1234567890.12345678901234567890 47 | path f25 '[(1,1),(2,1),(4,4)]' 48 | pg_lsn f26 '16/B374D848' 49 | point f28 '(1,2)' 50 | polygon f29 '(1,1),(2,1),(4,4)' 51 | real f30 123.123456 52 | smallint f31 32000 53 | smallserial f32 1 54 | serial f33 1 55 | text f34 'abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij' 56 | time f35 '12:34:56.123' 57 | timetz f36 '12:34:56.123+02' 58 | timestamp f37 '2023-01-02 01:02:03.123' 59 | timestamptz f38 '2023-01-02 01:02:03.123+02' 60 | tsquery f39 'fat & rat' 61 | tsvector f40 'a fat cat sat on a mat and ate a fat rat' 62 | uuid f42 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11' 63 | xml f43 'bar' 64 | integer[] f44 '{1,2,3,4}' 65 | complex f45 '(1,2)' 66 | -------------------------------------------------------------------------------- /test/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | pg17: 3 | image: postgres:17 4 | command: -c wal_level=logical -c log_connections=on -c log_min_duration_statement=0 -c log_line_prefix='%m [%a] %q%u@%d ' 5 | restart: always 6 | environment: 7 | POSTGRES_PASSWORD: postgres 8 | ports: 9 | - 6017:5432 10 | pg16: 11 | image: postgres:16 12 | command: -c wal_level=logical -c log_connections=on -c log_min_duration_statement=0 -c log_line_prefix='%m [%a] %q%u@%d ' 13 | restart: always 14 | environment: 15 | POSTGRES_PASSWORD: postgres 16 | ports: 17 | - 6016:5432 18 | pg15: 19 | image: postgres:15 20 | command: -c wal_level=logical -c log_connections=on -c log_min_duration_statement=0 -c log_line_prefix='%m [%a] %q%u@%d ' 21 | restart: always 22 | environment: 23 | POSTGRES_PASSWORD: postgres 24 | ports: 25 | - 6015:5432 26 | pg14: 27 | image: postgres:14 28 | command: -c wal_level=logical -c log_connections=on -c log_min_duration_statement=0 -c log_line_prefix='%m [%a] %q%u@%d ' 29 | restart: always 30 | environment: 31 | POSTGRES_PASSWORD: postgres 32 | ports: 33 | - 6014:5432 34 | pg13: 35 | image: postgres:13 36 | command: -c wal_level=logical -c log_connections=on -c log_min_duration_statement=0 -c log_line_prefix='%m [%a] %q%u@%d ' 37 | restart: always 38 | environment: 39 | POSTGRES_PASSWORD: postgres 40 | ports: 41 | - 6013:5432 42 | pg12: 43 | image: postgres:12 44 | command: -c wal_level=logical -c log_connections=on 
-c log_min_duration_statement=0 -c log_line_prefix='%m [%a] %q%u@%d ' 45 | restart: always 46 | environment: 47 | POSTGRES_PASSWORD: postgres 48 | ports: 49 | - 6012:5432 50 | badpg: 51 | image: postgres:12 52 | command: -c log_connections=on -c log_min_duration_statement=0 -c log_line_prefix='%m [%a] %q%u@%d ' 53 | restart: always 54 | environment: 55 | POSTGRES_PASSWORD: postgres 56 | ports: 57 | - 6011:5432 58 | freshpg: 59 | image: postgres:12 60 | command: -c wal_level=logical -c log_connections=on -c log_min_duration_statement=0 -c log_line_prefix='%m [%a] %q%u@%d ' 61 | restart: always 62 | environment: 63 | POSTGRES_PASSWORD: postgres 64 | ports: 65 | - 6010:5432 66 | -------------------------------------------------------------------------------- /test/kuvasz-streamer-gold.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/test/kuvasz-streamer-gold.db -------------------------------------------------------------------------------- /test/kuvasz-streamer-lite.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/test/kuvasz-streamer-lite.db -------------------------------------------------------------------------------- /test/load/conf/kuvasz-streamer.toml: -------------------------------------------------------------------------------- 1 | [server] 2 | name = "kuvasz-streamer" 3 | address = ":8001" 4 | max_goroutines = 100 5 | read_timeout = 30 6 | read_header_timeout = 30 7 | write_timeout = 30 8 | idle_timeout = 30 9 | max_header_bytes = 1000 10 | 11 | [maintenance] 12 | pprof = "127.0.0.1:6060" 13 | start_delay = 0 14 | 15 | [logs] 16 | output="console" 17 | level="debug" 18 | format="text" 19 | source=false 20 | 21 | [database] 22 | url = "postgres://kuvasz:kuvasz@127.0.0.1:5433/pgbench?application_name=kuvasz-streamer&sslmode=disable" 23 | 24 | [app] 25 | map_file = "./conf/map.yaml" 26 | num_workers = 2 27 | commit_delay = 1.0 -------------------------------------------------------------------------------- /test/load/conf/map.yaml: -------------------------------------------------------------------------------- 1 | - database: pgbench 2 | urls: 3 | - url: postgres://kuvasz:kuvasz@127.0.0.1/pgbench?replication=database&application_name=kuvasz_pgbench&sslmode=disable 4 | sid: pgbench 5 | tables: 6 | pgbench_accounts: 7 | partitions_regex: pgbench_accounts 8 | pgbench_branches: 9 | pgbench_history: 10 | pgbench_tellers: 11 | -------------------------------------------------------------------------------- /test/load/init: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | pgbench -U pgbench -i -s 1000 --partitions=10 pgbench 3 | -------------------------------------------------------------------------------- /test/load/log/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/test/load/log/.gitkeep -------------------------------------------------------------------------------- /test/load/restart: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | killall kuvasz-streamer || true 4 | rm -f log/kuvasz-streamer.log 5 | # psql postgres://kuvasz:kuvasz@127.0.0.1:5432/postgres -c 
"select pg_drop_replication_slot('kuvasz_pgbench');" || true 6 | # psql postgres://kuvasz:kuvasz@127.0.0.1:5433/pgbench -f truncate.sql 7 | ../../kuvasz-streamer --conf conf/kuvasz-streamer.toml 8 | -------------------------------------------------------------------------------- /test/load/run: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | pgbench -U pgbench -c 10 -j 2 -P 5 -T 600 pgbench 3 | -------------------------------------------------------------------------------- /test/load/truncate.sql: -------------------------------------------------------------------------------- 1 | truncate pgbench_accounts; 2 | truncate pgbench_branches; 3 | truncate pgbench_history; 4 | truncate pgbench_tellers; 5 | -------------------------------------------------------------------------------- /test/log/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/test/log/.gitkeep -------------------------------------------------------------------------------- /test/map.sql: -------------------------------------------------------------------------------- 1 | insert into db(db_id, name) values(1, 'db1'); 2 | 3 | insert into url(url_id, db_id, url, sid) values(1, 1, 'postgres://kuvasz:kuvasz@127.0.0.1:6012/db1?replication=database&application_name=repl_db1', '12'); 4 | insert into url(url_id, db_id, url, sid) values(2, 1, 'postgres://kuvasz:kuvasz@127.0.0.1:6013/db1?replication=database&application_name=repl_db1', '13'); 5 | insert into url(url_id, db_id, url, sid) values(3, 1, 'postgres://kuvasz:kuvasz@127.0.0.1:6014/db1?replication=database&application_name=repl_db1', '14'); 6 | insert into url(url_id, db_id, url, sid) values(4, 1, 'postgres://kuvasz:kuvasz@127.0.0.1:6015/db1?replication=database&application_name=repl_db1', '15'); 7 | insert into url(url_id, db_id, url, sid) values(5, 1, 'postgres://kuvasz:kuvasz@127.0.0.1:6016/db1?replication=database&application_name=repl_db1', '16'); 8 | insert into url(url_id, db_id, url, sid) values(6, 1, 'postgres://kuvasz:kuvasz@127.0.0.1:6017/db1?replication=database&application_name=repl_db1', '17'); 9 | 10 | insert into tbl(tbl_id, db_id, name, type, target, partitions_regex) values(1, 1,'t0', 'clone', 't0', NULL); 11 | insert into tbl(tbl_id, db_id, name, type, target, partitions_regex) values(2, 1,'t1', 'clone', 't1', NULL); 12 | insert into tbl(tbl_id, db_id, name, type, target, partitions_regex) values(3, 1,'t2', 'clone', 'rt2', NULL); 13 | insert into tbl(tbl_id, db_id, name, type, target, partitions_regex) values(4, 1,'t3', 'append', 't3', NULL); 14 | insert into tbl(tbl_id, db_id, name, type, target, partitions_regex) values(5, 1,'t4', 'history','t4', NULL); 15 | insert into tbl(tbl_id, db_id, name, type, target, partitions_regex) values(6, 1,'t5', 'clone', 't5', NULL); 16 | insert into tbl(tbl_id, db_id, name, type, target, partitions_regex) values(7, 1,'t6', 'clone', 't6', NULL); 17 | insert into tbl(tbl_id, db_id, name, type, target, partitions_regex) values(8, 1,'t7', 'clone', 't7', 't7_.*'); 18 | insert into tbl(tbl_id, db_id, schema, name, type, target, partitions_regex) values( 9, 1,'public', 't8', 'clone', 't8', NULL); 19 | insert into tbl(tbl_id, db_id, schema, name, type, target, partitions_regex) values(10, 1,'private', 't8', 'clone', 'pt8', NULL); 20 | 21 | insert into db(db_id, name) values(2, 'db2'); 22 | 23 | insert into url(url_id, db_id, url, sid) 
values(7, 2, 'postgres://kuvasz:kuvasz@127.0.0.1:6012/db2?replication=database&application_name=repl_db2', '12'); 24 | insert into url(url_id, db_id, url, sid) values(8, 2, 'postgres://kuvasz:kuvasz@127.0.0.1:6013/db2?replication=database&application_name=repl_db2', '13'); 25 | insert into url(url_id, db_id, url, sid) values(9, 2, 'postgres://kuvasz:kuvasz@127.0.0.1:6014/db2?replication=database&application_name=repl_db2', '14'); 26 | insert into url(url_id, db_id, url, sid) values(10,2, 'postgres://kuvasz:kuvasz@127.0.0.1:6015/db2?replication=database&application_name=repl_db2', '15'); 27 | insert into url(url_id, db_id, url, sid) values(11,2, 'postgres://kuvasz:kuvasz@127.0.0.1:6016/db2?replication=database&application_name=repl_db2', '16'); 28 | insert into url(url_id, db_id, url, sid) values(12,2, 'postgres://kuvasz:kuvasz@127.0.0.1:6017/db2?replication=database&application_name=repl_db2', '17'); 29 | 30 | insert into tbl(tbl_id, db_id, name, type, target, partitions_regex) values(11, 2,'s1', 'clone', 's1', NULL); 31 | -------------------------------------------------------------------------------- /test/run: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | set -e 4 | 5 | PSQL="psql -v ON_ERROR_STOP=1" 6 | 7 | function gen() { 8 | for PORT in 6012 6013 6014 6015 6016 6017; do 9 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into t0(name) values('test');" > log/gen.log 2>&1 10 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into t0(name) values('test');" > log/gen.log 2>&1 11 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into t0(name) values('test');" > log/gen.log 2>&1 12 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into t0(name) values('test');" > log/gen.log 2>&1 13 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into t0(name) values('test');" > log/gen.log 2>&1 14 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into t0(name) values('test');" > log/gen.log 2>&1 15 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into t0(name) values('test');" > log/gen.log 2>&1 16 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into t0(name) values('test');" > log/gen.log 2>&1 17 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into t0(name) values('test');" > log/gen.log 2>&1 18 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into t0(name) values('test');" > log/gen.log 2>&1 19 | done 20 | PORT=6012 21 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into d0(name) values('test');" > log/gen.log 2>&1 22 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into d0(name) values('test');" > log/gen.log 2>&1 23 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into d0(name) values('test');" > log/gen.log 2>&1 24 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into d0(name) values('test');" > log/gen.log 2>&1 25 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into d0(name) values('test');" > log/gen.log 2>&1 26 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into d0(name) values('test');" > log/gen.log 2>&1 27 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into d0(name) values('test');" > log/gen.log 2>&1 28 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into d0(name) values('test');" > 
log/gen.log 2>&1 29 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into d0(name) values('test');" > log/gen.log 2>&1 30 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/db1 -c "insert into d0(name) values('test');" > log/gen.log 2>&1 31 | } 32 | 33 | # Clean any existing process 34 | killall kuvasz-streamer || true 35 | 36 | # Pull latest minor version 37 | export REGISTRY=docker.io 38 | for VER in 12 13 14 15 16 17; do 39 | docker pull ${REGISTRY}/postgres:${VER} 40 | done 41 | 42 | # Start postgres containers 43 | docker compose down 44 | docker compose up -d 45 | 46 | # Wait until all instances are ready 47 | for PORT in 6012 6013 6014 6015 6016 6017; do 48 | until pg_isready -h 127.0.0.1 -p ${PORT} -d postgres -U postgres; 49 | do sleep 1; 50 | done; 51 | done 52 | 53 | # Create user and databases 54 | for PORT in 6012 6013 6014 6015 6016 6017; do 55 | ${PSQL} postgres://postgres:postgres@127.0.0.1:${PORT}/postgres -c "create user kuvasz password 'kuvasz' createdb replication;" 56 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:${PORT}/postgres -f database/source.sql 57 | done 58 | ${PSQL} postgres://kuvasz:kuvasz@127.0.0.1:6012/postgres -f database/dest.sql 59 | 60 | # Start generating data 61 | gen 62 | 63 | cp kuvasz-streamer-gold.db kuvasz-streamer.db 64 | ../kuvasz-streamer --conf=./conf/kuvasz-streamer-sqlite.toml > log/kuvasz-streamer.log 2>&1 & 65 | sleep 5 66 | 67 | robot --exitonfailure -d log testsuite 68 | cp log/log.html log/report.html ../docs/ 69 | docker compose down 70 | killall kuvasz-streamer 71 | -------------------------------------------------------------------------------- /test/testsuite/10-sync.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Resource 00-common.robot 3 | Suite Setup Connect To All Databases 4 | Suite Teardown Disconnect From All Databases 5 | 6 | *** Test cases *** 7 | Initial sync should work 8 | Sleep ${SLEEP} 9 | Statement should propagate 10 | ... Select 1 11 | ... Select '{}', * from t0 order by id 12 | ... Select * from t0 where sid='{}' order by id 13 | 14 | Initial sync should work - no sid 15 | Single database statement should propagate 16 | ... Select 1 17 | ... Select * from d0 order by id 18 | ... Select * from d0 order by id 19 | -------------------------------------------------------------------------------- /test/testsuite/30-append.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Resource 00-common.robot 3 | Suite Setup Connect To All Databases 4 | Suite Teardown Disconnect From All Databases 5 | 6 | *** Test cases *** 7 | Insert in append table row 1 8 | Statement should propagate 9 | ... insert into t3(name) values('r1') 10 | ... Select id, name, salary from t3 order by id 11 | ... Select id, name, salary from t3 where sid='{}' order by id 12 | 13 | Insert in append table row 2 14 | Statement should propagate 15 | ... insert into t3(name) values('r2') 16 | ... Select id, name, salary from t3 order by id 17 | ... Select id, name, salary from t3 where sid='{}' order by id 18 | 19 | Update append table non key attribute 20 | Statement should propagate 21 | ... update t3 set name='x1' where id=1 22 | ... select id, name, salary from t3 order by id 23 | ... select id, name, salary from t3 where sid='{}' order by id 24 | 25 | Update append table key attribute 26 | Statement should propagate 27 | ... update t3 set id=5 where id=1 28 | ... 
select id, name, salary from t3 order by id 29 | ... select id, name, salary from t3 where sid='{}' order by id 30 | 31 | Delete from append table 32 | Statement should not propagate 33 | ... delete from t3 where id=5 34 | ... select id, name, salary from t3 where sid='{}' order by id 35 | 36 | Insert in append table row 1 - no sid 37 | Single Database Statement should propagate 38 | ... insert into d3(name) values('r1') 39 | ... Select id, name, salary from d3 order by id 40 | ... Select id, name, salary from d3 order by id 41 | 42 | Insert in append table row 2 - no sid 43 | Single Database Statement should propagate 44 | ... insert into d3(name) values('r2') 45 | ... Select id, name, salary from d3 order by id 46 | ... Select id, name, salary from d3 order by id 47 | 48 | Update append table non key attribute - no sid 49 | Single Database Statement should propagate 50 | ... update d3 set name='x1' where id=1 51 | ... select id, name, salary from d3 order by id 52 | ... select id, name, salary from d3 order by id 53 | 54 | Update append table key attribute - no sid 55 | Single Database Statement should propagate 56 | ... update d3 set id=5 where id=1 57 | ... select id, name, salary from d3 order by id 58 | ... select id, name, salary from d3 order by id 59 | 60 | Delete from append table - no sid 61 | Single Database Statement should not propagate 62 | ... delete from t3 where id=5 63 | ... select id, name, salary from t3 order by id 64 | -------------------------------------------------------------------------------- /test/testsuite/50-datatypes.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Resource 00-common.robot 3 | Suite Setup Setup empty record 4 | Suite Teardown Disconnect From All Databases 5 | 6 | *** Variables *** 7 | ${FIELDS} f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24,f25,f26,f28,f29,f30,f31,f32,f33,f34,f35,f36,f37,f38,f39,f40,f42,f44,f45 8 | ${PAIRS} SEPARATOR=\n 9 | ... f1=-9023372036854770000, 10 | ... f2=1, 11 | ... f3='1', 12 | ... f4 ='110101', 13 | ... f5 =true, 14 | ... f6 ='(1,1),(4,4)', 15 | ... f7 ='\xdeadbeef', 16 | ... f8 ='A', 17 | ... f9 ='ABCD', 18 | ... f10= '192.168.0.1', 19 | ... f11= '(2,2),4', 20 | ... f12= '2023-01-01', 21 | ... f13= 123.123456789012345, 22 | ... f14= '192.168.0.0/16', 23 | ... f15= 1000000, 24 | ... f16= 'P1DT5M', 25 | ... f17= '{"name":"value"}', 26 | ... f18= '{"name":"value"}', 27 | ... f19= '{1,2,3}', 28 | ... f20= '(1,1),(5,5)', 29 | ... f21= '08:00:2b:01:02:03', 30 | ... f22= '08:00:2b:01:02:03:04:05', 31 | ... f23= 123.12, 32 | ... f24= 1234567890.12345678901234567890, 33 | ... f25= '[(1,1),(2,1),(4,4)]', 34 | ... f26= '16/B374D848', 35 | ... f28= '(1,2)', 36 | ... f29= '(1,1),(2,1),(4,4)', 37 | ... f30= 123.123456, 38 | ... f31= 32000, 39 | ... f32= 1, 40 | ... f33= 1, 41 | ... f34= 'abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij', 42 | ... f35= '12:34:56.123', 43 | ... f36= '12:34:56.123+02', 44 | ... f37= '2023-01-02 01:02:03.123', 45 | ... f38= '2023-01-02 01:02:03.123+02', 46 | ... f39= 'fat & rat', 47 | ... f40= 'a fat cat sat on a mat and ate a fat rat', 48 | ... f42= 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11', 49 | ... f44= '{1,2,3,4}', 50 | ... 
f45= '(1,2)' 51 | 52 | *** Keywords *** 53 | Setup empty record 54 | Connect To All Databases 55 | FOR ${PG} IN @{PGVERSIONS} 56 | Switch database ${PG} 57 | Execute SQL string insert into t5(f1) values(null) 58 | END 59 | 60 | *** Test cases *** 61 | Update all fields should propagate 62 | Statement should propagate 63 | ... update t5 set ${PAIRS} 64 | ... Select ${FIELDS} from t5 65 | ... Select ${FIELDS} from t5 where sid='{}' 66 | 67 | -------------------------------------------------------------------------------- /test/testsuite/60-Toast.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Resource 00-common.robot 3 | Suite Setup Connect To All Databases 4 | Suite Teardown Disconnect From All Databases 5 | 6 | *** Test cases *** 7 | 8 | Insert in normal row 9 | Statement should propagate 10 | ... insert into t6(id, name) values(1, 'toast') 11 | ... Select id, name, longvalue from t6 12 | ... Select id, name, longvalue from t6 where sid='{}' 13 | 14 | Update with large value 15 | ${TOASTVALUE}= Generate Random String 20000 16 | Statement should propagate 17 | ... update t6 set longvalue='${TOASTVALUE}' 18 | ... Select id, name, longvalue from t6 19 | ... Select id, name, longvalue from t6 where sid='{}' 20 | 21 | Update small value 22 | Statement should propagate 23 | ... update t6 set name='no-toast' 24 | ... Select id, name, longvalue from t6 25 | ... Select id, name, longvalue from t6 where sid='{}' 26 | 27 | Delete from TOAST table 28 | Statement should propagate 29 | ... delete from t6 30 | ... Select id, name, longvalue from t6 31 | ... Select id, name, longvalue from t6 where sid='{}' 32 | -------------------------------------------------------------------------------- /test/testsuite/70-partitions.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Resource 00-common.robot 3 | Suite Setup Connect To All Databases 4 | Suite Teardown Disconnect From All Databases 5 | 6 | *** Test cases *** 7 | 8 | Insert in partition 0 9 | Statement should propagate 10 | ... insert into t7(id, name) values(0, 'p0') 11 | ... Select id, name from t7 order by id 12 | ... Select id, name from t7 where sid='{}' order by id 13 | 14 | Insert in partition 1 15 | Statement should propagate 16 | ... insert into t7(id, name) values(10, 'p1') 17 | ... Select id, name from t7 order by id 18 | ... Select id, name from t7 where sid='{}' order by id 19 | 20 | Insert in partition 2 21 | Statement should propagate 22 | ... insert into t7(id, name) values(20, 'p2') 23 | ... Select id, name from t7 order by id 24 | ... Select id, name from t7 where sid='{}' order by id 25 | 26 | Insert in partition 3 27 | Statement should propagate 28 | ... insert into t7(id, name) values(30, 'p3') 29 | ... Select id, name from t7 order by id 30 | ... Select id, name from t7 where sid='{}' order by id 31 | 32 | Update partition 2 33 | Statement should propagate 34 | ... update t7 set name='p2x' where id=20 35 | ... Select id, name from t7 order by id 36 | ... Select id, name from t7 where sid='{}' order by id 37 | 38 | Delete from partition 3 39 | Statement should propagate 40 | ... delete from t7 where id=30 41 | ... Select id, name from t7 order by id 42 | ... Select id, name from t7 where sid='{}' order by id 43 | 44 | # No SID 45 | 46 | Insert in partition 0 - no sid 47 | Single Database Statement should propagate 48 | ... insert into d7(id, name) values(0, 'p0') 49 | ... 
Select id, name from d7 order by id 50 | ... Select id, name from d7 order by id 51 | 52 | Insert in partition 1 - no sid 53 | Single Database Statement should propagate 54 | ... insert into d7(id, name) values(10, 'p1') 55 | ... Select id, name from d7 order by id 56 | ... Select id, name from d7 order by id 57 | 58 | Insert in partition 2 - no sid 59 | Single Database Statement should propagate 60 | ... insert into d7(id, name) values(20, 'p2') 61 | ... Select id, name from d7 order by id 62 | ... Select id, name from d7 order by id 63 | 64 | Insert in partition 3 - no sid 65 | Single Database Statement should propagate 66 | ... insert into d7(id, name) values(30, 'p3') 67 | ... Select id, name from d7 order by id 68 | ... Select id, name from d7 order by id 69 | 70 | Update partition 2 - no sid 71 | Single Database Statement should propagate 72 | ... update d7 set name='p2x' where id=20 73 | ... Select id, name from d7 order by id 74 | ... Select id, name from d7 order by id 75 | 76 | Delete from partition 3 - no sid 77 | Single Database Statement should propagate 78 | ... delete from d7 where id=30 79 | ... Select id, name from d7 order by id 80 | ... Select id, name from d7 order by id -------------------------------------------------------------------------------- /test/testsuite/71-schema.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Resource 00-common.robot 3 | Suite Setup Connect To All Databases 4 | Suite Teardown Disconnect From All Databases 5 | 6 | *** Variables *** 7 | ${DESTQUERY} Select id, name, salary, kvsz_start, kvsz_end, kvsz_deleted from t4 where sid='{}' and id=1 order by id, kvsz_start 8 | 9 | *** Test cases *** 10 | Insert in public table 11 | Statement should propagate 12 | ... insert into t8(name) values('p1') 13 | ... Select id, name from t8 order by id 14 | ... Select id, name from t8 where sid='{}' order by id 15 | 16 | Insert in private table 17 | Statement should propagate 18 | ... insert into private.t8(name) values('x1') 19 | ... Select id, name from private.t8 order by id 20 | ... Select id, name from pt8 where sid='{}' order by id 21 | 22 | Update public table non key attribute 23 | Statement should propagate 24 | ... update t8 set name='x1' where id=1 25 | ... select id, name from t8 order by id 26 | ... select id, name from t8 where sid='{}' order by id 27 | 28 | Update private table non key attribute 29 | Statement should propagate 30 | ... update private.t8 set name='z1' where id=1 31 | ... select id, name from private.t8 order by id 32 | ... select id, name from pt8 where sid='{}' order by id 33 | 34 | Update public table key attribute 35 | Statement should propagate 36 | ... update t8 set id=5 where id=1 37 | ... select id, name from t8 order by id 38 | ... select id, name from t8 where sid='{}' 39 | 40 | Update private table key attribute 41 | Statement should propagate 42 | ... update private.t8 set id=10 where id=1 43 | ... select id, name from private.t8 order by id 44 | ... select id, name from pt8 where sid='{}' 45 | 46 | # no sid 47 | 48 | Insert in public table - no sid 49 | Single Database Statement should propagate 50 | ... insert into d8(name) values('p1') 51 | ... Select id, name from d8 order by id 52 | ... Select id, name from d8 order by id 53 | 54 | Insert in private table - no sid 55 | Single Database Statement should propagate 56 | ... insert into private.d8(name) values('x1') 57 | ... Select id, name from private.d8 order by id 58 | ... 
Select id, name from pd8 order by id 59 | 60 | Update public table non key attribute - no sid 61 | Single Database Statement should propagate 62 | ... update d8 set name='x1' where id=1 63 | ... select id, name from d8 order by id 64 | ... select id, name from d8 order by id 65 | 66 | Update private table non key attribute - no sid 67 | Single Database Statement should propagate 68 | ... update private.d8 set name='z1' where id=1 69 | ... select id, name from private.d8 order by id 70 | ... select id, name from pd8 order by id 71 | 72 | Update public table key attribute - no sid 73 | Single Database Statement should propagate 74 | ... update d8 set id=5 where id=1 75 | ... select id, name from d8 order by id 76 | ... select id, name from d8 order by id 77 | 78 | Update private table key attribute - no sid 79 | Single Database Statement should propagate 80 | ... update private.d8 set id=10 where id=1 81 | ... select id, name from private.d8 order by id 82 | ... select id, name from pd8 order by id 83 | -------------------------------------------------------------------------------- /test/testsuite/80-api-db.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Resource 00-common.robot 3 | 4 | *** Test cases *** 5 | 6 | GET existing db by id should succeed 7 | Clear Expectations 8 | Expect Response Body ${SCHEMA}/db.json 9 | Set Headers ${admin} 10 | GET /api/db/1 11 | Integer response status 200 12 | Integer response body id 1 13 | String response body name db1 14 | 15 | GET non existing db by id should fail 16 | Clear Expectations 17 | Set Headers ${admin} 18 | GET /api/db/99 19 | Integer response status 404 20 | 21 | GET db by invalid id should fail 22 | Clear Expectations 23 | Set Headers ${admin} 24 | Expect Response Body ${schema}/error.json 25 | GET /api/db/dskjhfkdsjfgh 26 | Integer response status 400 27 | 28 | GET all dbs should succeed 29 | Clear Expectations 30 | Set Headers ${admin} 31 | Expect Response Body ${schema}/dbs.json 32 | GET /api/db 33 | Integer response status 200 34 | Array response body minItems=2 maxItems=2 35 | 36 | Create db should succeed 37 | Clear Expectations 38 | Set Headers ${admin} 39 | Expect Response Body ${schema}/db.json 40 | POST /api/db {"name": "db3"} 41 | Integer response status 200 42 | String response body name db3 43 | 44 | Create db with missing parameters should fail 45 | Clear Expectations 46 | Set Headers ${admin} 47 | Expect Response Body ${schema}/error.json 48 | POST /api/db {"product_name": "vm-xl-2"} 49 | Integer response status 400 50 | 51 | Create db with invalid parameters should fail 52 | Clear Expectations 53 | Set Headers ${admin} 54 | Expect Response Body ${schema}/error.json 55 | POST /api/db {"name": 123} 56 | Integer response status 400 57 | 58 | Modify db should succeed 59 | Clear Expectations 60 | Set Headers ${admin} 61 | Expect Response Body ${schema}/db.json 62 | PUT /api/db/3 {"name": "newdb3"} 63 | Integer response status 200 64 | String response body name newdb3 65 | 66 | Modify with missing parameters should fail 67 | Clear Expectations 68 | Set Headers ${admin} 69 | Expect Response Body ${schema}/error.json 70 | PUT /api/db/3 {"product_name": "vm-xl-2"} 71 | Integer response status 400 72 | 73 | Modify db with invalid parameters should fail 74 | Clear Expectations 75 | Set Headers ${admin} 76 | Expect Response Body ${schema}/error.json 77 | PUT /api/db/3 {"name": 123} 78 | Integer response status 400 79 | 80 | Modify non existing db should fail 81 | Clear 
Expectations 82 | Set Headers ${admin} 83 | Expect Response Body ${schema}/error.json 84 | PUT /api/db/4 {"name": "newdb4"} 85 | Integer response status 404 86 | 87 | Modify db with invalid id should fail 88 | Clear Expectations 89 | Set Headers ${admin} 90 | Expect Response Body ${schema}/error.json 91 | PUT /api/db/sdlkfgjh {"name": "newdb4"} 92 | Integer response status 400 93 | 94 | Delete existing db should succeed 95 | Clear Expectations 96 | Set Headers ${admin} 97 | DELETE /api/db/3 98 | Integer response status 200 99 | 100 | Delete non-existing db should fail 101 | Clear Expectations 102 | Set Headers ${admin} 103 | DELETE /api/db/44 104 | Integer response status 404 105 | 106 | Delete invalid db_id should fail 107 | Clear Expectations 108 | Set Headers ${admin} 109 | DELETE /api/db/sdkjfgh 110 | Integer response status 400 111 | -------------------------------------------------------------------------------- /test/testsuite/81-api-url.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Resource 00-common.robot 3 | 4 | *** Test cases *** 5 | 6 | GET existing url by id should succeed 7 | Clear Expectations 8 | Expect Response Body ${SCHEMA}/url.json 9 | Set Headers ${admin} 10 | GET /api/url/1 11 | Integer response status 200 12 | Integer response body id 1 13 | Integer response body db_id 1 14 | String response body db_name db1 15 | 16 | GET non existing url by id should fail 17 | Clear Expectations 18 | Set Headers ${admin} 19 | GET /api/url/99 20 | Integer response status 404 21 | 22 | GET url by invalid id should fail 23 | Clear Expectations 24 | Set Headers ${admin} 25 | Expect Response Body ${schema}/error.json 26 | GET /api/url/dskjhfkdsjfgh 27 | Integer response status 400 28 | 29 | GET all urls should succeed 30 | Clear Expectations 31 | Set Headers ${admin} 32 | Expect Response Body ${schema}/urls.json 33 | GET /api/url 34 | Integer response status 200 35 | Array response body minItems=12 maxItems=12 36 | 37 | Create url should succeed 38 | Clear Expectations 39 | Set Headers ${admin} 40 | Expect Response Body ${schema}/url.json 41 | POST /api/url {"db_id": 3, "sid": "12", "url":"postgres://user:password@127.0.0.1/db3" } 42 | Integer response status 200 43 | String response body sid 12 44 | 45 | Create url with missing parameters should fail 46 | Clear Expectations 47 | Set Headers ${admin} 48 | Expect Response Body ${schema}/error.json 49 | POST /api/url {"db_id": 3, "product_name": "vm-xl-2"} 50 | Integer response status 400 51 | 52 | Create url with invalid parameters should fail 53 | Clear Expectations 54 | Set Headers ${admin} 55 | Expect Response Body ${schema}/error.json 56 | POST /api/url {"db_id": "toto", "sid": "12", "url":"postgres://user:password@127.0.0.1/db3" } 57 | Integer response status 400 58 | 59 | Modify url should succeed 60 | Clear Expectations 61 | Set Headers ${admin} 62 | Expect Response Body ${schema}/url.json 63 | PUT /api/url/13 {"db_id": 3, "sid": "13", "url":"postgres://user:password@127.0.0.1/db3" } 64 | Integer response status 200 65 | String response body sid 13 66 | 67 | Modify url with missing parameters should fail 68 | Clear Expectations 69 | Set Headers ${admin} 70 | Expect Response Body ${schema}/error.json 71 | PUT /api/url/13 {"product_name": "vm-xl-2"} 72 | Integer response status 400 73 | 74 | Modify url with invalid parameters should fail 75 | Clear Expectations 76 | Set Headers ${admin} 77 | Expect Response Body ${schema}/error.json 78 | PUT /api/url/13 {"sid": 123} 
79 | Integer response status 400 80 | 81 | Modify non existing url should fail 82 | Clear Expectations 83 | Set Headers ${admin} 84 | Expect Response Body ${schema}/error.json 85 | PUT /api/url/99 {"db_id": 3, "sid": "13", "url":"postgres://user:password@127.0.0.1/db3" } 86 | Integer response status 404 87 | 88 | Modify url with invalid id should fail 89 | Clear Expectations 90 | Set Headers ${admin} 91 | Expect Response Body ${schema}/error.json 92 | PUT /api/url/sdlkfgjh {"sid": "12"} 93 | Integer response status 400 94 | 95 | Delete existing url should succeed 96 | Clear Expectations 97 | Set Headers ${admin} 98 | DELETE /api/url/13 99 | Integer response status 200 100 | 101 | Delete non-existing url should fail 102 | Clear Expectations 103 | Set Headers ${admin} 104 | DELETE /api/url/44 105 | Integer response status 404 106 | 107 | Delete invalid url_id should fail 108 | Clear Expectations 109 | Set Headers ${admin} 110 | DELETE /api/url/sdkjfgh 111 | Integer response status 400 112 | -------------------------------------------------------------------------------- /test/testsuite/82-api-tbl.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Resource 00-common.robot 3 | 4 | *** Variables *** 5 | ${TABLE_ID} 22 6 | 7 | *** Test cases *** 8 | 9 | GET existing tbl by id should succeed 10 | Clear Expectations 11 | Expect Response Body ${SCHEMA}/tbl.json 12 | Set Headers ${admin} 13 | GET /api/tbl/1 14 | Integer response status 200 15 | Integer response body id 1 16 | Integer response body db_id 1 17 | String response body db_name db1 18 | 19 | GET non existing tbl by id should fail 20 | Clear Expectations 21 | Set Headers ${admin} 22 | GET /api/tbl/99 23 | Integer response status 404 24 | 25 | GET tbl by invalid id should fail 26 | Clear Expectations 27 | Set Headers ${admin} 28 | Expect Response Body ${schema}/error.json 29 | GET /api/tbl/dskjhfkdsjfgh 30 | Integer response status 400 31 | 32 | GET all tbls should succeed 33 | Clear Expectations 34 | Set Headers ${admin} 35 | Expect Response Body ${schema}/tbls.json 36 | GET /api/tbl 37 | Integer response status 200 38 | Array response body minItems=20 maxItems=20 39 | 40 | Create tbl should succeed 41 | Clear Expectations 42 | Set Headers ${admin} 43 | Expect Response Body ${schema}/tbl.json 44 | POST /api/tbl {"db_id":3,"name":"foo","type":"clone","target":"blah"} 45 | Integer response status 200 46 | Integer response body db_id 3 47 | 48 | Create tbl with missing parameters should fail 49 | Clear Expectations 50 | Set Headers ${admin} 51 | Expect Response Body ${schema}/error.json 52 | POST /api/tbl {"db_id": 3, "product_name": "vm-xl-2"} 53 | Integer response status 400 54 | 55 | Create tbl with invalid parameters should fail 56 | Clear Expectations 57 | Set Headers ${admin} 58 | Expect Response Body ${schema}/error.json 59 | POST /api/tbl {"db_id": "toto", "sid": "12", "tbl":"postgres://user:password@127.0.0.1/db3" } 60 | Integer response status 400 61 | 62 | Modify tbl should succeed 63 | Clear Expectations 64 | Set Headers ${admin} 65 | Expect Response Body ${schema}/tbl.json 66 | PUT /api/tbl/${TABLE_ID} {"db_id":3,"schema":"public","name":"bar","type":"clone","target":"bar"} 67 | Integer response status 200 68 | String response body name bar 69 | 70 | Modify tbl with missing parameters should fail 71 | Clear Expectations 72 | Set Headers ${admin} 73 | Expect Response Body ${schema}/error.json 74 | PUT /api/tbl/${TABLE_ID} {"db_id":3,"name":"bar","type":"clone"} 75 | 
Integer response status 400 76 | 77 | Modify tbl with invalid parameters should fail 78 | Clear Expectations 79 | Set Headers ${admin} 80 | Expect Response Body ${schema}/error.json 81 | PUT /api/tbl/${TABLE_ID} {"db_id":3,"schema":"public","name":123,"type":"clone","target":"bar"} 82 | Integer response status 400 83 | 84 | Modify non existing tbl should fail 85 | Clear Expectations 86 | Set Headers ${admin} 87 | Expect Response Body ${schema}/error.json 88 | PUT /api/tbl/99 {"db_id":3,"schema":"public","name":"bar","type":"clone","target":"bar"} 89 | Integer response status 404 90 | 91 | Modify tbl with invalid id should fail 92 | Clear Expectations 93 | Set Headers ${admin} 94 | Expect Response Body ${schema}/error.json 95 | PUT /api/tbl/sdlkfgjh {"db_id":3,"name":"bar","type":"clone","target":"bar"} 96 | Integer response status 400 97 | 98 | Delete existing tbl should succeed 99 | Clear Expectations 100 | Set Headers ${admin} 101 | DELETE /api/tbl/${TABLE_ID} 102 | Integer response status 200 103 | 104 | Delete non-existing tbl should fail 105 | Clear Expectations 106 | Set Headers ${admin} 107 | DELETE /api/tbl/44 108 | Integer response status 404 109 | 110 | Delete invalid tbl_id should fail 111 | Clear Expectations 112 | Set Headers ${admin} 113 | DELETE /api/tbl/sdkjfgh 114 | Integer response status 400 115 | -------------------------------------------------------------------------------- /web/.eslintrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | "extends": [ 3 | "eslint:recommended", 4 | "plugin:react/recommended", 5 | "plugin:react/jsx-runtime", 6 | "plugin:react-hooks/recommended", 7 | "prettier" 8 | ], 9 | "parser": "@typescript-eslint/parser", 10 | "plugins": ["@typescript-eslint"], 11 | "env": { 12 | "browser": true, 13 | "es2021": true 14 | }, 15 | "settings": { 16 | "react": { 17 | "version": "detect" 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /web/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | pnpm-debug.log* 8 | lerna-debug.log* 9 | 10 | node_modules 11 | dist 12 | dist-ssr 13 | *.local 14 | 15 | # Editor directories and files 16 | .vscode/* 17 | !.vscode/extensions.json 18 | .idea 19 | .DS_Store 20 | *.suo 21 | *.ntvs* 22 | *.njsproj 23 | *.sln 24 | *.sw? 25 | -------------------------------------------------------------------------------- /web/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 9 | 10 | 11 | 12 | admin 13 | 109 | 110 | 114 | 115 | 116 | 117 | 118 |
[web/index.html: HTML markup stripped during extraction; of the original 126 lines only the page title "admin" and the "Loading..." placeholder text remain visible]
123 | 124 | 125 | 126 | -------------------------------------------------------------------------------- /web/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "admin", 3 | "homepage": ".", 4 | "private": true, 5 | "scripts": { 6 | "dev": "vite", 7 | "build": "vite build", 8 | "serve": "vite preview", 9 | "type-check": "tsc --noEmit", 10 | "lint": "eslint --fix --ext .js,.jsx,.ts,.tsx ./src", 11 | "format": "prettier --write ./src" 12 | }, 13 | "dependencies": { 14 | "jwt-decode": "^4.0.0", 15 | "ra-data-simple-rest": "^5.3.3", 16 | "react": "^18.2.0", 17 | "react-admin": "^5.3.3", 18 | "react-dom": "^18.2.0", 19 | "react-query": "^3.39.3" 20 | }, 21 | "devDependencies": { 22 | "@types/node": "^22.9.0", 23 | "@types/react": "^18.0.22", 24 | "@types/react-dom": "^18.0.7", 25 | "@typescript-eslint/eslint-plugin": "^8.13.0", 26 | "@typescript-eslint/parser": "^8.13.0", 27 | "@vitejs/plugin-react": "^4.0.1", 28 | "eslint": "^9.14.0", 29 | "eslint-config-prettier": "^9.1.0", 30 | "eslint-plugin-react": "^7.32.2", 31 | "eslint-plugin-react-hooks": "^5.0.0", 32 | "prettier": "^3.3.3", 33 | "typescript": "^5.1.6", 34 | "vite": "^5.4.10" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /web/prettier.config.js: -------------------------------------------------------------------------------- 1 | module.exports = {} -------------------------------------------------------------------------------- /web/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kuvasz-io/kuvasz-streamer/106a4e05e7cc5c64228ce2ab5e6f5f087c1f8b56/web/public/favicon.ico -------------------------------------------------------------------------------- /web/public/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "admin", 3 | "name": "{{name}}", 4 | "icons": [ 5 | { 6 | "src": "favicon.ico", 7 | "sizes": "64x64 32x32 24x24 16x16", 8 | "type": "image/x-icon" 9 | } 10 | ], 11 | "start_url": "./index.html", 12 | "display": "standalone", 13 | "theme_color": "#000000", 14 | "background_color": "#ffffff" 15 | } 16 | -------------------------------------------------------------------------------- /web/src/app-bar.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react'; 2 | import { AppBar, Button, TitlePortal, useDataProvider, useRefresh } from 'react-admin'; 3 | import { Box, useMediaQuery, Theme } from '@mui/material'; 4 | 5 | import { useMutation } from 'react-query'; 6 | 7 | import Logo from "./logo"; 8 | 9 | const RestartAllButton = () => { 10 | const dataProvider = useDataProvider(); 11 | const refresh = useRefresh(); 12 | 13 | const { mutate, isLoading } = useMutation( 14 | () => dataProvider.restartAll().then(() => refresh())); 15 | return ; 49 | }; 50 | 51 | export const MapList = () => ( 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | ); 69 | 70 | export const MapEdit = () => ( 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | ); 85 | 86 | 87 | 88 | 89 | 90 | 91 | -------------------------------------------------------------------------------- /web/src/soft-theme.ts: -------------------------------------------------------------------------------- 1 | import { defaultTheme } from 'react-admin'; 2 | 3 | /** 4 | * Soft: A gentle theme for apps with rich 
content (images, charts, maps, etc). 5 | * 6 | * Uses white app bar, rounder corners, light colors. 7 | */ 8 | 9 | export const softDarkTheme = { 10 | palette: { 11 | primary: { 12 | main: '#90caf9', 13 | }, 14 | secondary: { 15 | main: '#FBBA72', 16 | }, 17 | mode: 'dark' as 'dark', // Switching the dark mode on is a single property value change. 18 | }, 19 | sidebar: { 20 | width: 200, 21 | }, 22 | components: { 23 | ...defaultTheme.components, 24 | RaMenuItemLink: { 25 | styleOverrides: { 26 | root: { 27 | borderLeft: '3px solid #000', 28 | '&.RaMenuItemLink-active': { 29 | borderLeft: '3px solid #90caf9', 30 | }, 31 | }, 32 | }, 33 | }, 34 | MuiAppBar: { 35 | styleOverrides: { 36 | colorSecondary: { 37 | color: '#ffffffb3', 38 | backgroundColor: '#616161', 39 | }, 40 | }, 41 | defaultProps: { 42 | elevation: 1, 43 | }, 44 | }, 45 | }, 46 | }; 47 | 48 | export const softLightTheme = { 49 | palette: { 50 | primary: { 51 | main: '#4f3cc9', 52 | }, 53 | secondary: { 54 | light: '#5f5fc4', 55 | main: '#283593', 56 | dark: '#001064', 57 | contrastText: '#fff', 58 | }, 59 | background: { 60 | default: '#fcfcfe', 61 | }, 62 | mode: 'light' as 'light', 63 | }, 64 | shape: { 65 | borderRadius: 10, 66 | }, 67 | sidebar: { 68 | width: 200, 69 | }, 70 | components: { 71 | ...defaultTheme.components, 72 | RaMenuItemLink: { 73 | styleOverrides: { 74 | root: { 75 | borderLeft: '3px solid #fff', 76 | '&.RaMenuItemLink-active': { 77 | borderLeft: '3px solid #4f3cc9', 78 | }, 79 | }, 80 | }, 81 | }, 82 | MuiPaper: { 83 | styleOverrides: { 84 | elevation1: { 85 | boxShadow: 'none', 86 | }, 87 | root: { 88 | border: '1px solid #e0e0e3', 89 | backgroundClip: 'padding-box', 90 | }, 91 | }, 92 | }, 93 | MuiAppBar: { 94 | styleOverrides: { 95 | colorSecondary: { 96 | color: '#808080', 97 | backgroundColor: '#fff', 98 | }, 99 | }, 100 | defaultProps: { 101 | elevation: 1, 102 | }, 103 | }, 104 | MuiLinearProgress: { 105 | styleOverrides: { 106 | colorPrimary: { 107 | backgroundColor: '#f5f5f5', 108 | }, 109 | barColorPrimary: { 110 | backgroundColor: '#d7d7d7', 111 | }, 112 | }, 113 | }, 114 | MuiTableRow: { 115 | styleOverrides: { 116 | root: { 117 | '&:last-child td': { border: 0 }, 118 | }, 119 | }, 120 | }, 121 | }, 122 | }; -------------------------------------------------------------------------------- /web/src/tbl.tsx: -------------------------------------------------------------------------------- 1 | import { 2 | List, 3 | Edit, 4 | Show, 5 | Create, 6 | Datagrid, 7 | TextField, 8 | ReferenceField, 9 | ReferenceInput, 10 | SimpleForm, 11 | TextInput, 12 | SelectInput, 13 | SelectField, 14 | SimpleShowLayout 15 | } from 'react-admin'; 16 | 17 | import { TableTypeInput } from './common'; 18 | 19 | export const TblList = () => ( 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | ); 32 | 33 | export const TblEdit = () => ( 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | ); 46 | 47 | export const TblShow = () => ( 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | ); 60 | 61 | export const TblCreate = () => ( 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | ); 75 | -------------------------------------------------------------------------------- /web/src/url.tsx: -------------------------------------------------------------------------------- 1 | import { 2 | Identifier, 3 | RaRecord, 4 | List, 5 | Edit, 6 | Show, 7 | Create, 8 | Datagrid, 9 | TextField, 10 | ReferenceField, 11 | BooleanField, 12 | ReferenceInput, 13 | SimpleForm, 14 | TextInput, 15 | 
SelectInput, 16 | SimpleShowLayout, 17 | EditButton, 18 | RecordContext 19 | } from 'react-admin'; 20 | 21 | import { useRecordContext } from 'react-admin'; 22 | import { Button, Link } from '@mui/material'; 23 | 24 | import WarningIcon from '@mui/icons-material/Warning'; 25 | 26 | export const UrlList = () => ( 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | ); 38 | 39 | export const UrlEdit = () => ( 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | ); 49 | 50 | export const UrlShow = () => ( 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | ); 60 | 61 | export const UrlCreate = () => ( 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | ); 72 | -------------------------------------------------------------------------------- /web/src/vite-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | -------------------------------------------------------------------------------- /web/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es5", 4 | "lib": [ 5 | "dom", 6 | "dom.iterable", 7 | "esnext" 8 | ], 9 | "allowJs": true, 10 | "skipLibCheck": true, 11 | "esModuleInterop": true, 12 | "allowSyntheticDefaultImports": true, 13 | "strict": true, 14 | "forceConsistentCasingInFileNames": true, 15 | "noFallthroughCasesInSwitch": true, 16 | "module": "esnext", 17 | "moduleResolution": "node", 18 | "resolveJsonModule": true, 19 | "isolatedModules": true, 20 | "noEmit": true, 21 | "jsx": "react-jsx" 22 | }, 23 | "include": [ 24 | "src" 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /web/vite.config.ts: -------------------------------------------------------------------------------- 1 | import { defineConfig } from 'vite'; 2 | import react from '@vitejs/plugin-react'; 3 | 4 | // https://vitejs.dev/config/ 5 | export default defineConfig({ 6 | plugins: [react()], 7 | define: { 8 | 'process.env': process.env, 9 | }, 10 | server: { 11 | host: true, 12 | }, 13 | base: './', 14 | }); 15 | --------------------------------------------------------------------------------
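
The Robot suites 80-api-db, 81-api-url and 82-api-tbl above exercise the streamer's admin REST API (/api/db, /api/url, /api/tbl). As a rough manual-testing sketch only: the base URL and the authorization header below are assumptions, since the real listen address and the ${admin} credentials live in test/conf/*.toml and test/testsuite/00-common.robot, which are not reproduced in this listing.

    # Assumed values -- adjust to the local kuvasz-streamer configuration.
    BASE=http://127.0.0.1:8000             # assumed admin API address (not confirmed by this listing)
    AUTH='Authorization: Bearer <token>'    # assumed header carried by ${admin} in 00-common.robot

    # List configured source databases (mirrors "GET all dbs should succeed")
    curl -s -H "$AUTH" "$BASE/api/db"

    # Create, rename and delete an entry (mirrors the POST/PUT/DELETE cases in 80-api-db.robot)
    curl -s -H "$AUTH" -X POST   "$BASE/api/db"   -d '{"name": "db3"}'
    curl -s -H "$AUTH" -X PUT    "$BASE/api/db/3" -d '{"name": "newdb3"}'
    curl -s -H "$AUTH" -X DELETE "$BASE/api/db/3"

The request bodies and ids mirror the test cases exactly; only the address and credentials are placeholders.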