├── .github └── workflows │ └── synchdb-ci.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── LICENSE ├── Makefile ├── README.md ├── SETUP-CN.md ├── ci ├── build-synchdb.sh ├── setup-remotedbs.sh ├── teardown-remotedbs.sh └── test-synchdb.sh ├── ezdeploy.sh ├── myrule.json ├── src ├── backend │ ├── converter │ │ ├── debezium_event_handler.c │ │ ├── format_converter.c │ │ └── olr_event_handler.c │ ├── debezium │ │ ├── pom.xml │ │ └── src │ │ │ ├── main │ │ │ └── java │ │ │ │ └── com │ │ │ │ └── example │ │ │ │ └── DebeziumRunner.java │ │ │ └── test │ │ │ └── java │ │ │ └── com │ │ │ └── example │ │ │ └── AppTest.java │ ├── executor │ │ └── replication_agent.c │ ├── olr │ │ ├── OraProtoBuf.pb-c.c │ │ ├── OraProtoBuf.proto │ │ ├── olr_client.c │ │ ├── oracle_parser16 │ │ │ ├── Makefile │ │ │ ├── check_keywords.pl │ │ │ ├── include │ │ │ │ ├── catalog │ │ │ │ │ ├── pg_attribute.h │ │ │ │ │ └── pg_attribute_d.h │ │ │ │ ├── nodes │ │ │ │ │ ├── nodetags_ext.h │ │ │ │ │ ├── parsenodes.h │ │ │ │ │ └── primnodes.h │ │ │ │ ├── oracle_parser │ │ │ │ │ ├── ora_keywords.h │ │ │ │ │ ├── ora_kwlist.h │ │ │ │ │ ├── ora_parser_hook.h │ │ │ │ │ └── ora_scanner.h │ │ │ │ ├── parser │ │ │ │ │ └── parser.h │ │ │ │ └── utils │ │ │ │ │ └── ora_compatible.h │ │ │ ├── liboracle_parser.c │ │ │ ├── meson.build │ │ │ ├── ora_gram.c │ │ │ ├── ora_gram.h │ │ │ ├── ora_gram.y │ │ │ ├── ora_gram.y.mod │ │ │ ├── ora_gramparse.h │ │ │ ├── ora_keywords.c │ │ │ ├── ora_kwlist_d.h │ │ │ ├── ora_scan.c │ │ │ ├── ora_scan.l │ │ │ └── ora_scan.l.mod │ │ ├── oracle_parser17 │ │ │ ├── .gitignore │ │ │ ├── Makefile │ │ │ ├── check_keywords.pl │ │ │ ├── include │ │ │ │ ├── catalog │ │ │ │ │ ├── pg_attribute.h │ │ │ │ │ └── pg_attribute_d.h │ │ │ │ ├── nodes │ │ │ │ │ ├── nodetags_ext.h │ │ │ │ │ ├── parsenodes.h │ │ │ │ │ └── primnodes.h │ │ │ │ ├── oracle_parser │ │ │ │ │ ├── ora_keywords.h │ │ │ │ │ ├── ora_kwlist.h │ │ │ │ │ ├── ora_parser_hook.h │ │ │ │ │ └── ora_scanner.h │ │ │ │ ├── parser │ │ │ │ │ └── 
parser.h │ │ │ │ └── utils │ │ │ │ │ └── ora_compatible.h │ │ │ ├── liboracle_parser.c │ │ │ ├── meson.build │ │ │ ├── ora_gram.y │ │ │ ├── ora_gramparse.h │ │ │ ├── ora_keywords.c │ │ │ ├── ora_kwlist_d.h │ │ │ └── ora_scan.l │ │ └── oracle_parser_patches │ │ │ ├── Makefile.patch │ │ │ ├── liboracle_parser.c.patch │ │ │ ├── ora_compatible.h.patch │ │ │ └── ora_gram.y.patch │ ├── synchdb │ │ └── synchdb.c │ └── utils │ │ └── netio_utils.c ├── include │ ├── converter │ │ ├── debezium_event_handler.h │ │ ├── format_converter.h │ │ └── olr_event_handler.h │ ├── executor │ │ └── replication_agent.h │ ├── olr │ │ ├── OraProtoBuf.pb-c.h │ │ └── olr_client.h │ ├── synchdb │ │ └── synchdb.h │ └── utils │ │ └── netio_utils.h ├── monitoring │ ├── docker-compose.yaml │ ├── grafana-provisioning │ │ ├── dashboards │ │ │ ├── dashboards.yaml │ │ │ ├── synchdb-jvm_grafana-dashboard.json │ │ │ ├── synchdb-mysql-grafana-dashboard.json │ │ │ ├── synchdb-oracle-grafana-dashboard.json │ │ │ └── synchdb-sqlserver-grafana-dashboard.json │ │ └── datasources │ │ │ └── datasource.yaml │ ├── jmx-conf │ │ ├── jmxacc.file │ │ ├── jmxexport.conf │ │ └── jmxpwd.file │ └── prometheus.yml └── test │ ├── pytests │ ├── hammerdb │ │ ├── __init__.py │ │ ├── conftest.py │ │ └── test_tpcc.py │ └── synchdbtests │ │ ├── __init__.py │ │ ├── common.py │ │ ├── conftest.py │ │ └── t │ │ ├── test_001_initialsnapshot.py │ │ ├── test_002_ddl.py │ │ ├── test_003_datatypes.py │ │ ├── test_004_dml.py │ │ └── test_005_utility.py │ ├── regress │ ├── expected │ │ └── synchdb.out │ ├── results │ │ ├── regression.diffs │ │ ├── results │ │ │ └── synchdb.out │ │ └── synchdb.out │ └── sql │ │ └── synchdb.sql │ └── scripts │ ├── create_mass_tables.sh │ └── db_config.conf ├── synchdb--1.0.sql ├── synchdb.control └── testenv ├── README.md ├── hammerdb ├── mysql_buildschema.tcl ├── mysql_runtpcc.tcl ├── oracle_buildschema.tcl ├── oracle_runtpcc.tcl ├── sqlserver_buildschema.tcl └── sqlserver_runtpcc.tcl ├── mysql ├── 
synchdb-mysql-test-internal.yaml └── synchdb-mysql-test.yaml ├── olr ├── 1.3.0 │ └── OpenLogReplicator.json ├── 1.7.0 │ └── OpenLogReplicator.json ├── 1.8.5 │ └── OpenLogReplicator.json └── synchdb-olr-test.yaml ├── ora19c ├── synchdb-ora19c-test-internal.yaml ├── synchdb-ora19c-test-olr-internal.yaml ├── synchdb-ora19c-test-olr.yaml └── synchdb-ora19c-test.yaml ├── oracle ├── synchdb-oracle-test-internal.yaml └── synchdb-oracle-test.yaml ├── sqlserver ├── inventory.sql ├── synchdb-combined-cert.pem ├── synchdb-private.key ├── synchdb-sqlserver-test-internal.yaml ├── synchdb-sqlserver-test.yaml └── synchdb-sqlserver-withssl-test.yaml └── synchdb ├── Dockerfile ├── init-synchdb.sh ├── jmx_prometheus_javaagent-1.3.0.jar ├── jmxexport.conf └── synchdb-test.yaml /.github/workflows/synchdb-ci.yml: -------------------------------------------------------------------------------- 1 | name: SynchDB CI 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: [ main, synchdb-devel] 6 | pull_request: 7 | branches: [ main, synchdb-devel] 8 | jobs: 9 | params: 10 | runs-on: ubuntu-22.04 11 | name: Initialize parameters 12 | outputs: 13 | pg16_version: '{ "major": "16", "full": "REL_16_4" }' 14 | pg17_version: '{ "major": "17", "full": "REL_17_4" }' 15 | steps: 16 | - name: set up parameters 17 | run: echo 'noop' 18 | build: 19 | needs: params 20 | name: Build for PG${{ fromJson(matrix.pg_version).major }} 21 | strategy: 22 | matrix: 23 | pg_version: 24 | - ${{ needs.params.outputs.pg16_version }} 25 | - ${{ needs.params.outputs.pg17_version }} 26 | runs-on: ubuntu-22.04 27 | steps: 28 | - uses: actions/checkout@v2 29 | - name: Set up JDK 22 30 | uses: actions/setup-java@v2 31 | with: 32 | java-version: '22' 33 | distribution: 'temurin' 34 | architecture: x64 35 | check-latest: true 36 | - name: Set up Maven 37 | uses: stCarolas/setup-maven@v4.5 38 | with: 39 | maven-version: 3.9.8 40 | - name: Install Protobuf 41 | run: | 42 | sudo apt-get update 43 | sudo apt-get install -y 
protobuf-compiler libprotobuf-dev libprotoc-dev 44 | - name: Expose $PG_MAJOR to Github Env 45 | run: | 46 | echo "PG_MAJOR=$(echo '${{ matrix.pg_version }}' | jq -r .major)" >> $GITHUB_ENV 47 | echo "PG_BRANCH=$(echo '${{ matrix.pg_version }}' | jq -r .full)" >> $GITHUB_ENV 48 | shell: bash 49 | - name: Build 50 | run: "./ci/build-synchdb.sh" 51 | shell: bash 52 | - uses: actions/upload-artifact@v4 53 | with: 54 | name: synchdb-install-${{ env.PG_MAJOR }} 55 | path: |- 56 | ./synchdb-install-${{ fromJson(matrix.pg_version).major }}.tar.gz 57 | test-synchdb: 58 | name: PG${{ fromJson(matrix.pg_version).major }}-${{ matrix.dbtypes }} Tests 59 | strategy: 60 | matrix: 61 | pg_version: 62 | - ${{ needs.params.outputs.pg16_version }} 63 | - ${{ needs.params.outputs.pg17_version }} 64 | dbtypes: 65 | - mysql 66 | - oracle 67 | - sqlserver 68 | - olr 69 | runs-on: ubuntu-22.04 70 | needs: 71 | - params 72 | - build 73 | steps: 74 | - uses: actions/checkout@v2 75 | - uses: actions/download-artifact@v4 76 | with: 77 | name: synchdb-install-${{ fromJson(matrix.pg_version).major }} 78 | - name: Set up JDK 22 79 | uses: actions/setup-java@v2 80 | with: 81 | java-version: '22' 82 | distribution: 'temurin' 83 | architecture: x64 84 | check-latest: true 85 | - name: Set up Python 86 | uses: actions/setup-python@v4 87 | with: 88 | python-version: "3.10" 89 | - name: Install pytest 90 | run: | 91 | pip install pytest psycopg2 92 | python --version 93 | pytest --version 94 | - name: Configure Linker 95 | run: | 96 | echo "configure Java" 97 | JAVA_PATH=$(which java) 98 | JDK_HOME_PATH=$(readlink -f ${JAVA_PATH} | sed 's:/bin/java::') 99 | JDK_LIB_PATH=${JDK_HOME_PATH}/lib 100 | echo $JDK_LIB_PATH | sudo tee -a /etc/ld.so.conf.d/x86_64-linux-gnu.conf 101 | echo $JDK_LIB_PATH/server | sudo tee -a /etc/ld.so.conf.d/x86_64-linux-gnu.conf 102 | sudo ldconfig 103 | - name: Install Base PG and SynchDB 104 | run: | 105 | sudo tar xzvf synchdb-install-${{ fromJson(matrix.pg_version).major 
}}.tar.gz -C / 106 | sudo apt-get install -y docker-compose 107 | docker-compose --version 108 | - name: Install Protobuf 109 | run: | 110 | sudo apt-get update 111 | sudo apt-get install -y protobuf-compiler libprotobuf-dev libprotoc-dev 112 | - name: Expose $DBTYPE to Github Env 113 | run: echo "DBTYPE=${{ matrix.dbtypes }}" >> $GITHUB_ENV 114 | shell: bash 115 | - name: synchdb test 116 | run: | 117 | export PATH=$PATH:/usr/lib/postgresql/${{ fromJson(matrix.pg_version).major }}/bin 118 | mkdir -p /var/run/postgresql 119 | sudo chown -R $(whoami) /var/run/postgresql 120 | make USE_PGXS=1 ${{ matrix.dbtypes }}check 121 | - uses: actions/upload-artifact@v4 122 | if: always() 123 | with: 124 | name: synchdb-test-outputs-${{ fromJson(matrix.pg_version).major }}-${{ matrix.dbtypes }} 125 | path: |- 126 | ./synchdb_testdir 127 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | dbz-engine/target/ 2 | *.o 3 | *.so 4 | testenv/olr/oradata/ 5 | testenv/olr/checkpoint 6 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # SynchDB Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as contributors, maintainers, and leaders of the SynchDB project pledge to make participation in our community a harassment-free and inclusive experience for everyone. We commit to fostering an open and welcoming environment for all participants, regardless of age, body size, visible or invisible disability, ethnicity, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 6 | 7 | We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 
8 | 9 | ## Our Standards 10 | 11 | Examples of behavior that contributes to creating a positive environment include: 12 | 13 | - Using welcoming and inclusive language 14 | - Respecting differing viewpoints and experiences 15 | - Gracefully accepting constructive criticism 16 | - Focusing on what is best for the community 17 | - Showing empathy towards other community members 18 | - Being supportive of others' growth and learning 19 | - Taking responsibility and apologizing to those affected by our mistakes 20 | 21 | Examples of unacceptable behavior include: 22 | 23 | - The use of sexualized language or imagery and unwelcome sexual attention or advances 24 | - Trolling, insulting/derogatory comments, and personal or political attacks 25 | - Public or private harassment 26 | - Publishing others' private information, such as a physical or electronic address, without explicit permission 27 | - Other conduct which could reasonably be considered inappropriate in a professional setting 28 | - Inappropriate use of community platforms or tools 29 | - Advocating for or encouraging any of the above behaviors 30 | 31 | ## Our Responsibilities 32 | 33 | Project maintainers are responsible for clarifying and enforcing the standards of acceptable behavior. They have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned with this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. 
34 | 35 | Project maintainers also have the responsibility to: 36 | - Provide guidance and mentorship to new contributors 37 | - Ensure that the project remains accessible to contributors of all skill levels 38 | - Address conflicts in a fair and transparent manner 39 | - Lead by example in upholding these standards in all community venues 40 | 41 | ## Scope 42 | 43 | This Code of Conduct applies within all community spaces, including the project repository, discussions, issue trackers, community calls, and social media channels. It also applies when an individual is officially representing the project in public spaces. Examples of representing our project include using an official project email address, posting via an official social media account, or acting as an appointed representative at an event. 44 | 45 | ## Enforcement 46 | 47 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the project team at [INSERT EMAIL ADDRESS]. All complaints will be reviewed and investigated promptly and fairly. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. 48 | 49 | Enforcement responsibilities are delegated to project leaders who will determine the consequences for any action they deem in violation of this Code of Conduct. If a community member engages in unacceptable behavior, the project leaders may take any action they deem appropriate, including a temporary ban or permanent expulsion from the community without warning. 50 | 51 | ## Reporting Process 52 | 53 | 1. Contact the project team through the designated email address 54 | 2. Include a description of the incident, relevant links or screenshots, and any additional context that may be helpful 55 | 3. The report will be acknowledged within 48 hours 56 | 4. The team will investigate the incident and determine appropriate actions 57 | 5. 
The reporter will receive a follow-up communication regarding the outcome 58 | 59 | All community leaders are obligated to respect the privacy and security concerns of the reporter of any incident. 60 | 61 | ## Enforcement Guidelines 62 | 63 | Project maintainers will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: 64 | 65 | ### 1. Correction 66 | 67 | **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. 68 | 69 | **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. 70 | 71 | ### 2. Warning 72 | 73 | **Community Impact**: A violation through a single incident or series of actions. 74 | 75 | **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. 76 | 77 | ### 3. Temporary Ban 78 | 79 | **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. 80 | 81 | **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. 82 | 83 | ### 4. 
Permanent Ban 84 | 85 | **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. 86 | 87 | **Consequence**: A permanent ban from any sort of public interaction within the community. 88 | 89 | ## Attribution 90 | 91 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 2.0, available at [https://www.contributor-covenant.org/version/2/0/code_of_conduct.html](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html). 92 | 93 | Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). 94 | 95 | ## Questions 96 | 97 | If you have questions about this Code of Conduct, please contact the SynchDB team at coc@synchdb.com. 98 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # contrib/synchdb/Makefile 2 | 3 | MODULE_big = synchdb 4 | 5 | EXTENSION = synchdb 6 | DATA = synchdb--1.0.sql 7 | PGFILEDESC = "synchdb - allows logical replication with heterogeneous databases" 8 | 9 | REGRESS = synchdb 10 | REGRESS_OPTS = --inputdir=./src/test/regress --outputdir=./src/test/regress/results --load-extension=pgcrypto 11 | 12 | # flag to build with native openlog replicator connector support 13 | WITH_OLR ?= 0 14 | 15 | OBJS = src/backend/synchdb/synchdb.o \ 16 | src/backend/converter/format_converter.o \ 17 | src/backend/converter/debezium_event_handler.o \ 18 | src/backend/executor/replication_agent.o 19 | 20 | DBZ_ENGINE_PATH = src/backend/debezium 21 | 22 | # Dynamically set JDK paths 23 | JAVA_PATH := $(shell which java) 24 | JDK_HOME_PATH := $(shell readlink -f $(JAVA_PATH) | sed 's:/bin/java::') 25 | JDK_INCLUDE_PATH := $(JDK_HOME_PATH)/include 26 | 
27 | # default protobuf-c path (for OLR build) 28 | PROTOBUF_C_INCLUDE_DIR ?= /usr/local/include 29 | PROTOBUF_C_LIB_DIR ?= /usr/local/lib 30 | 31 | # Detect the operating system 32 | UNAME_S := $(shell uname -s) 33 | 34 | # Set JDK_INCLUDE_PATH based on the operating system 35 | ifeq ($(UNAME_S), Linux) 36 | JDK_INCLUDE_PATH_OS := $(JDK_INCLUDE_PATH)/linux 37 | $(info Detected OS: Linux) 38 | else ifeq ($(UNAME_S), Darwin) 39 | JDK_INCLUDE_PATH_OS := $(JDK_INCLUDE_PATH)/darwin 40 | $(info Detected OS: Darwin) 41 | else 42 | $(error Unsupported operating system: $(UNAME_S)) 43 | endif 44 | 45 | JDK_LIB_PATH := $(JDK_HOME_PATH)/lib/server 46 | 47 | PG_CFLAGS = -I$(JDK_INCLUDE_PATH) -I$(JDK_INCLUDE_PATH_OS) -I./src/include -I${PROTOBUF_C_INCLUDE_DIR} 48 | PG_CPPFLAGS = -I$(JDK_INCLUDE_PATH) -I$(JDK_INCLUDE_PATH_OS) -I./src/include -I${PROTOBUF_C_INCLUDE_DIR} 49 | PG_LDFLAGS = -L$(JDK_LIB_PATH) -ljvm 50 | 51 | 52 | ifeq ($(WITH_OLR),1) 53 | OBJS += src/backend/converter/olr_event_handler.o \ 54 | src/backend/olr/OraProtoBuf.pb-c.o \ 55 | src/backend/utils/netio_utils.o \ 56 | src/backend/olr/olr_client.o 57 | 58 | PG_LDFLAGS += -lprotobuf-c -L$(PROTOBUF_C_LIB_DIR) 59 | PG_CFLAGS += -DWITH_OLR 60 | PG_CPPFLAGS += -DWITH_OLR 61 | endif 62 | 63 | ifdef USE_PGXS 64 | PG_CONFIG = pg_config 65 | PGXS := $(shell $(PG_CONFIG) --pgxs) 66 | include $(PGXS) 67 | PG_MAJOR := $(shell $(PG_CONFIG) --majorversion) 68 | else 69 | subdir = contrib/synchdb 70 | top_builddir = ../.. 71 | include $(top_builddir)/src/Makefile.global 72 | include $(top_srcdir)/contrib/contrib-global.mk 73 | PG_MAJOR := $(MAJORVERSION) 74 | endif 75 | 76 | 77 | check_protobufc: 78 | @echo "Checking protobuf-c installation" 79 | @if [ ! -d $(PROTOBUF_C_INCLUDE_DIR)/protobuf-c ]; then \ 80 | echo "Error: protobuf-c include path $(PROTOBUF_C_INCLUDE_DIR) not found"; \ 81 | echo "Hint: overwrite PROTOBUF_C_INCLUDE_DIR with correct path to protobuf-c include dir"; \ 82 | exit 1; \ 83 | fi 84 | @if [ ! 
-f $(PROTOBUF_C_LIB_DIR)/libprotobuf-c.so.1.0.0 ]; then \ 85 | echo "Error: $(PROTOBUF_C_LIB_DIR)/libprotobuf-c.so.1.0.0 not found"; \ 86 | echo "Hint: overwrite PROTOBUF_C_LIB_DIR with correct path to /libprotobuf-c.so.1.0.0"; \ 87 | exit 1; \ 88 | fi 89 | 90 | @echo "protobuf-c Paths" 91 | @echo "$(PROTOBUF_C_INCLUDE_DIR)/protobuf-c" 92 | @echo "$(PROTOBUF_C_LIB_DIR)/libprotobuf-c.so.1.0.0" 93 | @echo "protobuf-c check passed" 94 | 95 | 96 | # Target that checks JDK paths 97 | check_jdk: 98 | @echo "Checking JDK environment" 99 | @if [ ! -d $(JDK_INCLUDE_PATH) ]; then \ 100 | echo "Error: JDK include path $(JDK_INCLUDE_PATH) not found"; \ 101 | exit 1; \ 102 | fi 103 | @if [ ! -d $(JDK_INCLUDE_PATH_OS) ]; then \ 104 | echo "Error: JDK include path for OS $(JDK_INCLUDE_PATH_OS) not found"; \ 105 | exit 1; \ 106 | fi 107 | @if [ ! -d $(JDK_LIB_PATH) ]; then \ 108 | echo "Error: JDK lib path $(JDK_LIB_PATH) not found"; \ 109 | exit 1; \ 110 | fi 111 | 112 | @echo "JDK Paths" 113 | @echo "$(JDK_INCLUDE_PATH)" 114 | @echo "$(JDK_INCLUDE_PATH_OS)" 115 | @echo "$(JDK_LIB_PATH)" 116 | @echo "JDK check passed" 117 | 118 | build_dbz: 119 | cd $(DBZ_ENGINE_PATH) && mvn clean install 120 | 121 | clean_dbz: 122 | cd $(DBZ_ENGINE_PATH) && mvn clean 123 | 124 | install_dbz: 125 | rm -rf $(pkglibdir)/dbz_engine 126 | install -d $(pkglibdir)/dbz_engine 127 | cp -rp $(DBZ_ENGINE_PATH)/target/* $(pkglibdir)/dbz_engine 128 | 129 | oracle_parser: 130 | @echo "building against pgmajor ${PG_MAJOR}" 131 | make -C src/backend/olr/oracle_parser${PG_MAJOR} 132 | 133 | clean_oracle_parser: 134 | @echo "cleaning against pgmajor ${PG_MAJOR}" 135 | make clean -C src/backend/olr/oracle_parser${PG_MAJOR} 136 | 137 | install_oracle_parser: 138 | @echo "installing against pgmajor ${PG_MAJOR}" 139 | make install -C src/backend/olr/oracle_parser${PG_MAJOR} 140 | 141 | .PHONY: dbcheck mysqlcheck sqlservercheck oraclecheck dbcheck-tpcc mysqlcheck-tpcc sqlservercheck-tpcc oraclecheck-tpcc 142 | 
dbcheck: 143 | @command -v pytest >/dev/null 2>&1 || { echo >&2 "❌ pytest not found in PATH."; exit 1; } 144 | @command -v docker >/dev/null 2>&1 || { echo >&2 "❌ docker not found in PATH."; exit 1; } 145 | @command -v docker-compose >/dev/null 2>&1 || command -v docker >/dev/null 2>&1 && docker compose version >/dev/null 2>&1 || { echo >&2 "❌ docker-compose not found in PATH"; exit 1; } 146 | @echo "Running tests against dbvendor=$(DB)" 147 | PYTHONPATH=./src/test/pytests/synchdbtests/ pytest -x -v -s --dbvendor=$(DB) --capture=tee-sys ./src/test/pytests/synchdbtests/ 148 | rm -r .pytest_cache ./src/test/pytests/synchdbtests/__pycache__ ./src/test/pytests/synchdbtests/t/__pycache__ 149 | 150 | dbcheck-tpcc: 151 | @command -v pytest >/dev/null 2>&1 || { echo >&2 "❌ pytest not found in PATH."; exit 1; } 152 | @command -v docker >/dev/null 2>&1 || { echo >&2 "❌ docker not found in PATH."; exit 1; } 153 | @command -v docker-compose >/dev/null 2>&1 || command -v docker >/dev/null 2>&1 && docker compose version >/dev/null 2>&1 || { echo >&2 "❌ docker-compose not found in PATH"; exit 1; } 154 | @echo "Running hammerdb based tpcc tests against dbvendor=$(DB)" 155 | PYTHONPATH=./src/test/pytests/synchdbtests/ pytest -v -s --dbvendor=$(DB) --tpccmode=serial --capture=tee-sys ./src/test/pytests/hammerdb/ 156 | rm -r .pytest_cache ./src/test/pytests/hammerdb/__pycache__ 157 | 158 | mysqlcheck: 159 | $(MAKE) dbcheck DB=mysql 160 | 161 | sqlservercheck: 162 | $(MAKE) dbcheck DB=sqlserver 163 | 164 | oraclecheck: 165 | $(MAKE) dbcheck DB=oracle 166 | 167 | olrcheck: 168 | $(MAKE) dbcheck DB=olr 169 | 170 | mysqlcheck-benchmark: 171 | $(MAKE) dbcheck-tpcc DB=mysql 172 | 173 | sqlservercheck-benchmark: 174 | $(MAKE) dbcheck-tpcc DB=sqlserver 175 | 176 | oraclecheck-benchmark: 177 | $(MAKE) dbcheck-tpcc DB=oracle 178 | 179 | 180 | -------------------------------------------------------------------------------- /ci/build-synchdb.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # make bash behave 4 | set -euo pipefail 5 | IFS=$'\n\t' 6 | 7 | # read pg major version, error if not provided 8 | PG_MAJOR=${PG_MAJOR:?please provide the postgres major version} 9 | PG_BRANCH=${PG_BRANCH:?please provide the postgres branch} 10 | 11 | # get codename from release file 12 | . /etc/os-release 13 | codename=${VERSION#*(} 14 | codename=${codename%)*} 15 | 16 | # we'll do everything with absolute paths 17 | basedir="$(pwd)" 18 | 19 | function build_synchdb() 20 | { 21 | pg_major="$1" 22 | 23 | echo $PG_BRANCH 24 | installdir="${basedir}/synchdb-install-${pg_major}" 25 | mkdir -p $installdir 26 | echo "Beginning build for PostgreSQL ${pg_major}..." >&2 27 | 28 | git clone https://github.com/postgres/postgres.git --branch ${PG_BRANCH} 29 | ( 30 | cd postgres && \ 31 | ./configure --prefix=${installdir}/usr/lib/postgresql/${PG_MAJOR} \ 32 | --enable-cassert \ 33 | -enable-rpath \ 34 | --enable-injection-points \ 35 | --with-libedit-preferred \ 36 | --with-libxml \ 37 | --with-icu \ 38 | --with-ssl=openssl && \ 39 | make && \ 40 | make install 41 | 42 | cd contrib && \ 43 | make && \ 44 | make install 45 | ) 46 | 47 | git clone https://github.com/protobuf-c/protobuf-c.git --branch v1.5.2 48 | ( 49 | cd protobuf-c && \ 50 | ./autogen.sh && \ 51 | ./configure --prefix=${installdir}/usr/local && \ 52 | make && \ 53 | make install 54 | ) 55 | 56 | mkdir -p postgres/contrib/synchdb 57 | rsync -a --delete \ 58 | --exclude '.git/' \ 59 | --exclude='.github/' \ 60 | --exclude='ci/' \ 61 | --exclude='testenv/' \ 62 | --exclude='postgres/' \ 63 | --exclude='protobuf-c/' \ 64 | ./ postgres/contrib/synchdb/ 65 | ( 66 | cd postgres/contrib/synchdb && \ 67 | make oracle_parser && \ 68 | make install_oracle_parser && \ 69 | make WITH_OLR=1 build_dbz && \ 70 | make WITH_OLR=1 PROTOBUF_C_INCLUDE_DIR=$installdir/usr/local/include 
PROTOBUF_C_LIB_DIR=$installdir/usr/local/lib && \ 71 | make WITH_OLR=1 install && \ 72 | make WITH_OLR=1 install_dbz 73 | ) 74 | 75 | #mkdir -p "${builddir}" && cd "${builddir}" 76 | 77 | 78 | #export USE_PGXS=1 79 | #make build_dbz PG_CONFIG=/usr/lib/postgresql/${pg_major}/bin/pg_config 80 | #make PG_CONFIG=/usr/lib/postgresql/${pg_major}/bin/pg_config 81 | 82 | #sudo USE_PGXS=1 make install DESTDIR=${installdir} PG_CONFIG=/usr/lib/postgresql/${pg_major}/bin/pg_config 83 | #sudo USE_PGXS=1 make install_dbz pkglibdir=${installdir}/usr/lib/postgresql/${pg_major}/lib PG_CONFIG=/usr/lib/postgresql/${pg_major}/bin/pg_config 84 | 85 | cd $installdir 86 | tar czvf synchdb-install-${pg_major}.tar.gz * 87 | mv synchdb-install-${pg_major}.tar.gz $basedir 88 | } 89 | 90 | build_synchdb "${PG_MAJOR}" 91 | -------------------------------------------------------------------------------- /ci/teardown-remotedbs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # make bash behave 4 | set -euo pipefail 5 | IFS=$'\n\t' 6 | 7 | # read pg major version, error if not provided 8 | DBTYPE=${DBTYPE:?please provide database type} 9 | 10 | # we'll do everything with absolute paths 11 | basedir="$(pwd)" 12 | 13 | function teardown_mysql() 14 | { 15 | echo "tearing down mysql..." 16 | docker stop mysql >/dev/null 2>&1 17 | docker rm mysql >/dev/null 2>&1 18 | #docker-compose -f testenv/mysql/synchdb-mysql-test.yaml down 19 | } 20 | 21 | function teardown_sqlserver() 22 | { 23 | 24 | echo "tearing down sqlserver..." 25 | docker stop sqlserver >/dev/null 2>&1 26 | docker rm sqlserver >/dev/null 2>&1 27 | #docker-compose -f testenv/sqlserver/synchdb-sqlserver-test.yaml down 28 | } 29 | 30 | function teardown_oracle() 31 | { 32 | echo "tearing down oracle..." 
33 | docker stop oracle >/dev/null 2>&1 34 | docker rm oracle >/dev/null 2>&1 35 | #docker-compose -f testenv/oracle/synchdb-oracle-test.yaml down 36 | } 37 | 38 | function teardown_ora19c() 39 | { 40 | echo "tearing down ora19c..." 41 | docker stop ora19c >/dev/null 2>&1 42 | docker rm ora19c >/dev/null 2>&1 43 | } 44 | 45 | function teardown_hammerdb() 46 | { 47 | echo "tearing down hammerdb..." 48 | docker stop hammerdb >/dev/null 2>&1 49 | docker rm hammerdb >/dev/null 2>&1 50 | } 51 | 52 | function teardown_olr() 53 | { 54 | echo "tearing down olr..." 55 | docker stop OpenLogReplicator >/dev/null 2>&1 56 | docker rm OpenLogReplicator >/dev/null 2>&1 57 | #docker-compose -f testenv/olr/synchdb-olr-test.yaml down 58 | } 59 | 60 | function teardown_oradata() 61 | { 62 | echo "tearing down oradata..." 63 | if [ -d ./testenv/olr/oradata ]; then 64 | sudo rm -rf ./testenv/olr/oradata 65 | fi 66 | 67 | if [ -d ./testenv/olr/checkpoint ]; then 68 | sudo rm -rf ./testenv/olr/checkpoint 69 | fi 70 | } 71 | 72 | function teardown_synchdbnet() 73 | { 74 | echo "tearing down synchdbnet..." 
75 | docker network rm synchdbnet >/dev/null 2>&1 76 | } 77 | 78 | function teardown_remotedb() 79 | { 80 | dbtype="$1" 81 | 82 | case "$dbtype" in 83 | "mysql") 84 | teardown_mysql 85 | ;; 86 | "sqlserver") 87 | teardown_sqlserver 88 | ;; 89 | "oracle") 90 | teardown_oracle 91 | ;; 92 | "ora19c") 93 | teardown_ora19c 94 | ;; 95 | "hammerdb") 96 | teardown_hammerdb 97 | ;; 98 | "olr") 99 | teardown_olr 100 | teardown_ora19c 101 | ;; 102 | "synchdbnet") 103 | teardown_synchdbnet 104 | ;; 105 | "oradata") 106 | teardown_oradata 107 | ;; 108 | *) 109 | echo "$dbtype not supported" 110 | exit 1 111 | ;; 112 | esac 113 | } 114 | 115 | teardown_remotedb "${DBTYPE}" 116 | -------------------------------------------------------------------------------- /ci/test-synchdb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # make bash behave 4 | set -euo pipefail 5 | IFS=$'\n\t' 6 | 7 | # read pg major version, error if not provided 8 | DBTYPE=${DBTYPE:?please provide database type} 9 | 10 | # get codename from release file 11 | . /etc/os-release 12 | codename=${VERSION#*(} 13 | codename=${codename%)*} 14 | 15 | # we'll do everything with absolute paths 16 | basedir="$(pwd)" 17 | 18 | function test_mysql() 19 | { 20 | echo "testing mysql..." 21 | psql -d postgres -c "SELECT synchdb_add_conninfo('mysqlconn', '127.0.0.1', 3306, 'mysqluser', 'mysqlpwd', 'inventory', 'postgres', '', 'mysql');" 22 | if [ $? -ne 0 ]; then 23 | echo "failed to create connector" 24 | exit 1 25 | fi 26 | 27 | psql -d postgres -c "SELECT synchdb_start_engine_bgw('mysqlconn');" 28 | if [ $? -ne 0 ]; then 29 | echo "failed to start connector" 30 | exit 1 31 | fi 32 | echo "waiting for initial snapshot before checking results..." 
33 | sleep 10 34 | psql -d postgres -c "SELECT * FROM synchdb_state_view;" 35 | syncing_src_count=$(docker exec -i mysql mysql -umysqluser -pmysqlpwd -sN -e "SELECT COUNT(*) from inventory.orders" | tr -d ' \r\n') 36 | syncing_dst_count=$(psql -d postgres -t -c "SELECT COUNT(*) from inventory.orders;" | tr -d ' \n') 37 | if [ "$syncing_src_count" -ne "$syncing_dst_count" ]; then 38 | echo "initial snapshot failed. orders table count mismatch: src:$syncing_src_count vs dst:$syncing_dst_count" 39 | exit 1 40 | fi 41 | echo "initial snapshot test done, orders table count matched: src:$syncing_src_count vs dst:$syncing_dst_count" 42 | 43 | docker exec -i mysql mysql -umysqluser -pmysqlpwd -e "INSERT INTO inventory.orders(order_date, purchaser, quantity, product_id) VALUES ('2024-01-01', 1003, 2, 107)" 44 | echo "waiting for CDC before checking results..." 45 | sleep 10 46 | syncing_src_count=$(docker exec -i mysql mysql -umysqluser -pmysqlpwd -sN -e "SELECT COUNT(*) from inventory.orders" | tr -d ' \r\n') 47 | syncing_dst_count=$(psql -d postgres -t -c "SELECT COUNT(*) from inventory.orders;" | tr -d ' \n') 48 | if [ "$syncing_src_count" -ne "$syncing_dst_count" ]; then 49 | echo "CDC failed. orders table count mismatch: src:$syncing_src_count vs dst:$syncing_dst_count" 50 | exit 1 51 | fi 52 | echo "CDC test done, orders table count matched: src:$syncing_src_count vs dst:$syncing_dst_count" 53 | psql -d postgres -c "SELECT * FROM synchdb_stats_view;" 54 | exit 0 55 | } 56 | 57 | function test_sqlserver() 58 | { 59 | 60 | echo "testing sqlserver..." 61 | id=$(docker ps | grep sqlserver | awk '{print $1}') 62 | psql -d postgres -c "SELECT synchdb_add_conninfo('sqlserverconn', '127.0.0.1', 1433, 'sa', 'Password!', 'testDB', 'postgres', '', 'sqlserver');" 63 | if [ $? -ne 0 ]; then 64 | echo "failed to create connector" 65 | exit 1 66 | fi 67 | 68 | psql -d postgres -c "SELECT synchdb_start_engine_bgw('sqlserverconn');" 69 | if [ $? 
-ne 0 ]; then 70 | echo "failed to start connector" 71 | exit 1 72 | fi 73 | 74 | echo "waiting for initial snapshot before checking results..." 75 | sleep 10 76 | psql -d postgres -c "SELECT * FROM synchdb_state_view;" 77 | syncing_src_count=$(docker exec -i $id /opt/mssql-tools18/bin/sqlcmd -U sa -P 'Password!' -d testDB -C -Q "SELECT COUNT(*) from orders" -h -1 | sed -n '1p' | tr -d ' \r\n') 78 | syncing_dst_count=$(psql -d postgres -t -c "SELECT COUNT(*) from testDB.orders;" | tr -d ' \n') 79 | if [ "$syncing_src_count" -ne "$syncing_dst_count" ]; then 80 | echo "initial snapshot failed. orders table count mismatch: src:$syncing_src_count vs dst:$syncing_dst_count" 81 | exit 1 82 | fi 83 | echo "initial snapshot test done, orders table count matched: src:$syncing_src_count vs dst:$syncing_dst_count" 84 | 85 | docker exec -i $id /opt/mssql-tools18/bin/sqlcmd -U sa -P 'Password!' -d testDB -C -Q "INSERT INTO orders(order_date, purchaser, quantity, product_id) VALUES ('2024-01-01', 1003, 2, 107)" 86 | echo "waiting for CDC before checking results..." 87 | sleep 10 88 | syncing_src_count=$(docker exec -i $id /opt/mssql-tools18/bin/sqlcmd -U sa -P 'Password!' -d testDB -C -Q "SELECT COUNT(*) from orders" -h -1 | sed -n '1p' | tr -d ' \r\n') 89 | syncing_dst_count=$(psql -d postgres -t -c "SELECT COUNT(*) from testDB.orders;" | tr -d ' \n') 90 | 91 | if [ "$syncing_src_count" -ne "$syncing_dst_count" ]; then 92 | echo "CDC failed. orders table count mismatch: src:$syncing_src_count vs dst:$syncing_dst_count" 93 | exit 1 94 | fi 95 | echo "CDC test done, orders table count matched: src:$syncing_src_count vs dst:$syncing_dst_count" 96 | psql -d postgres -c "SELECT * FROM synchdb_stats_view;" 97 | exit 0 98 | } 99 | 100 | function test_oracle() 101 | { 102 | echo "testing oracle..." 
# Deploy prometheus + grafana monitoring containers unless already present.
# Relies on docker_compose() defined below (compose-plugin/standalone wrapper).
function deploy-monitoring()
{
	echo "setting up prometheus..."
	if docker inspect prometheus >/dev/null 2>&1; then
		echo "prometheus already exists: skip it..."
	else
		docker_compose -f ./src/monitoring/docker-compose.yaml up -d prometheus
	fi
	echo "setting up grafana..."
	if docker inspect grafana >/dev/null 2>&1; then
		# typo fixed: message previously said "grafaaana"
		echo "grafana already exists: skip it..."
	else
		docker_compose -f ./src/monitoring/docker-compose.yaml up -d grafana
	fi
}

# Stop and remove the grafana and prometheus containers if they exist.
function teardown-monitoring()
{
	echo "tearing down grafana..."
	if docker inspect grafana >/dev/null 2>&1; then
		docker stop grafana >/dev/null 2>&1
		docker rm grafana >/dev/null 2>&1
	fi

	echo "tearing down prometheus"
	if docker inspect prometheus >/dev/null 2>&1; then
		docker stop prometheus >/dev/null 2>&1
		docker rm prometheus >/dev/null 2>&1
	fi
}

# Deploy the synchdb test container unless one already exists.
function deploy-synchdb()
{
	echo "setting up synchdb..."
	if docker inspect synchdb >/dev/null 2>&1; then
		echo "synchdb already exists: skip it..."
	else
		docker_compose -f ./testenv/synchdb/synchdb-test.yaml up -d
	fi
}

# Stop and remove the synchdb container if it exists.
function teardown-synchdb()
{
	echo "tearing down synchdb..."
	if docker inspect synchdb >/dev/null 2>&1; then
		docker stop synchdb >/dev/null 2>&1
		docker rm synchdb >/dev/null 2>&1
	fi
}

# Deploy one source database. $1 is the DBTYPE understood by
# ci/setup-remotedbs.sh; "olr" additionally passes OLRVER through.
function deploy-sourcedb()
{
	# quote "$1" and use POSIX '=' so an empty argument cannot break the test
	if [ "$1" = "olr" ]; then
		DBTYPE=$1 INTERNAL=1 OLRVER=$OLRVER ./ci/setup-remotedbs.sh
	else
		DBTYPE=$1 INTERNAL=1 ./ci/setup-remotedbs.sh
	fi
}

# Tear down one source database. The olr deployment's container is named
# OpenLogReplicator; all other DBTYPEs match their container name.
function teardown-sourcedb()
{
	echo "tearing down $1 if active..."
	if [ "$1" = "olr" ]; then
		name="OpenLogReplicator"
	else
		name=$1
	fi

	if docker inspect "$name" >/dev/null 2>&1; then
		DBTYPE=$1 ./ci/teardown-remotedbs.sh
	fi
}

# Remove leftover oracle data managed by the ci teardown script.
function clear-oradata()
{
	DBTYPE=oradata ./ci/teardown-remotedbs.sh
}

# Interactive deployment: prompt for a comma separated list of source
# databases, validate/deduplicate it, then deploy synchdb plus each selection.
function custom-deployment()
{
	echo ""
	echo "please list source databases separate by comma"
	echo "possible values: (mysql, sqlserver, oracle23ai, oracle19c, olr)"

	read -rp "your selection: " RAW_CHOICES
	IFS=', ' read -r -a _tokens <<< "$RAW_CHOICES"
	declare -A PICKED=()
	for t in "${_tokens[@]}"; do
		[[ -z "$t" ]] && continue
		key="${t,,}"	# lower-case for case-insensitive matching
		case "$key" in
			mysql|sqlserver|oracle23ai|oracle19c|olr) PICKED["$key"]=1 ;;
			*) echo "Ignoring unknown option: $t" ;;
		esac
	done

	if [[ "${#PICKED[@]}" -eq 0 ]]; then
		echo "No valid selections made. We'll deploy only 'synchdb'."
		FINAL_LIST="synchdb"
	else
		# olr deployment already brings up oracle19c; avoid deploying it twice
		if [[ -n "${PICKED[oracle19c]+x}" && -n "${PICKED[olr]+x}" ]]; then
			echo "Ignoring oracle19c because olr will also deploy oracle19c"
			unset PICKED[oracle19c]
		fi

		FINAL_LIST="synchdb $(printf '%s ' "${!PICKED[@]}" | sed 's/ $//')"
	fi

	for x in $FINAL_LIST;
	do
		if [ "$x" = "synchdb" ]; then
			deploy-synchdb
			continue
		fi

		# map menu names to the DBTYPE names used by the ci scripts
		if [ "$x" = "oracle23ai" ]; then
			x="oracle"
		fi

		if [ "$x" = "oracle19c" ]; then
			x="ora19c"
		fi

		deploy-sourcedb $x
	done
}


# check required tools
if ! have docker; then
	echo "docker is missing. Exit..."
	exit 1
fi

# prefer the docker compose plugin, fall back to standalone docker-compose
if docker compose version >/dev/null 2>&1; then
	docker_compose() { docker compose "$@"; }
elif command -v docker-compose >/dev/null 2>&1; then
	docker_compose() { docker-compose "$@"; }
else
	echo "docker-compose or docker compose is missing. Exit..."
	exit 1
fi

# check required scripts
if [ ! -f ./ci/setup-remotedbs.sh ] || [ ! -f ./ci/teardown-remotedbs.sh ]; then
	echo "please run this script in the root directory of synchdb project"
	exit 1
fi

# prompts
echo "----------------------------------"
echo "-----> Welcome to ezdeploy! <-----"
echo "----------------------------------"
echo ""
echo "please select a quick deploy option:"
echo -e "\t 1) synchdb only"
echo -e "\t 2) synchdb + mysql"
echo -e "\t 3) synchdb + sqlserver"
echo -e "\t 4) synchdb + oracle23ai"
echo -e "\t 5) synchdb + oracle19c"
echo -e "\t 6) synchdb + olr(oracle19c)"
echo -e "\t 7) synchdb + all source databases"
echo -e "\t 8) custom deployment"
echo -e "\t 9) deploy monitoring"
echo -e "\t10) teardown deployment"

read -rp "enter your selection: " choice

case "$choice" in
	1) deploy-synchdb
	;;
	2) deploy-synchdb
	   deploy-sourcedb "mysql"
	;;
	3) deploy-synchdb
	   deploy-sourcedb "sqlserver"
	;;
	4) deploy-synchdb
	   deploy-sourcedb "oracle"
	;;
	5) deploy-synchdb
	   deploy-sourcedb "ora19c"
	;;
	6) deploy-synchdb
	   deploy-sourcedb "olr"
	;;
	7) deploy-synchdb
	   deploy-sourcedb "mysql"
	   deploy-sourcedb "sqlserver"
	   deploy-sourcedb "oracle"
	   deploy-sourcedb "olr"
	;;
	8) custom-deployment
	;;
	9) deploy-monitoring
	;;
	10) teardown-synchdb
	    teardown-sourcedb "mysql"
	    teardown-sourcedb "sqlserver"
	    teardown-sourcedb "oracle"
	    teardown-sourcedb "ora19c"
	    teardown-sourcedb "olr"
	    teardown-monitoring
	    clear-oradata
	    teardown-sourcedb "synchdbnet"
	;;
	*) echo "Invalid choice"; exit 1
	;;
esac

echo "job done..."
228 | -------------------------------------------------------------------------------- /myrule.json: -------------------------------------------------------------------------------- 1 | { 2 | "transform_datatype_rules": 3 | [ 4 | { 5 | "translate_from": "GEOMETRY", 6 | "translate_from_autoinc": false, 7 | "translate_to": "TEXT", 8 | "translate_to_size": -1 9 | }, 10 | { 11 | "translate_from": "POINT", 12 | "translate_from_autoinc": false, 13 | "translate_to": "TEXT", 14 | "translate_to_size": -1 15 | }, 16 | { 17 | "translate_from": "inventory.geom.g.GEOMETRY", 18 | "translate_from_autoinc": false, 19 | "translate_to": "GEOMETRY", 20 | "translate_to_size": 0 21 | }, 22 | { 23 | "translate_from": "inventory.orders.quantity.INT", 24 | "translate_from_autoinc": false, 25 | "translate_to": "BIGINT", 26 | "translate_to_size": 0 27 | } 28 | ], 29 | "transform_objectname_rules": 30 | [ 31 | { 32 | "object_type": "table", 33 | "source_object": "inventory.orders", 34 | "destination_object": "schema1.orders" 35 | }, 36 | { 37 | "object_type": "table", 38 | "source_object": "inventory.products", 39 | "destination_object": "products" 40 | }, 41 | { 42 | "object_type": "table", 43 | "source_object": "testDB.dbo.customers", 44 | "destination_object": "schema1.people" 45 | }, 46 | { 47 | "object_type": "table", 48 | "source_object": "inventory.altertest", 49 | "destination_object": "schema2.notaltertest" 50 | }, 51 | { 52 | "object_type": "column", 53 | "source_object": "inventory.orders.order_number", 54 | "destination_object": "ididid" 55 | }, 56 | { 57 | "object_type": "column", 58 | "source_object": "inventory.orders.purchaser", 59 | "destination_object": "the_dude" 60 | }, 61 | { 62 | "object_type": "column", 63 | "source_object": "inventory.orders.quantity", 64 | "destination_object": "the_numba" 65 | }, 66 | { 67 | "object_type": "column", 68 | "source_object": "testDB.dbo.customers.first_name", 69 | "destination_object": "the_awesome_first_name" 70 | } 71 | ], 72 | 
"transform_expression_rules": 73 | [ 74 | { 75 | "transform_from": "inventory.orders.quantity", 76 | "transform_expression": "case when %d < 500 then 0 else %d end" 77 | }, 78 | { 79 | "transform_from": "inventory.geom.g", 80 | "transform_expression": "ST_SetSRID(ST_GeomFromWKB(decode('%w', 'base64')),%s)" 81 | }, 82 | { 83 | "transform_from": "inventory.products.name", 84 | "transform_expression": "'>>>>>' || '%d' || '<<<<<'" 85 | }, 86 | { 87 | "transform_from": "inventory.products.description", 88 | "transform_expression": "'>>>>>' || '%d' || '<<<<<'" 89 | } 90 | ] 91 | } 92 | -------------------------------------------------------------------------------- /src/backend/debezium/pom.xml: -------------------------------------------------------------------------------- 1 | 3 | 4.0.0 4 | com.example 5 | dbz-engine 6 | jar 7 | 1.0.0 8 | dbz-engine 9 | http://maven.apache.org 10 | 11 | 12 | junit 13 | junit 14 | 3.8.1 15 | test 16 | 17 | 18 | mysql 19 | mysql-connector-java 20 | 8.0.33 21 | 22 | 23 | org.apache.kafka 24 | connect-api 25 | 3.6.2 26 | 27 | 28 | org.apache.kafka 29 | connect-runtime 30 | 3.6.2 31 | 32 | 33 | com.zendesk 34 | mysql-binlog-connector-java 35 | 0.29.1 36 | 37 | 38 | io.debezium 39 | debezium-api 40 | 2.6.2.Final 41 | 42 | 43 | io.debezium 44 | debezium-embedded 45 | 2.6.2.Final 46 | 47 | 48 | io.debezium 49 | debezium-connector-mysql 50 | 2.6.2.Final 51 | 52 | 53 | io.debezium 54 | debezium-connector-oracle 55 | 2.6.2.Final 56 | 57 | 58 | io.debezium 59 | debezium-connector-sqlserver 60 | 2.6.2.Final 61 | 62 | 63 | io.debezium 64 | debezium-core 65 | 2.6.2.Final 66 | 67 | 68 | io.debezium 69 | debezium-storage-kafka 70 | 2.6.2.Final 71 | 72 | 73 | io.debezium 74 | debezium-storage-file 75 | 2.6.2.Final 76 | 77 | 78 | io.debezium 79 | debezium-ddl-parser 80 | 2.6.2.Final 81 | 82 | 83 | org.slf4j 84 | slf4j-api 85 | 1.7.36 86 | 87 | 88 | org.slf4j 89 | slf4j-log4j12 90 | 1.7.36 91 | 92 | 93 | log4j 94 | log4j 95 | 1.2.17 96 | 97 | 98 | 
org.antlr 99 | antlr4-runtime 100 | 4.10.1 101 | 102 | 103 | com.fasterxml.jackson.core 104 | jackson-databind 105 | 2.15.3 106 | 107 | 108 | com.fasterxml.jackson.core 109 | jackson-annotations 110 | 2.15.3 111 | 112 | 113 | com.fasterxml.jackson.core 114 | jackson-core 115 | 2.15.3 116 | 117 | 118 | 119 | 120 | 121 | org.apache.maven.plugins 122 | maven-compiler-plugin 123 | 3.8.1 124 | 125 | 1.8 126 | 1.8 127 | 128 | 129 | 130 | org.apache.maven.plugins 131 | maven-jar-plugin 132 | 133 | 134 | 135 | true 136 | com.example.DebeziumRunner 137 | lib/ 138 | 139 | 140 | 141 | 142 | 143 | org.apache.maven.plugins 144 | maven-dependency-plugin 145 | 3.1.2 146 | 147 | 148 | copy-dependencies 149 | package 150 | 151 | copy-dependencies 152 | 153 | 154 | ${project.build.directory}/lib 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | -------------------------------------------------------------------------------- /src/backend/debezium/src/test/java/com/example/AppTest.java: -------------------------------------------------------------------------------- 1 | package com.example; 2 | 3 | import junit.framework.Test; 4 | import junit.framework.TestCase; 5 | import junit.framework.TestSuite; 6 | 7 | /** 8 | * Unit test for simple App. 
9 | */ 10 | public class AppTest 11 | extends TestCase 12 | { 13 | /** 14 | * Create the test case 15 | * 16 | * @param testName name of the test case 17 | */ 18 | public AppTest( String testName ) 19 | { 20 | super( testName ); 21 | } 22 | 23 | /** 24 | * @return the suite of tests being tested 25 | */ 26 | public static Test suite() 27 | { 28 | return new TestSuite( AppTest.class ); 29 | } 30 | 31 | /** 32 | * Rigourous Test :-) 33 | */ 34 | public void testApp() 35 | { 36 | assertTrue( true ); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/backend/olr/OraProtoBuf.proto: -------------------------------------------------------------------------------- 1 | /* Copyright (C) 2018-2025 Adam Leszczynski (aleszczynski@bersler.com) 2 | 3 | This file is part of OpenLogReplicator. 4 | 5 | OpenLogReplicator is free software; you can redistribute it and/or 6 | modify it under the terms of the GNU General Public License as published 7 | by the Free Software Foundation; either version 3, or (at your option) 8 | any later version. 9 | 10 | OpenLogReplicator is distributed in the hope that it will be useful, 11 | but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General 13 | Public License for more details. 14 | 15 | You should have received a copy of the GNU General Public License 16 | along with OpenLogReplicator; see the file LICENSE; If not see 17 | . 
*/ 18 | 19 | syntax = "proto3"; 20 | package OpenLogReplicator.pb; 21 | 22 | option java_package="io.debezium.connector.oracle.proto"; 23 | option java_outer_classname = "OpenLogReplicator"; 24 | // option cc_enable_arenas = true; 25 | // option optimize_for = SPEED; 26 | 27 | enum Op { 28 | BEGIN = 0; //begin 29 | COMMIT = 1; //commit 30 | INSERT = 2; //c 31 | UPDATE = 3; //u 32 | DELETE = 4; //d 33 | DDL = 5; //ddl 34 | CHKPT = 6; //checkpoint 35 | } 36 | 37 | enum ColumnType { 38 | UNKNOWN = 0; 39 | VARCHAR2 = 1; //1 40 | NUMBER = 2; //2, FLOAT 41 | LONG = 3; //8 42 | DATE = 4; //12 43 | RAW = 5; //23 44 | LONG_RAW = 6; //24 45 | CHAR = 7; //96, NCHAR 46 | BINARY_FLOAT = 8; //100 47 | BINARY_DOUBLE = 9; //101 48 | CLOB = 10; //112, NCLOB 49 | BLOB = 11; //113 50 | TIMESTAMP = 12; //180 51 | TIMESTAMP_WITH_TZ = 13; //181 52 | INTERVAL_YEAR_TO_MONTH = 14; //182 53 | INTERVAL_DAY_TO_SECOND = 15; //183 54 | UROWID = 16; //208 55 | TIMESTAMP_WITH_LOCAL_TZ = 17; //231 56 | } 57 | 58 | service OpenLogReplicator { 59 | rpc Redo(stream RedoRequest) returns (stream RedoResponse); 60 | } 61 | 62 | enum RequestCode { 63 | INFO = 0; 64 | START = 1; 65 | CONTINUE = 2; 66 | CONFIRM = 3; 67 | } 68 | 69 | enum ResponseCode { 70 | READY = 0; 71 | FAILED_START = 1; 72 | STARTING = 2; 73 | ALREADY_STARTED = 3; 74 | REPLICATE = 4; 75 | PAYLOAD = 5; 76 | INVALID_DATABASE = 6; 77 | INVALID_COMMAND = 7; 78 | } 79 | 80 | message Value { 81 | string name = 1; 82 | oneof datum { 83 | int64 value_int = 2; 84 | float value_float = 3; 85 | double value_double = 4; 86 | string value_string = 5; 87 | bytes value_bytes = 6; 88 | } 89 | } 90 | 91 | message Column { 92 | string name = 1; 93 | ColumnType type = 2; 94 | int32 length = 3; 95 | int32 precision = 4; 96 | int32 scale = 5; 97 | bool nullable = 6; 98 | } 99 | 100 | message Schema { 101 | string owner = 1; 102 | string name = 2; 103 | uint32 obj = 3; 104 | oneof tm_val { 105 | uint64 tm = 4; 106 | string tms = 5; 107 | } 108 | repeated 
Column column = 6; 109 | } 110 | 111 | message Payload { 112 | Op op = 1; 113 | Schema schema = 2; 114 | string rid = 3; 115 | repeated Value before = 4; 116 | repeated Value after = 5; 117 | string ddl = 6; 118 | uint32 seq = 7; 119 | uint64 offset = 8; 120 | bool redo = 9; 121 | uint64 num = 10; 122 | } 123 | 124 | message SchemaRequest { 125 | string mask = 1; 126 | string filter = 2; 127 | } 128 | 129 | message RedoRequest { 130 | RequestCode code = 1; 131 | string database_name = 2; 132 | oneof tm_val { 133 | uint64 scn = 3; 134 | string tms = 4; 135 | int64 tm_rel = 5; 136 | } 137 | uint64 seq = 6; 138 | repeated SchemaRequest schema = 7; 139 | uint64 c_scn = 8; 140 | uint64 c_idx = 9; 141 | } 142 | 143 | message RedoResponse { 144 | ResponseCode code = 1; 145 | oneof scn_val { 146 | uint64 scn = 2; 147 | string scns = 3; 148 | } 149 | oneof tm_val { 150 | uint64 tm = 4; 151 | string tms = 5; 152 | } 153 | oneof xid_val { 154 | string xid = 6; 155 | uint64 xidn = 7; 156 | } 157 | string db = 8; 158 | repeated Payload payload = 9; 159 | uint64 c_scn = 10; 160 | uint64 c_idx = 11; 161 | map attributes = 12; 162 | } 163 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser16/Makefile: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------- 2 | # 3 | # Makefile-- 4 | # Makefile for src/backend/oracle_parser 5 | # 6 | # Copyright (c) 2023, Ivory SQL Global Development Team 7 | # IDENTIFICATION 8 | # src/backend/oracle_parser/Makefile 9 | # 10 | # add the file for requirement "SQL PARSER" 11 | # 12 | #------------------------------------------------------------------------- 13 | #subdir = contrib/oracle_parser 14 | top_builddir = ../../../../../../ 15 | include $(top_builddir)/src/Makefile.global 16 | 17 | PGFILEDESC = "liboracle_parser - raw parser for oracle" 18 | NAME = liboracle_parser 19 | 20 | # 
include our headers first 21 | override CPPFLAGS := -I./include -I$(srcdir) $(CPPFLAGS) 22 | rpath = 23 | 24 | OBJS = ora_keywords.o ora_gram.o ora_scan.o liboracle_parser.o $(WIN32RES) 25 | 26 | # where to find ora_gen_keywordlist.pl and subsidiary files 27 | TOOLSDIR = $(top_srcdir)/src/tools 28 | GEN_KEYWORDLIST = $(PERL) -I $(TOOLSDIR) $(TOOLSDIR)/ora_gen_keywordlist.pl 29 | GEN_KEYWORDLIST_DEPS = $(TOOLSDIR)/ora_gen_keywordlist.pl $(TOOLSDIR)/PerfectHash.pm 30 | 31 | all: all-shared-lib 32 | 33 | include $(top_builddir)/src/Makefile.shlib 34 | 35 | install: all installdirs install-lib 36 | 37 | installdirs: installdirs-lib 38 | 39 | uninstall: uninstall-lib 40 | 41 | 42 | # See notes in src/backend/oracle_parser/Makefile about the following two rules 43 | ora_gram.h: ora_gram.c 44 | touch $@ 45 | 46 | ora_gram.c: BISONFLAGS += -d 47 | ora_gram.c: BISON_CHECK_CMD = $(PERL) $(srcdir)/check_keywords.pl $< $(srcdir)/include/oracle_parser/ora_kwlist.h 48 | 49 | ora_scan.c: FLEXFLAGS = -CF -p -p 50 | ora_scan.c: FLEX_NO_BACKUP=yes 51 | ora_scan.c: FLEX_FIX_WARNING=yes 52 | 53 | # Force these dependencies to be known even without dependency info built: 54 | ora_scan.o ora_gram.o ora_keywords.o liboracle_parser.o: ora_gram.h 55 | 56 | # generate SQL keyword lookup table to be included into ora_keywords*.o. 57 | #ora_kwlist_d.h: $(top_srcdir)/src/include/oracle_parser/ora_kwlist.h $(GEN_KEYWORDLIST_DEPS) 58 | # $(GEN_KEYWORDLIST) --extern $< 59 | 60 | # Dependencies of ora_keywords*.o need to be managed explicitly to make sure 61 | # that you don't get broken parsing code, even in a non-enable-depend build. 62 | ora_keywords.o : ora_kwlist_d.h 63 | 64 | 65 | distprep: ora_scan.c ora_gram.c ora_kwlist_d.h 66 | 67 | # ora_gram.c, ora_gram.h, and ora_scan.c are in the distribution tarball, so they 68 | # are not clean here. 
69 | clean distclean: clean-lib 70 | rm -f lex.backup $(OBJS) 71 | 72 | maintainer-clean: clean 73 | rm -f ora_scan.c ora_gram.c ora_kwlist_d.h 74 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser16/check_keywords.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | 3 | # Check that the keyword lists in gram.y and kwlist.h are sane. 4 | # Usage: check_keywords.pl gram.y kwlist.h 5 | 6 | # src/backend/parser/check_keywords.pl 7 | # Copyright (c) 2009-2024, PostgreSQL Global Development Group 8 | # Portions Copyright (c) 2023-2025, IvorySQL Global Development Team 9 | 10 | use strict; 11 | use warnings FATAL => 'all'; 12 | 13 | my $gram_filename = $ARGV[0]; 14 | my $kwlist_filename = $ARGV[1]; 15 | 16 | my $errors = 0; 17 | 18 | sub error 19 | { 20 | print STDERR @_; 21 | $errors = 1; 22 | return; 23 | } 24 | 25 | # Check alphabetical order of a set of keyword symbols 26 | # (note these are NOT the actual keyword strings) 27 | sub check_alphabetical_order 28 | { 29 | my ($listname, $list) = @_; 30 | my $prevkword = ''; 31 | 32 | foreach my $kword (@$list) 33 | { 34 | # Some symbols have a _P suffix. Remove it for the comparison. 
35 | my $bare_kword = $kword; 36 | $bare_kword =~ s/_P$//; 37 | if ($bare_kword le $prevkword) 38 | { 39 | error 40 | "'$bare_kword' after '$prevkword' in $listname list is misplaced"; 41 | } 42 | $prevkword = $bare_kword; 43 | } 44 | return; 45 | } 46 | 47 | $, = ' '; # set output field separator 48 | $\ = "\n"; # set output record separator 49 | 50 | my %keyword_categories; 51 | $keyword_categories{'unreserved_keyword'} = 'UNRESERVED_KEYWORD'; 52 | $keyword_categories{'col_name_keyword'} = 'COL_NAME_KEYWORD'; 53 | $keyword_categories{'type_func_name_keyword'} = 'TYPE_FUNC_NAME_KEYWORD'; 54 | $keyword_categories{'reserved_keyword'} = 'RESERVED_KEYWORD'; 55 | 56 | open(my $gram, '<', $gram_filename) || die("Could not open : $gram_filename"); 57 | 58 | my $kcat; 59 | my $in_bare_labels; 60 | my $comment; 61 | my @arr; 62 | my %keywords; 63 | my @bare_label_keywords; 64 | 65 | line: while (my $S = <$gram>) 66 | { 67 | chomp $S; # strip record separator 68 | 69 | my $s; 70 | 71 | # Make sure any braces are split 72 | $s = '{', $S =~ s/$s/ { /g; 73 | $s = '}', $S =~ s/$s/ } /g; 74 | 75 | # Any comments are split 76 | $s = '[/][*]', $S =~ s#$s# /* #g; 77 | $s = '[*][/]', $S =~ s#$s# */ #g; 78 | 79 | if (!($kcat) && !($in_bare_labels)) 80 | { 81 | 82 | # Is this the beginning of a keyword list? 83 | foreach my $k (keys %keyword_categories) 84 | { 85 | if ($S =~ m/^($k):/) 86 | { 87 | $kcat = $k; 88 | next line; 89 | } 90 | } 91 | 92 | # Is this the beginning of the bare_label_keyword list? 93 | $in_bare_labels = 1 if ($S =~ m/^bare_label_keyword:/); 94 | 95 | next line; 96 | } 97 | 98 | # Now split the line into individual fields 99 | my $n = (@arr = split(' ', $S)); 100 | 101 | # Ok, we're in a keyword list. 
Go through each field in turn 102 | for (my $fieldIndexer = 0; $fieldIndexer < $n; $fieldIndexer++) 103 | { 104 | if ($arr[$fieldIndexer] eq '*/' && $comment) 105 | { 106 | $comment = 0; 107 | next; 108 | } 109 | elsif ($comment) 110 | { 111 | next; 112 | } 113 | elsif ($arr[$fieldIndexer] eq '/*') 114 | { 115 | 116 | # start of a multiline comment 117 | $comment = 1; 118 | next; 119 | } 120 | elsif ($arr[$fieldIndexer] eq '//') 121 | { 122 | next line; 123 | } 124 | 125 | if ($arr[$fieldIndexer] eq ';') 126 | { 127 | 128 | # end of keyword list 129 | undef $kcat; 130 | undef $in_bare_labels; 131 | next; 132 | } 133 | 134 | if ($arr[$fieldIndexer] eq '|') 135 | { 136 | next; 137 | } 138 | 139 | # Put this keyword into the right list 140 | if ($in_bare_labels) 141 | { 142 | push @bare_label_keywords, $arr[$fieldIndexer]; 143 | } 144 | else 145 | { 146 | push @{ $keywords{$kcat} }, $arr[$fieldIndexer]; 147 | } 148 | } 149 | } 150 | close $gram; 151 | 152 | # Check that each keyword list is in alphabetical order (just for neatnik-ism) 153 | check_alphabetical_order($_, $keywords{$_}) for (keys %keyword_categories); 154 | check_alphabetical_order('bare_label_keyword', \@bare_label_keywords); 155 | 156 | # Transform the keyword lists into hashes. 157 | # kwhashes is a hash of hashes, keyed by keyword category id, 158 | # e.g. UNRESERVED_KEYWORD. 159 | # Each inner hash is keyed by keyword id, e.g. ABORT_P, with a dummy value. 
160 | my %kwhashes; 161 | while (my ($kcat, $kcat_id) = each(%keyword_categories)) 162 | { 163 | @arr = @{ $keywords{$kcat} }; 164 | 165 | my $hash; 166 | foreach my $item (@arr) { $hash->{$item} = 1; } 167 | 168 | $kwhashes{$kcat_id} = $hash; 169 | } 170 | my %bare_label_keywords = map { $_ => 1 } @bare_label_keywords; 171 | 172 | # Now read in kwlist.h 173 | 174 | open(my $kwlist, '<', $kwlist_filename) 175 | || die("Could not open : $kwlist_filename"); 176 | 177 | my $prevkwstring = ''; 178 | my $bare_kwname; 179 | my %kwhash; 180 | kwlist_line: while (<$kwlist>) 181 | { 182 | my ($line) = $_; 183 | 184 | if ($line =~ /^PG_KEYWORD\(\"(.*)\", (.*), (.*), (.*)\)/) 185 | { 186 | my ($kwstring) = $1; 187 | my ($kwname) = $2; 188 | my ($kwcat_id) = $3; 189 | my ($collabel) = $4; 190 | 191 | # Check that the list is in alphabetical order (critical!) 192 | if ($kwstring le $prevkwstring) 193 | { 194 | error 195 | "'$kwstring' after '$prevkwstring' in kwlist.h is misplaced"; 196 | } 197 | $prevkwstring = $kwstring; 198 | 199 | # Check that the keyword string is valid: all lower-case ASCII chars 200 | if ($kwstring !~ /^[a-z_]+$/ and $kwstring ne "varchar2" and $kwstring ne "nvl2") 201 | { 202 | error 203 | "'$kwstring' is not a valid keyword string, must be all lower-case ASCII chars"; 204 | } 205 | 206 | # Check that the keyword name is valid: all upper-case ASCII chars 207 | if ($kwname !~ /^[A-Z_]+$/ and $kwname ne "VARCHAR2" and $kwname ne "NVL2") 208 | { 209 | error 210 | "'$kwname' is not a valid keyword name, must be all upper-case ASCII chars"; 211 | } 212 | 213 | # Check that the keyword string matches keyword name 214 | $bare_kwname = $kwname; 215 | $bare_kwname =~ s/_P$//; 216 | if ($bare_kwname ne uc($kwstring)) 217 | { 218 | error 219 | "keyword name '$kwname' doesn't match keyword string '$kwstring'"; 220 | } 221 | 222 | # Check that the keyword is present in the right category list 223 | %kwhash = %{ $kwhashes{$kwcat_id} }; 224 | 225 | if (!(%kwhash)) 226 
| { 227 | error "Unknown keyword category: $kwcat_id"; 228 | } 229 | else 230 | { 231 | if (!($kwhash{$kwname})) 232 | { 233 | error "'$kwname' not present in $kwcat_id section of gram.y"; 234 | } 235 | else 236 | { 237 | 238 | # Remove it from the hash, so that we can 239 | # complain at the end if there's keywords left 240 | # that were not found in kwlist.h 241 | delete $kwhashes{$kwcat_id}->{$kwname}; 242 | } 243 | } 244 | 245 | # Check that the keyword's collabel property matches gram.y 246 | if ($collabel eq 'BARE_LABEL') 247 | { 248 | unless ($bare_label_keywords{$kwname}) 249 | { 250 | error 251 | "'$kwname' is marked as BARE_LABEL in kwlist.h, but it is missing from gram.y's bare_label_keyword rule"; 252 | } 253 | } 254 | elsif ($collabel eq 'AS_LABEL') 255 | { 256 | if ($bare_label_keywords{$kwname}) 257 | { 258 | error 259 | "'$kwname' is marked as AS_LABEL in kwlist.h, but it is listed in gram.y's bare_label_keyword rule"; 260 | } 261 | } 262 | else 263 | { 264 | error 265 | "'$collabel' not recognized in kwlist.h. 
Expected either 'BARE_LABEL' or 'AS_LABEL'"; 266 | } 267 | } 268 | } 269 | close $kwlist; 270 | 271 | # Check that we've paired up all keywords from gram.y with lines in kwlist.h 272 | while (my ($kwcat, $kwcat_id) = each(%keyword_categories)) 273 | { 274 | %kwhash = %{ $kwhashes{$kwcat_id} }; 275 | 276 | for my $kw (keys %kwhash) 277 | { 278 | error "'$kw' found in gram.y $kwcat category, but not in kwlist.h"; 279 | } 280 | } 281 | 282 | exit $errors; 283 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser16/include/catalog/pg_attribute_d.h: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * 3 | * pg_attribute_d.h 4 | * Macro definitions for pg_attribute 5 | * 6 | * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group 7 | * Portions Copyright (c) 1994, Regents of the University of California 8 | * 9 | * NOTES 10 | * ****************************** 11 | * *** DO NOT EDIT THIS FILE! 
*** 12 | * ****************************** 13 | * 14 | * It has been GENERATED by src/backend/catalog/genbki.pl 15 | * 16 | *------------------------------------------------------------------------- 17 | */ 18 | #ifndef PG_ATTRIBUTE_D_H 19 | #define PG_ATTRIBUTE_D_H 20 | 21 | #define AttributeRelationId 1249 22 | #define AttributeRelation_Rowtype_Id 75 23 | #define AttributeRelidNameIndexId 2658 24 | #define AttributeRelidNumIndexId 2659 25 | 26 | #define Anum_pg_attribute_attrelid 1 27 | #define Anum_pg_attribute_attname 2 28 | #define Anum_pg_attribute_atttypid 3 29 | #define Anum_pg_attribute_attlen 4 30 | #define Anum_pg_attribute_attnum 5 31 | #define Anum_pg_attribute_attcacheoff 6 32 | #define Anum_pg_attribute_atttypmod 7 33 | #define Anum_pg_attribute_attndims 8 34 | #define Anum_pg_attribute_attbyval 9 35 | #define Anum_pg_attribute_attalign 10 36 | #define Anum_pg_attribute_attstorage 11 37 | #define Anum_pg_attribute_attcompression 12 38 | #define Anum_pg_attribute_attnotnull 13 39 | #define Anum_pg_attribute_atthasdef 14 40 | #define Anum_pg_attribute_atthasmissing 15 41 | #define Anum_pg_attribute_attidentity 16 42 | #define Anum_pg_attribute_attgenerated 17 43 | #define Anum_pg_attribute_attisdropped 18 44 | #define Anum_pg_attribute_attislocal 19 45 | #define Anum_pg_attribute_attinhcount 20 46 | #define Anum_pg_attribute_attstattarget 21 47 | #define Anum_pg_attribute_attcollation 22 48 | #define Anum_pg_attribute_attacl 23 49 | #define Anum_pg_attribute_attoptions 24 50 | #define Anum_pg_attribute_attfdwoptions 25 51 | #define Anum_pg_attribute_attmissingval 26 52 | 53 | #define Natts_pg_attribute 26 54 | 55 | 56 | #define ATTRIBUTE_IDENTITY_ALWAYS 'a' 57 | #define ATTRIBUTE_IDENTITY_BY_DEFAULT 'd' 58 | 59 | #define ATTRIBUTE_GENERATED_STORED 's' 60 | 61 | /* for compatibel oracle identity column */ 62 | /* n=self increasing columns can be inserted and NULL */ 63 | #define ATTRIBUTE_IDENTITY_DEFAULT_ON_NULL 'n' 64 | /* i=self increasing columns 
cannot be inserted and can only be generated */ 65 | #define ATTRIBUTE_ORA_IDENTITY_ALWAYS 'i' 66 | /* o=self increasing columns can be inserted but cannot be NULL */ 67 | #define ATTRIBUTE_ORA_IDENTITY_BY_DEFAULT 'o' 68 | 69 | 70 | #endif /* PG_ATTRIBUTE_D_H */ 71 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser16/include/nodes/nodetags_ext.h: -------------------------------------------------------------------------------- 1 | #ifndef NODETAGS_EXT_H 2 | #define NODETAGS_EXT_H 3 | 4 | /* 5 | * Custom NodeTag values for IvorySQL or Oracle parser 6 | * Make sure these start AFTER the last vanilla PostgreSQL NodeTag 7 | */ 8 | #define T_OraNodetagBegin 600 9 | 10 | #define T_AccessibleByClause (T_OraNodetagBegin + 1) 11 | #define T_AccessorItem (T_OraNodetagBegin + 2) 12 | #define T_CompileFunctionStmt (T_OraNodetagBegin + 3) 13 | #define T_CreatePackageStmt (T_OraNodetagBegin + 4) 14 | #define T_CreatePackageBodyStmt (T_OraNodetagBegin + 5) 15 | #define T_AlterPackageStmt (T_OraNodetagBegin + 6) 16 | #define T_ColumnRefOrFuncCall (T_OraNodetagBegin + 7) 17 | #define T_BFloat (T_OraNodetagBegin + 8) 18 | #define T_BDouble (T_OraNodetagBegin + 9) 19 | 20 | #endif /* NODETAGS_EXT_H */ 21 | 22 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser16/include/oracle_parser/ora_keywords.h: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * 3 | * ora_keywords.h 4 | * IvorySQL's list of SQL keywords 5 | * 6 | * 7 | * Portions Copyright (c) 2023, IvorySQL 8 | * 9 | * src/include/oracle_parser/ora_keywords.h 10 | * 11 | *------------------------------------------------------------------------- 12 | */ 13 | #ifndef ORA_KEYWORDS_H 14 | #define ORA_KEYWORDS_H 15 | 16 | #include "common/kwlookup.h" 17 | 18 | /* Keyword categories --- should match 
lists in gram.y */ 19 | #define UNRESERVED_KEYWORD 0 20 | #define COL_NAME_KEYWORD 1 21 | #define TYPE_FUNC_NAME_KEYWORD 2 22 | #define RESERVED_KEYWORD 3 23 | 24 | extern const ScanKeywordList OraScanKeywords; 25 | extern const uint8 OraScanKeywordCategories[]; 26 | extern const bool OraScanKeywordBareLabel[]; 27 | 28 | 29 | #endif /* ORA_KEYWORDS_H */ 30 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser16/include/oracle_parser/ora_parser_hook.h: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * 3 | * ora_parser_hook.h 4 | * Variable Declarations shown as below are used to 5 | * for the Hook function's definitions. 6 | * 7 | * 8 | * Portions Copyright (c) 2023, IvorySQL 9 | * 10 | * src/include/oracle_parser/ora_parser_hook.h 11 | * 12 | * add the file for requirement "SQL PARSER" 13 | * 14 | *------------------------------------------------------------------------- 15 | */ 16 | 17 | #ifndef ORA_PARSER_HOOK_H 18 | #define ORA_PARSER_HOOK_H 19 | 20 | /* Hook for plugins to get control in get_keywords() */ 21 | typedef Datum (*get_keywords_hook_type)(PG_FUNCTION_ARGS); 22 | extern PGDLLIMPORT get_keywords_hook_type get_keywords_hook; 23 | 24 | /* Hook for plugins to get control in fill_in_constant_lengths() */ 25 | typedef void (*fill_in_constant_lengths_hook_type)(void *jstate, const char *query, int query_loc); 26 | extern PGDLLIMPORT fill_in_constant_lengths_hook_type fill_in_constant_lengths_hook; 27 | 28 | //fill_in_constant_lengths_hook_type fill_in_constant_lengths_hook = NULL; 29 | 30 | /* Hook for plugins to get control in quote_identifier() */ 31 | typedef const char *(*quote_identifier_hook_type)(const char *ident); 32 | extern PGDLLIMPORT quote_identifier_hook_type quote_identifier_hook; 33 | 34 | #endif /* ORA_PARSER_HOOK_H */ 35 | 
-------------------------------------------------------------------------------- /src/backend/olr/oracle_parser16/include/oracle_parser/ora_scanner.h: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * 3 | * ora_scanner.h 4 | * API for the core scanner (flex machine) 5 | * 6 | * The core scanner is also used by PL/SQL, so we provide a public API 7 | * for it. However, the rest of the backend is only expected to use the 8 | * higher-level API provided by parser.h. 9 | * 10 | * 11 | * Portions Copyright (c) 2023, IvorySQL 12 | * 13 | * src/include/oracle-parser/ora_scanner.h 14 | * 15 | *------------------------------------------------------------------------- 16 | */ 17 | 18 | #ifndef ORA_SCANNER_H 19 | #define ORA_SCANNER_H 20 | 21 | #include "common/keywords.h" 22 | 23 | /* 24 | * The scanner returns extra data about scanned tokens in this union type. 25 | * Note that this is a subset of the fields used in YYSTYPE of the bison 26 | * parsers built atop the scanner. 27 | */ 28 | typedef union ora_core_YYSTYPE 29 | { 30 | int ival; /* for integer literals */ 31 | char *str; /* for identifiers and non-integer literals */ 32 | const char *keyword; /* canonical spelling of keywords */ 33 | } ora_core_YYSTYPE; 34 | 35 | /* 36 | * We track token locations in terms of byte offsets from the start of the 37 | * source string, not the column number/line number representation that 38 | * bison uses by default. Also, to minimize overhead we track only one 39 | * location (usually the first token location) for each construct, not 40 | * the beginning and ending locations as bison does by default. It's 41 | * therefore sufficient to make YYLTYPE an int. 42 | */ 43 | #define YYLTYPE int 44 | 45 | /* 46 | * Another important component of the scanner's API is the token code numbers. 
47 | * However, those are not defined in this file, because bison insists on 48 | * defining them for itself. The token codes used by the core scanner are 49 | * the ASCII characters plus these: 50 | * %token IDENT UIDENT FCONST SCONST USCONST BCONST XCONST Op 51 | * %token ICONST PARAM 52 | * %token TYPECAST DOT_DOT COLON_EQUALS EQUALS_GREATER 53 | * %token LESS_EQUALS GREATER_EQUALS NOT_EQUALS 54 | * The above token definitions *must* be the first ones declared in any 55 | * bison parser built atop this scanner, so that they will have consistent 56 | * numbers assigned to them (specifically, IDENT = 258 and so on). 57 | */ 58 | 59 | /* 60 | * The YY_EXTRA data that a flex scanner allows us to pass around. 61 | * Private state needed by the core scanner goes here. Note that the actual 62 | * yy_extra struct may be larger and have this as its first component, thus 63 | * allowing the calling parser to keep some fields of its own in YY_EXTRA. 64 | */ 65 | typedef struct ora_core_yy_extra_type 66 | { 67 | /* 68 | * The string the scanner is physically scanning. We keep this mainly so 69 | * that we can cheaply compute the offset of the current token (yytext). 70 | */ 71 | char *scanbuf; 72 | Size scanbuflen; 73 | 74 | /* 75 | * The keyword list to use, and the associated grammar token codes. 76 | */ 77 | const ScanKeywordList *keywordlist; 78 | const uint16 *keyword_tokens; 79 | 80 | /* 81 | * Scanner settings to use. These are initialized from the corresponding 82 | * GUC variables by scanner_init(). Callers can modify them after 83 | * scanner_init() if they don't want the scanner's behavior to follow the 84 | * prevailing GUC settings. 85 | */ 86 | int backslash_quote; 87 | bool escape_string_warning; 88 | bool standard_conforming_strings; 89 | 90 | /* 91 | * literalbuf is used to accumulate literal values when multiple rules are 92 | * needed to parse a single literal. Call startlit() to reset buffer to 93 | * empty, addlit() to add text. 
NOTE: the string in literalbuf is NOT 94 | * necessarily null-terminated, but there always IS room to add a trailing 95 | * null at offset literallen. We store a null only when we need it. 96 | */ 97 | char *literalbuf; /* palloc'd expandable buffer */ 98 | int literallen; /* actual current string length */ 99 | int literalalloc; /* current allocated buffer size */ 100 | 101 | /* 102 | * Random assorted scanner state. 103 | */ 104 | int state_before_str_stop; /* start cond. before end quote */ 105 | int xcdepth; /* depth of nesting in slash-star comments */ 106 | char *dolqstart; /* current $foo$ quote start string */ 107 | YYLTYPE save_yylloc; /* one-element stack for PUSH_YYLLOC() */ 108 | 109 | /* first part of UTF16 surrogate pair for Unicode escapes */ 110 | int32 utf16_first_part; 111 | 112 | /* state variables for literal-lexing warnings */ 113 | bool warn_on_first_escape; 114 | bool saw_non_ascii; 115 | } ora_core_yy_extra_type; 116 | 117 | /* 118 | * The type of yyscanner is opaque outside scan.l. 
119 | */ 120 | typedef void *ora_core_yyscan_t; 121 | 122 | /* Support for scanner_errposition_callback function */ 123 | typedef struct OraScannerCallbackState 124 | { 125 | ora_core_yyscan_t yyscanner; 126 | int location; 127 | ErrorContextCallback errcallback; 128 | } OraScannerCallbackState; 129 | 130 | 131 | /* Constant data exported from oracle_parser/ora_scan.l */ 132 | extern PGDLLIMPORT const uint16 OraScanKeywordTokens[]; 133 | 134 | /* Entry points in oracle_parser/ora_scan.l */ 135 | extern ora_core_yyscan_t ora_scanner_init(const char *str, 136 | ora_core_yy_extra_type *yyext, 137 | const ScanKeywordList *keywordlist, 138 | const uint16 *keyword_tokens); 139 | extern void ora_scanner_finish(ora_core_yyscan_t yyscanner); 140 | extern int ora_core_yylex(ora_core_YYSTYPE *yylval_param, YYLTYPE *yylloc_param, 141 | ora_core_yyscan_t yyscanner); 142 | extern int ora_scanner_errposition(int location, ora_core_yyscan_t yyscanner); 143 | extern void ora_setup_scanner_errposition_callback(OraScannerCallbackState *scbstate, 144 | ora_core_yyscan_t yyscanner, 145 | int location); 146 | extern void ora_cancel_scanner_errposition_callback(OraScannerCallbackState *scbstate); 147 | extern void ora_scanner_yyerror(const char *message, ora_core_yyscan_t yyscanner) pg_attribute_noreturn(); 148 | 149 | #endif /* SCANNER_H */ 150 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser16/include/parser/parser.h: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * 3 | * parser.h 4 | * Definitions for the "raw" parser (flex and bison phases only) 5 | * 6 | * This is the external API for the raw lexing/parsing functions. 
7 | * 8 | * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group 9 | * Portions Copyright (c) 1994, Regents of the University of California 10 | * 11 | * src/include/parser/parser.h 12 | * 13 | *------------------------------------------------------------------------- 14 | */ 15 | #ifndef PARSER_H 16 | #define PARSER_H 17 | 18 | #include "nodes/parsenodes.h" 19 | 20 | 21 | /* 22 | * RawParseMode determines the form of the string that raw_parser() accepts: 23 | * 24 | * RAW_PARSE_DEFAULT: parse a semicolon-separated list of SQL commands, 25 | * and return a List of RawStmt nodes. 26 | * 27 | * RAW_PARSE_TYPE_NAME: parse a type name, and return a one-element List 28 | * containing a TypeName node. 29 | * 30 | * RAW_PARSE_PLPGSQL_EXPR: parse a PL/pgSQL expression, and return 31 | * a one-element List containing a RawStmt node. 32 | * 33 | * RAW_PARSE_PLPGSQL_ASSIGNn: parse a PL/pgSQL assignment statement, 34 | * and return a one-element List containing a RawStmt node. "n" 35 | * gives the number of dotted names comprising the target ColumnRef. 
36 | */ 37 | typedef enum 38 | { 39 | RAW_PARSE_DEFAULT = 0, 40 | RAW_PARSE_TYPE_NAME, 41 | RAW_PARSE_PLPGSQL_EXPR, 42 | RAW_PARSE_PLPGSQL_ASSIGN1, 43 | RAW_PARSE_PLPGSQL_ASSIGN2, 44 | RAW_PARSE_PLPGSQL_ASSIGN3 45 | } RawParseMode; 46 | 47 | /* Values for the backslash_quote GUC */ 48 | typedef enum 49 | { 50 | BACKSLASH_QUOTE_OFF, 51 | BACKSLASH_QUOTE_ON, 52 | BACKSLASH_QUOTE_SAFE_ENCODING 53 | } BackslashQuoteType; 54 | 55 | /* GUC variables in scan.l (every one of these is a bad idea :-() */ 56 | extern PGDLLIMPORT int backslash_quote; 57 | extern PGDLLIMPORT bool escape_string_warning; 58 | extern PGDLLIMPORT bool standard_conforming_strings; 59 | 60 | /* Hook for plugins to get control in raw_parser() */ 61 | typedef List *(*raw_parser_hook_type) (const char *str, RawParseMode mode); 62 | extern PGDLLIMPORT raw_parser_hook_type sql_raw_parser; 63 | extern PGDLLIMPORT raw_parser_hook_type ora_raw_parser; 64 | 65 | /* Primary entry point for the raw parsing functions */ 66 | extern List *raw_parser(const char *str, RawParseMode mode); 67 | 68 | extern List *standard_raw_parser(const char *str, RawParseMode mode); 69 | 70 | /* Utility functions exported by gram.y (perhaps these should be elsewhere) */ 71 | extern List *SystemFuncName(char *name); 72 | extern TypeName *SystemTypeName(char *name); 73 | extern List *OracleSystemFuncName(char *name); 74 | extern TypeName *OracleSystemTypeName(char *name); 75 | 76 | 77 | #endif /* PARSER_H */ 78 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser16/include/utils/ora_compatible.h: -------------------------------------------------------------------------------- 1 | /*-------------------------------------------------------------------- 2 | * 3 | * ora_compatible.h 4 | * 5 | * Definition enumeration structure is fro supporting different compatibility modes. 
6 | * 7 | * Portions Copyright (c) 2023, IvorySQL 8 | * 9 | * src/include/utils/ora_compatible.h 10 | * 11 | * add the file for requirement "SQL PARSER" 12 | * 13 | *---------------------------------------------------------------------- 14 | */ 15 | 16 | #ifndef ORA_COMPATIBLE_H 17 | #define ORA_COMPATIBLE_H 18 | 19 | #define ORA_SEARCH_PATH "sys,\"$user\", public" 20 | #define DB_MODE_PARMATER "ivorysql.database_mode" 21 | 22 | #define CHAR_TYPE_LENGTH_MAX 2000 23 | 24 | typedef enum DBMode 25 | { 26 | DB_PG = 0, 27 | DB_ORACLE 28 | }DBMode; 29 | 30 | typedef enum DBParser 31 | { 32 | PG_PARSER = 0, 33 | ORA_PARSER 34 | }DBParser; 35 | 36 | typedef enum CaseSwitchMode 37 | { 38 | NORMAL = 0, 39 | INTERCHANGE, 40 | LOWERCASE 41 | }CaseSwitchMode; 42 | 43 | typedef enum 44 | { 45 | NLS_LENGTH_BYTE, 46 | NLS_LENGTH_CHAR 47 | } NlsLengthSemantics; 48 | 49 | /* oracle parser static parameters */ 50 | extern int compatible_db; 51 | extern int nls_length_semantics; 52 | extern bool identifier_case_from_pg_dump; 53 | extern bool enable_case_switch; 54 | extern bool enable_emptystring_to_NULL; 55 | extern int identifier_case_switch; 56 | 57 | char *identifier_case_transform(const char *ident, int len); 58 | char *downcase_identifier(const char *ident, int len, bool warn, bool truncate); 59 | char *upcase_identifier(const char *ident, int len, bool warn, bool truncate); 60 | void truncate_identifier(char *ident, int len, bool warn); 61 | bool is_all_upper(const char *src, int len); 62 | 63 | #endif /* ORA_COMPATIBLE_H */ 64 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser16/meson.build: -------------------------------------------------------------------------------- 1 | # Portions Copyright (c) 2023, Ivory SQL Global Development Team 2 | 3 | ora_parser_sources = files( 4 | 'liboracle_parser.c', 5 | 'ora_keywords.c', 6 | ) 7 | 8 | ora_parser_kwlist = custom_target('ora_kwlist', 9 | input: 
files('../../include/oracle_parser/ora_kwlist.h'), 10 | output: 'ora_kwlist_d.h', 11 | command: [perl, '-I', '@SOURCE_ROOT@/src/tools', files('../../tools/ora_gen_keywordlist.pl'), 12 | '--extern', '--output', '@OUTDIR@', '@INPUT@']) 13 | generated_sources += ora_parser_kwlist 14 | ora_parser_sources += ora_parser_kwlist 15 | 16 | 17 | ora_backend_scanner = custom_target('ora_scan', 18 | input: 'ora_scan.l', 19 | output: 'ora_scan.c', 20 | command: [flex_cmd, '--no-backup', '--fix-warnings', '--', '-CF', '-p', '-p'], 21 | ) 22 | generated_sources += ora_backend_scanner 23 | ora_parser_sources += ora_backend_scanner 24 | 25 | ora_backend_parser = custom_target('ora_gram', 26 | input: 'ora_gram.y', 27 | kwargs: bison_kw, 28 | ) 29 | generated_sources += ora_backend_parser.to_list() 30 | ora_parser_sources += ora_backend_parser 31 | 32 | 33 | liboracle_parser = shared_module('liboracle_parser', 34 | ora_parser_sources, 35 | c_pch: pch_postgres_h, 36 | include_directories: [postgres_inc, include_directories('.')], 37 | kwargs: pg_mod_args, 38 | ) 39 | backend_targets += liboracle_parser 40 | 41 | ora_parser = static_library('ora_parser', 42 | ora_parser_sources, 43 | c_pch: pch_postgres_h, 44 | include_directories: [postgres_inc, include_directories('.')], 45 | kwargs: pg_mod_args 46 | ) 47 | backend_link_with += ora_parser 48 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser16/ora_gramparse.h: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * 3 | * ora_gramparse.h 4 | * Shared definitions for the "raw" parser (flex and bison phases only) 5 | * 6 | * NOTE: this file is only meant to be included in the core parsing files, 7 | * ie, parser.c, ora_gram.y, ora_scan.l, and src/backend/oracle_parser/ora_keywords.c. 8 | * Definitions that are needed outside the core parser should be in parser.h. 
9 | * 10 | * 11 | * Portions Copyright (c) 2023, IvorySQL Global Development Team 12 | * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group 13 | * Portions Copyright (c) 1994, Regents of the University of California 14 | * 15 | * src/include/oracle_parser/ora_gramparse.h 16 | * 17 | * add the file for requirement "SQL PARSER" 18 | * 19 | *------------------------------------------------------------------------- 20 | */ 21 | 22 | #ifndef ORA_GRAMPARSE_H 23 | #define ORA_GRAMPARSE_H 24 | 25 | #include "nodes/parsenodes.h" 26 | #include "oracle_parser/ora_scanner.h" 27 | 28 | /* 29 | * NB: include ora_gram.h only AFTER including ora_scanner.h, because ora_scanner.h 30 | * is what #defines YYLTYPE. 31 | */ 32 | #include "ora_gram.h" 33 | 34 | typedef enum OraBodyStyle 35 | { 36 | OraBody_UNKOWN, 37 | OraBody_FUNC, 38 | OraBody_ANONYMOUS_BLOCK, 39 | OraBody_MAYBE_ANONYMOUS_BLOCK_BEGIN, 40 | OraBody_MAYBE_ANONYMOUS_BLOCK_DECLARE 41 | }OraBodyStyle; 42 | 43 | /* Auxiliary data about a token (other than the token type) */ 44 | typedef struct 45 | { 46 | YYSTYPE lval; /* semantic information */ 47 | YYLTYPE lloc; /* offset in scanbuf */ 48 | int leng; /* length in bytes */ 49 | } TokenAuxData; 50 | 51 | /* Token pushback stack */ 52 | #define MAX_PUSHBACKS 16 53 | 54 | /* 55 | * The YY_EXTRA data that a flex scanner allows us to pass around. Private 56 | * state needed for raw parsing/lexing goes here. 57 | */ 58 | typedef struct ora_base_yy_extra_type 59 | { 60 | /* 61 | * Fields used by the core scanner. 62 | */ 63 | ora_core_yy_extra_type core_yy_extra; 64 | 65 | /* 66 | * State variables for ora_base_yylex(). 67 | */ 68 | bool have_lookahead; /* is lookahead info valid? 
*/ 69 | int lookahead_token; /* one-token lookahead */ 70 | ora_core_YYSTYPE lookahead_yylval; /* yylval for lookahead token */ 71 | YYLTYPE lookahead_yylloc; /* yylloc for lookahead token */ 72 | char *lookahead_end; /* end of current token */ 73 | char lookahead_hold_char; /* to be put back at *lookahead_end */ 74 | 75 | /* 76 | * State variables that belong to the grammar. 77 | */ 78 | List *parsetree; /* final parse result is delivered here */ 79 | 80 | /* 81 | * The native PG only cache one-token info include yylloc, yylval and token 82 | * number in yyextra, IvorySQL cache multiple tokens info using two arrays. 83 | */ 84 | int max_pushbacks; /* the maxsize of cache array */ 85 | int loc_pushback; /* # of used tokens */ 86 | int num_pushbacks; /* # of cached tokens */ 87 | int *pushback_token; /* token number array */ 88 | TokenAuxData *pushback_auxdata; /* auxdata array */ 89 | 90 | OraBodyStyle body_style; 91 | int body_start; 92 | int body_level; 93 | } ora_base_yy_extra_type; 94 | 95 | /* 96 | * In principle we should use yyget_extra() to fetch the yyextra field 97 | * from a yyscanner struct. However, flex always puts that field first, 98 | * and this is sufficiently performance-critical to make it seem worth 99 | * cheating a bit to use an inline macro. 
100 | */ 101 | #define pg_yyget_extra(yyscanner) (*((ora_base_yy_extra_type **) (yyscanner))) 102 | 103 | 104 | /* from libparser_oracle.c */ 105 | extern int ora_base_yylex(YYSTYPE *lvalp, YYLTYPE *llocp, 106 | ora_core_yyscan_t yyscanner); 107 | 108 | extern void set_oracle_plsql_body(ora_core_yyscan_t yyscanner, OraBodyStyle body_style); 109 | extern void set_oracle_plsql_bodystart(ora_core_yyscan_t yyscanner, int body_start, int body_level); 110 | 111 | /* from ora_gram.y */ 112 | extern void ora_parser_init(ora_base_yy_extra_type *yyext); 113 | extern int ora_base_yyparse(ora_core_yyscan_t yyscanner); 114 | 115 | #endif /* ORA_GRAMPARSE_H */ 116 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser16/ora_keywords.c: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * 3 | * ora_keywords.c 4 | * IvorySQL's list of SQL keywords (Oracle Compatible) 5 | * 6 | * 7 | * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group 8 | * Portions Copyright (c) 1994, Regents of the University of California 9 | * 10 | * 11 | * IDENTIFICATION 12 | * src/backend/oracle_parser/ora_keywords.c 13 | * 14 | * add the file for requirement "SQL PARSER" 15 | * 16 | *------------------------------------------------------------------------- 17 | */ 18 | #include "c.h" 19 | 20 | #include "oracle_parser/ora_keywords.h" 21 | 22 | 23 | /* ScanKeywordList lookup data for SQL keywords */ 24 | 25 | #include "ora_kwlist_d.h" 26 | 27 | /* Keyword categories for SQL keywords */ 28 | 29 | #define PG_KEYWORD(kwname, value, category, collabel) category, 30 | 31 | const uint8 OraScanKeywordCategories[ORASCANKEYWORDS_NUM_KEYWORDS] = { 32 | #include "oracle_parser/ora_kwlist.h" 33 | }; 34 | 35 | #undef PG_KEYWORD 36 | 37 | /* Keyword can-be-bare-label flags for SQL keywords */ 38 | 39 | #define PG_KEYWORD(kwname, 
value, category, collabel) collabel, 40 | 41 | #define BARE_LABEL true 42 | #define AS_LABEL false 43 | 44 | const bool OraScanKeywordBareLabel[ORASCANKEYWORDS_NUM_KEYWORDS] = { 45 | #include "oracle_parser/ora_kwlist.h" 46 | }; 47 | 48 | #undef PG_KEYWORD 49 | #undef BARE_LABEL 50 | #undef AS_LABEL 51 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser17/.gitignore: -------------------------------------------------------------------------------- 1 | /ora_gram.h 2 | /ora_gram.c 3 | /ora_scan.c 4 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser17/Makefile: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------- 2 | # Copyright 2025 IvorySQL Global Development Team 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # 16 | # Makefile-- 17 | # Makefile for src/backend/oracle_parser 18 | # 19 | # Copyright (c) 2023-2025, IvorySQL Global Development Team 20 | # Portions Copyright (c) 2023-2025, IvorySQL Global Development Team 21 | # 22 | # IDENTIFICATION 23 | # src/backend/oracle_parser/Makefile 24 | # 25 | # add the file for requirement "SQL PARSER" 26 | # 27 | #------------------------------------------------------------------------- 28 | #subdir = contrib/oracle_parser 29 | top_builddir = ../../../../../../ 30 | include $(top_builddir)/src/Makefile.global 31 | 32 | PGFILEDESC = "liboracle_parser - raw parser for oracle" 33 | NAME = liboracle_parser 34 | 35 | # include our headers first 36 | override CPPFLAGS := -I./include -I$(srcdir) $(CPPFLAGS) 37 | rpath = 38 | 39 | OBJS = ora_keywords.o ora_gram.o ora_scan.o liboracle_parser.o $(WIN32RES) 40 | 41 | # where to find ora_gen_keywordlist.pl and subsidiary files 42 | TOOLSDIR = $(top_srcdir)/src/tools 43 | GEN_KEYWORDLIST = $(PERL) -I $(TOOLSDIR) $(TOOLSDIR)/ora_gen_keywordlist.pl 44 | GEN_KEYWORDLIST_DEPS = $(TOOLSDIR)/ora_gen_keywordlist.pl $(TOOLSDIR)/PerfectHash.pm 45 | 46 | all: all-shared-lib 47 | 48 | include $(top_builddir)/src/Makefile.shlib 49 | 50 | install: all installdirs install-lib 51 | 52 | installdirs: installdirs-lib 53 | 54 | uninstall: uninstall-lib 55 | 56 | 57 | # See notes in src/backend/oracle_parser/Makefile about the following two rules 58 | ora_gram.h: ora_gram.c 59 | touch $@ 60 | 61 | ora_gram.c: BISONFLAGS += -d 62 | ora_gram.c: BISON_CHECK_CMD = $(PERL) $(srcdir)/check_keywords.pl $< $(srcdir)/include/oracle_parser/ora_kwlist.h 63 | 64 | ora_scan.c: FLEXFLAGS = -CF -p -p 65 | ora_scan.c: FLEX_NO_BACKUP=yes 66 | ora_scan.c: FLEX_FIX_WARNING=yes 67 | 68 | # Force these dependencies to be known even without dependency info built: 69 | ora_scan.o ora_gram.o ora_keywords.o liboracle_parser.o: ora_gram.h 70 | 71 | # generate SQL keyword lookup table to be included into ora_keywords*.o. 
72 | #ora_kwlist_d.h: $(top_srcdir)/src/include/oracle_parser/ora_kwlist.h $(GEN_KEYWORDLIST_DEPS) 73 | # $(GEN_KEYWORDLIST) --extern $< 74 | 75 | # Dependencies of ora_keywords*.o need to be managed explicitly to make sure 76 | # that you don't get broken parsing code, even in a non-enable-depend build. 77 | ora_keywords.o : ora_kwlist_d.h 78 | 79 | 80 | distprep: ora_scan.c ora_gram.c ora_kwlist_d.h 81 | 82 | # ora_gram.c, ora_gram.h, and ora_scan.c are in the distribution tarball, so they 83 | # are not clean here. 84 | clean distclean: clean-lib 85 | rm -f lex.backup $(OBJS) 86 | 87 | maintainer-clean: clean 88 | rm -f ora_scan.c ora_gram.c ora_kwlist_d.h 89 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser17/check_keywords.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | 3 | # Check that the keyword lists in gram.y and kwlist.h are sane. 4 | # Usage: check_keywords.pl gram.y kwlist.h 5 | 6 | # src/backend/parser/check_keywords.pl 7 | # Copyright (c) 2009-2024, PostgreSQL Global Development Group 8 | # Portions Copyright (c) 2023-2025, IvorySQL Global Development Team 9 | 10 | use strict; 11 | use warnings FATAL => 'all'; 12 | 13 | my $gram_filename = $ARGV[0]; 14 | my $kwlist_filename = $ARGV[1]; 15 | 16 | my $errors = 0; 17 | 18 | sub error 19 | { 20 | print STDERR @_; 21 | $errors = 1; 22 | return; 23 | } 24 | 25 | # Check alphabetical order of a set of keyword symbols 26 | # (note these are NOT the actual keyword strings) 27 | sub check_alphabetical_order 28 | { 29 | my ($listname, $list) = @_; 30 | my $prevkword = ''; 31 | 32 | foreach my $kword (@$list) 33 | { 34 | # Some symbols have a _P suffix. Remove it for the comparison. 
35 | my $bare_kword = $kword; 36 | $bare_kword =~ s/_P$//; 37 | if ($bare_kword le $prevkword) 38 | { 39 | error 40 | "'$bare_kword' after '$prevkword' in $listname list is misplaced"; 41 | } 42 | $prevkword = $bare_kword; 43 | } 44 | return; 45 | } 46 | 47 | $, = ' '; # set output field separator 48 | $\ = "\n"; # set output record separator 49 | 50 | my %keyword_categories; 51 | $keyword_categories{'unreserved_keyword'} = 'UNRESERVED_KEYWORD'; 52 | $keyword_categories{'col_name_keyword'} = 'COL_NAME_KEYWORD'; 53 | $keyword_categories{'type_func_name_keyword'} = 'TYPE_FUNC_NAME_KEYWORD'; 54 | $keyword_categories{'reserved_keyword'} = 'RESERVED_KEYWORD'; 55 | 56 | open(my $gram, '<', $gram_filename) || die("Could not open : $gram_filename"); 57 | 58 | my $kcat; 59 | my $in_bare_labels; 60 | my $comment; 61 | my @arr; 62 | my %keywords; 63 | my @bare_label_keywords; 64 | 65 | line: while (my $S = <$gram>) 66 | { 67 | chomp $S; # strip record separator 68 | 69 | my $s; 70 | 71 | # Make sure any braces are split 72 | $s = '{', $S =~ s/$s/ { /g; 73 | $s = '}', $S =~ s/$s/ } /g; 74 | 75 | # Any comments are split 76 | $s = '[/][*]', $S =~ s#$s# /* #g; 77 | $s = '[*][/]', $S =~ s#$s# */ #g; 78 | 79 | if (!($kcat) && !($in_bare_labels)) 80 | { 81 | 82 | # Is this the beginning of a keyword list? 83 | foreach my $k (keys %keyword_categories) 84 | { 85 | if ($S =~ m/^($k):/) 86 | { 87 | $kcat = $k; 88 | next line; 89 | } 90 | } 91 | 92 | # Is this the beginning of the bare_label_keyword list? 93 | $in_bare_labels = 1 if ($S =~ m/^bare_label_keyword:/); 94 | 95 | next line; 96 | } 97 | 98 | # Now split the line into individual fields 99 | my $n = (@arr = split(' ', $S)); 100 | 101 | # Ok, we're in a keyword list. 
Go through each field in turn 102 | for (my $fieldIndexer = 0; $fieldIndexer < $n; $fieldIndexer++) 103 | { 104 | if ($arr[$fieldIndexer] eq '*/' && $comment) 105 | { 106 | $comment = 0; 107 | next; 108 | } 109 | elsif ($comment) 110 | { 111 | next; 112 | } 113 | elsif ($arr[$fieldIndexer] eq '/*') 114 | { 115 | 116 | # start of a multiline comment 117 | $comment = 1; 118 | next; 119 | } 120 | elsif ($arr[$fieldIndexer] eq '//') 121 | { 122 | next line; 123 | } 124 | 125 | if ($arr[$fieldIndexer] eq ';') 126 | { 127 | 128 | # end of keyword list 129 | undef $kcat; 130 | undef $in_bare_labels; 131 | next; 132 | } 133 | 134 | if ($arr[$fieldIndexer] eq '|') 135 | { 136 | next; 137 | } 138 | 139 | # Put this keyword into the right list 140 | if ($in_bare_labels) 141 | { 142 | push @bare_label_keywords, $arr[$fieldIndexer]; 143 | } 144 | else 145 | { 146 | push @{ $keywords{$kcat} }, $arr[$fieldIndexer]; 147 | } 148 | } 149 | } 150 | close $gram; 151 | 152 | # Check that each keyword list is in alphabetical order (just for neatnik-ism) 153 | check_alphabetical_order($_, $keywords{$_}) for (keys %keyword_categories); 154 | check_alphabetical_order('bare_label_keyword', \@bare_label_keywords); 155 | 156 | # Transform the keyword lists into hashes. 157 | # kwhashes is a hash of hashes, keyed by keyword category id, 158 | # e.g. UNRESERVED_KEYWORD. 159 | # Each inner hash is keyed by keyword id, e.g. ABORT_P, with a dummy value. 
160 | my %kwhashes; 161 | while (my ($kcat, $kcat_id) = each(%keyword_categories)) 162 | { 163 | @arr = @{ $keywords{$kcat} }; 164 | 165 | my $hash; 166 | foreach my $item (@arr) { $hash->{$item} = 1; } 167 | 168 | $kwhashes{$kcat_id} = $hash; 169 | } 170 | my %bare_label_keywords = map { $_ => 1 } @bare_label_keywords; 171 | 172 | # Now read in kwlist.h 173 | 174 | open(my $kwlist, '<', $kwlist_filename) 175 | || die("Could not open : $kwlist_filename"); 176 | 177 | my $prevkwstring = ''; 178 | my $bare_kwname; 179 | my %kwhash; 180 | kwlist_line: while (<$kwlist>) 181 | { 182 | my ($line) = $_; 183 | 184 | if ($line =~ /^PG_KEYWORD\(\"(.*)\", (.*), (.*), (.*)\)/) 185 | { 186 | my ($kwstring) = $1; 187 | my ($kwname) = $2; 188 | my ($kwcat_id) = $3; 189 | my ($collabel) = $4; 190 | 191 | # Check that the list is in alphabetical order (critical!) 192 | if ($kwstring le $prevkwstring) 193 | { 194 | error 195 | "'$kwstring' after '$prevkwstring' in kwlist.h is misplaced"; 196 | } 197 | $prevkwstring = $kwstring; 198 | 199 | # Check that the keyword string is valid: all lower-case ASCII chars 200 | if ($kwstring !~ /^[a-z_]+$/ and $kwstring ne "varchar2" and $kwstring ne "nvl2") 201 | { 202 | error 203 | "'$kwstring' is not a valid keyword string, must be all lower-case ASCII chars"; 204 | } 205 | 206 | # Check that the keyword name is valid: all upper-case ASCII chars 207 | if ($kwname !~ /^[A-Z_]+$/ and $kwname ne "VARCHAR2" and $kwname ne "NVL2") 208 | { 209 | error 210 | "'$kwname' is not a valid keyword name, must be all upper-case ASCII chars"; 211 | } 212 | 213 | # Check that the keyword string matches keyword name 214 | $bare_kwname = $kwname; 215 | $bare_kwname =~ s/_P$//; 216 | if ($bare_kwname ne uc($kwstring)) 217 | { 218 | error 219 | "keyword name '$kwname' doesn't match keyword string '$kwstring'"; 220 | } 221 | 222 | # Check that the keyword is present in the right category list 223 | %kwhash = %{ $kwhashes{$kwcat_id} }; 224 | 225 | if (!(%kwhash)) 226 
| { 227 | error "Unknown keyword category: $kwcat_id"; 228 | } 229 | else 230 | { 231 | if (!($kwhash{$kwname})) 232 | { 233 | error "'$kwname' not present in $kwcat_id section of gram.y"; 234 | } 235 | else 236 | { 237 | 238 | # Remove it from the hash, so that we can 239 | # complain at the end if there's keywords left 240 | # that were not found in kwlist.h 241 | delete $kwhashes{$kwcat_id}->{$kwname}; 242 | } 243 | } 244 | 245 | # Check that the keyword's collabel property matches gram.y 246 | if ($collabel eq 'BARE_LABEL') 247 | { 248 | unless ($bare_label_keywords{$kwname}) 249 | { 250 | error 251 | "'$kwname' is marked as BARE_LABEL in kwlist.h, but it is missing from gram.y's bare_label_keyword rule"; 252 | } 253 | } 254 | elsif ($collabel eq 'AS_LABEL') 255 | { 256 | if ($bare_label_keywords{$kwname}) 257 | { 258 | error 259 | "'$kwname' is marked as AS_LABEL in kwlist.h, but it is listed in gram.y's bare_label_keyword rule"; 260 | } 261 | } 262 | else 263 | { 264 | error 265 | "'$collabel' not recognized in kwlist.h. 
Expected either 'BARE_LABEL' or 'AS_LABEL'"; 266 | } 267 | } 268 | } 269 | close $kwlist; 270 | 271 | # Check that we've paired up all keywords from gram.y with lines in kwlist.h 272 | while (my ($kwcat, $kwcat_id) = each(%keyword_categories)) 273 | { 274 | %kwhash = %{ $kwhashes{$kwcat_id} }; 275 | 276 | for my $kw (keys %kwhash) 277 | { 278 | error "'$kw' found in gram.y $kwcat category, but not in kwlist.h"; 279 | } 280 | } 281 | 282 | exit $errors; 283 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser17/include/catalog/pg_attribute_d.h: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * 3 | * pg_attribute_d.h 4 | * Macro definitions for pg_attribute 5 | * 6 | * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group 7 | * Portions Copyright (c) 1994, Regents of the University of California 8 | * 9 | * NOTES 10 | * ****************************** 11 | * *** DO NOT EDIT THIS FILE! 
*** 12 | * ****************************** 13 | * 14 | * It has been GENERATED by src/backend/catalog/genbki.pl 15 | * 16 | *------------------------------------------------------------------------- 17 | */ 18 | #ifndef PG_ATTRIBUTE_D_H 19 | #define PG_ATTRIBUTE_D_H 20 | 21 | #define AttributeRelationId 1249 22 | #define AttributeRelation_Rowtype_Id 75 23 | #define AttributeRelidNameIndexId 2658 24 | #define AttributeRelidNumIndexId 2659 25 | 26 | #define Anum_pg_attribute_attrelid 1 27 | #define Anum_pg_attribute_attname 2 28 | #define Anum_pg_attribute_atttypid 3 29 | #define Anum_pg_attribute_attlen 4 30 | #define Anum_pg_attribute_attnum 5 31 | #define Anum_pg_attribute_attcacheoff 6 32 | #define Anum_pg_attribute_atttypmod 7 33 | #define Anum_pg_attribute_attndims 8 34 | #define Anum_pg_attribute_attbyval 9 35 | #define Anum_pg_attribute_attalign 10 36 | #define Anum_pg_attribute_attstorage 11 37 | #define Anum_pg_attribute_attcompression 12 38 | #define Anum_pg_attribute_attnotnull 13 39 | #define Anum_pg_attribute_atthasdef 14 40 | #define Anum_pg_attribute_atthasmissing 15 41 | #define Anum_pg_attribute_attidentity 16 42 | #define Anum_pg_attribute_attgenerated 17 43 | #define Anum_pg_attribute_attisdropped 18 44 | #define Anum_pg_attribute_attislocal 19 45 | #define Anum_pg_attribute_attisinvisible 20 46 | #define Anum_pg_attribute_attinhcount 21 47 | #define Anum_pg_attribute_attcollation 22 48 | #define Anum_pg_attribute_attstattarget 23 49 | #define Anum_pg_attribute_attacl 24 50 | #define Anum_pg_attribute_attoptions 25 51 | #define Anum_pg_attribute_attfdwoptions 26 52 | #define Anum_pg_attribute_attmissingval 27 53 | 54 | #define Natts_pg_attribute 27 55 | 56 | 57 | #define ATTRIBUTE_IDENTITY_ALWAYS 'a' 58 | #define ATTRIBUTE_IDENTITY_BY_DEFAULT 'd' 59 | 60 | #define ATTRIBUTE_GENERATED_STORED 's' 61 | 62 | /* for compatibel oracle identity column */ 63 | /* n=self increasing columns can be inserted and NULL */ 64 | #define 
ATTRIBUTE_IDENTITY_DEFAULT_ON_NULL 'n' 65 | /* i=self increasing columns cannot be inserted and can only be generated */ 66 | #define ATTRIBUTE_ORA_IDENTITY_ALWAYS 'i' 67 | /* o=self increasing columns can be inserted but cannot be NULL */ 68 | #define ATTRIBUTE_ORA_IDENTITY_BY_DEFAULT 'o' 69 | 70 | 71 | #endif /* PG_ATTRIBUTE_D_H */ 72 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser17/include/nodes/nodetags_ext.h: -------------------------------------------------------------------------------- 1 | #ifndef NODETAGS_EXT_H 2 | #define NODETAGS_EXT_H 3 | 4 | /* 5 | * Custom NodeTag values for IvorySQL or Oracle parser 6 | * Make sure these start AFTER the last vanilla PostgreSQL NodeTag 7 | */ 8 | #define T_OraNodetagBegin 600 9 | 10 | #define T_AccessibleByClause (T_OraNodetagBegin + 1) 11 | #define T_AccessorItem (T_OraNodetagBegin + 2) 12 | #define T_CompileFunctionStmt (T_OraNodetagBegin + 3) 13 | #define T_CreatePackageStmt (T_OraNodetagBegin + 4) 14 | #define T_CreatePackageBodyStmt (T_OraNodetagBegin + 5) 15 | #define T_AlterPackageStmt (T_OraNodetagBegin + 6) 16 | #define T_ColumnRefOrFuncCall (T_OraNodetagBegin + 7) 17 | #define T_BFloat (T_OraNodetagBegin + 8) 18 | #define T_BDouble (T_OraNodetagBegin + 9) 19 | 20 | #endif /* NODETAGS_EXT_H */ 21 | 22 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser17/include/oracle_parser/ora_keywords.h: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * Copyright 2025 IvorySQL Global Development Team 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | * 16 | * ora_keywords.h 17 | * IvorySQL's list of SQL keywords 18 | * 19 | * 20 | * Portions Copyright (c) 2023-2025, IvorySQL Global Development Team 21 | * 22 | * src/include/oracle_parser/ora_keywords.h 23 | * 24 | *------------------------------------------------------------------------- 25 | */ 26 | #ifndef ORA_KEYWORDS_H 27 | #define ORA_KEYWORDS_H 28 | 29 | #include "common/kwlookup.h" 30 | 31 | /* Keyword categories --- should match lists in gram.y */ 32 | #define UNRESERVED_KEYWORD 0 33 | #define COL_NAME_KEYWORD 1 34 | #define TYPE_FUNC_NAME_KEYWORD 2 35 | #define RESERVED_KEYWORD 3 36 | 37 | extern const ScanKeywordList OraScanKeywords; 38 | extern const uint8 OraScanKeywordCategories[]; 39 | extern const bool OraScanKeywordBareLabel[]; 40 | 41 | 42 | #endif /* ORA_KEYWORDS_H */ 43 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser17/include/oracle_parser/ora_parser_hook.h: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * Copyright 2025 IvorySQL Global Development Team 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | * 16 | * ora_parser_hook.h 17 | * Variable Declarations shown as below are used to 18 | * for the Hook function's definitions. 19 | * 20 | * 21 | * Portions Copyright (c) 2023-2025, IvorySQL Global Development Team 22 | * 23 | * src/include/oracle_parser/ora_parser_hook.h 24 | * 25 | * add the file for requirement "SQL PARSER" 26 | * 27 | *------------------------------------------------------------------------- 28 | */ 29 | 30 | #ifndef ORA_PARSER_HOOK_H 31 | #define ORA_PARSER_HOOK_H 32 | 33 | /* Hook for plugins to get control in get_keywords() */ 34 | typedef Datum (*get_keywords_hook_type)(PG_FUNCTION_ARGS); 35 | extern PGDLLIMPORT get_keywords_hook_type get_keywords_hook; 36 | 37 | /* Hook for plugins to get control in fill_in_constant_lengths() */ 38 | typedef void (*fill_in_constant_lengths_hook_type)(void *jstate, const char *query, int query_loc); 39 | extern PGDLLIMPORT fill_in_constant_lengths_hook_type fill_in_constant_lengths_hook; 40 | 41 | //fill_in_constant_lengths_hook_type fill_in_constant_lengths_hook = NULL; 42 | 43 | /* Hook for plugins to get control in quote_identifier() */ 44 | typedef const char *(*quote_identifier_hook_type)(const char *ident); 45 | extern PGDLLIMPORT quote_identifier_hook_type quote_identifier_hook; 46 | 47 | #endif /* ORA_PARSER_HOOK_H */ 48 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser17/include/oracle_parser/ora_scanner.h: -------------------------------------------------------------------------------- 1 | 
/*------------------------------------------------------------------------- 2 | * 3 | * ora_scanner.h 4 | * API for the core scanner (flex machine) 5 | * 6 | * The core scanner is also used by PL/SQL, so we provide a public API 7 | * for it. However, the rest of the backend is only expected to use the 8 | * higher-level API provided by parser.h. 9 | * 10 | * 11 | * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group 12 | * Portions Copyright (c) 1994, Regents of the University of California 13 | * Portions Copyright (c) 2023-2025, IvorySQL Global Development Team 14 | * 15 | * src/include/oracle-parser/ora_scanner.h 16 | * 17 | *------------------------------------------------------------------------- 18 | */ 19 | 20 | #ifndef ORA_SCANNER_H 21 | #define ORA_SCANNER_H 22 | 23 | #include "common/keywords.h" 24 | 25 | /* 26 | * The scanner returns extra data about scanned tokens in this union type. 27 | * Note that this is a subset of the fields used in YYSTYPE of the bison 28 | * parsers built atop the scanner. 29 | */ 30 | typedef union ora_core_YYSTYPE 31 | { 32 | int ival; /* for integer literals */ 33 | char *str; /* for identifiers and non-integer literals */ 34 | const char *keyword; /* canonical spelling of keywords */ 35 | } ora_core_YYSTYPE; 36 | 37 | /* 38 | * We track token locations in terms of byte offsets from the start of the 39 | * source string, not the column number/line number representation that 40 | * bison uses by default. Also, to minimize overhead we track only one 41 | * location (usually the first token location) for each construct, not 42 | * the beginning and ending locations as bison does by default. It's 43 | * therefore sufficient to make YYLTYPE an int. 44 | */ 45 | #define YYLTYPE int 46 | 47 | /* 48 | * Another important component of the scanner's API is the token code numbers. 49 | * However, those are not defined in this file, because bison insists on 50 | * defining them for itself. 
The token codes used by the core scanner are 51 | * the ASCII characters plus these: 52 | * %token IDENT UIDENT FCONST SCONST USCONST BCONST XCONST Op 53 | * %token ICONST PARAM 54 | * %token TYPECAST DOT_DOT COLON_EQUALS EQUALS_GREATER 55 | * %token LESS_EQUALS GREATER_EQUALS NOT_EQUALS 56 | * The above token definitions *must* be the first ones declared in any 57 | * bison parser built atop this scanner, so that they will have consistent 58 | * numbers assigned to them (specifically, IDENT = 258 and so on). 59 | */ 60 | 61 | /* 62 | * The YY_EXTRA data that a flex scanner allows us to pass around. 63 | * Private state needed by the core scanner goes here. Note that the actual 64 | * yy_extra struct may be larger and have this as its first component, thus 65 | * allowing the calling parser to keep some fields of its own in YY_EXTRA. 66 | */ 67 | typedef struct ora_core_yy_extra_type 68 | { 69 | /* 70 | * The string the scanner is physically scanning. We keep this mainly so 71 | * that we can cheaply compute the offset of the current token (yytext). 72 | */ 73 | char *scanbuf; 74 | Size scanbuflen; 75 | 76 | /* 77 | * The keyword list to use, and the associated grammar token codes. 78 | */ 79 | const ScanKeywordList *keywordlist; 80 | const uint16 *keyword_tokens; 81 | 82 | /* 83 | * Scanner settings to use. These are initialized from the corresponding 84 | * GUC variables by scanner_init(). Callers can modify them after 85 | * scanner_init() if they don't want the scanner's behavior to follow the 86 | * prevailing GUC settings. 87 | */ 88 | int backslash_quote; 89 | bool escape_string_warning; 90 | bool standard_conforming_strings; 91 | 92 | /* 93 | * literalbuf is used to accumulate literal values when multiple rules are 94 | * needed to parse a single literal. Call startlit() to reset buffer to 95 | * empty, addlit() to add text. 
NOTE: the string in literalbuf is NOT 96 | * necessarily null-terminated, but there always IS room to add a trailing 97 | * null at offset literallen. We store a null only when we need it. 98 | */ 99 | char *literalbuf; /* palloc'd expandable buffer */ 100 | int literallen; /* actual current string length */ 101 | int literalalloc; /* current allocated buffer size */ 102 | 103 | /* 104 | * Random assorted scanner state. 105 | */ 106 | int state_before_str_stop; /* start cond. before end quote */ 107 | int xcdepth; /* depth of nesting in slash-star comments */ 108 | char *dolqstart; /* current $foo$ quote start string */ 109 | YYLTYPE save_yylloc; /* one-element stack for PUSH_YYLLOC() */ 110 | 111 | /* first part of UTF16 surrogate pair for Unicode escapes */ 112 | int32 utf16_first_part; 113 | 114 | /* state variables for literal-lexing warnings */ 115 | bool warn_on_first_escape; 116 | bool saw_non_ascii; 117 | } ora_core_yy_extra_type; 118 | 119 | /* 120 | * The type of yyscanner is opaque outside scan.l. 
121 | */ 122 | typedef void *ora_core_yyscan_t; 123 | 124 | /* Support for scanner_errposition_callback function */ 125 | typedef struct OraScannerCallbackState 126 | { 127 | ora_core_yyscan_t yyscanner; 128 | int location; 129 | ErrorContextCallback errcallback; 130 | } OraScannerCallbackState; 131 | 132 | 133 | /* Constant data exported from oracle_parser/ora_scan.l */ 134 | extern PGDLLIMPORT const uint16 OraScanKeywordTokens[]; 135 | 136 | /* Entry points in oracle_parser/ora_scan.l */ 137 | extern ora_core_yyscan_t ora_scanner_init(const char *str, 138 | ora_core_yy_extra_type *yyext, 139 | const ScanKeywordList *keywordlist, 140 | const uint16 *keyword_tokens); 141 | extern void ora_scanner_finish(ora_core_yyscan_t yyscanner); 142 | extern int ora_core_yylex(ora_core_YYSTYPE *yylval_param, YYLTYPE *yylloc_param, 143 | ora_core_yyscan_t yyscanner); 144 | extern int ora_scanner_errposition(int location, ora_core_yyscan_t yyscanner); 145 | extern void ora_setup_scanner_errposition_callback(OraScannerCallbackState *scbstate, 146 | ora_core_yyscan_t yyscanner, 147 | int location); 148 | extern void ora_cancel_scanner_errposition_callback(OraScannerCallbackState *scbstate); 149 | extern void ora_scanner_yyerror(const char *message, ora_core_yyscan_t yyscanner) pg_attribute_noreturn(); 150 | 151 | #endif /* SCANNER_H */ 152 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser17/include/parser/parser.h: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * 3 | * parser.h 4 | * Definitions for the "raw" parser (flex and bison phases only) 5 | * 6 | * This is the external API for the raw lexing/parsing functions. 
7 | * 8 | * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group 9 | * Portions Copyright (c) 1994, Regents of the University of California 10 | * Portions Copyright (c) 2023-2025, IvorySQL Global Development Team 11 | * 12 | * src/include/parser/parser.h 13 | * 14 | *------------------------------------------------------------------------- 15 | */ 16 | #ifndef PARSER_H 17 | #define PARSER_H 18 | 19 | #include "nodes/parsenodes.h" 20 | 21 | 22 | /* 23 | * RawParseMode determines the form of the string that raw_parser() accepts: 24 | * 25 | * RAW_PARSE_DEFAULT: parse a semicolon-separated list of SQL commands, 26 | * and return a List of RawStmt nodes. 27 | * 28 | * RAW_PARSE_TYPE_NAME: parse a type name, and return a one-element List 29 | * containing a TypeName node. 30 | * 31 | * RAW_PARSE_PLPGSQL_EXPR: parse a PL/pgSQL expression, and return 32 | * a one-element List containing a RawStmt node. 33 | * 34 | * RAW_PARSE_PLPGSQL_ASSIGNn: parse a PL/pgSQL assignment statement, 35 | * and return a one-element List containing a RawStmt node. "n" 36 | * gives the number of dotted names comprising the target ColumnRef. 
37 | */ 38 | typedef enum 39 | { 40 | RAW_PARSE_DEFAULT = 0, 41 | RAW_PARSE_TYPE_NAME, 42 | RAW_PARSE_PLPGSQL_EXPR, 43 | RAW_PARSE_PLPGSQL_ASSIGN1, 44 | RAW_PARSE_PLPGSQL_ASSIGN2, 45 | RAW_PARSE_PLPGSQL_ASSIGN3, 46 | } RawParseMode; 47 | 48 | /* Values for the backslash_quote GUC */ 49 | typedef enum 50 | { 51 | BACKSLASH_QUOTE_OFF, 52 | BACKSLASH_QUOTE_ON, 53 | BACKSLASH_QUOTE_SAFE_ENCODING, 54 | } BackslashQuoteType; 55 | 56 | /* GUC variables in scan.l (every one of these is a bad idea :-() */ 57 | extern PGDLLIMPORT int backslash_quote; 58 | extern PGDLLIMPORT bool escape_string_warning; 59 | extern PGDLLIMPORT bool standard_conforming_strings; 60 | 61 | /* Hook for plugins to get control in raw_parser() */ 62 | typedef List *(*raw_parser_hook_type) (const char *str, RawParseMode mode); 63 | extern PGDLLIMPORT raw_parser_hook_type sql_raw_parser; 64 | extern PGDLLIMPORT raw_parser_hook_type ora_raw_parser; 65 | 66 | /* Primary entry point for the raw parsing functions */ 67 | extern List *raw_parser(const char *str, RawParseMode mode); 68 | 69 | extern List *standard_raw_parser(const char *str, RawParseMode mode); 70 | 71 | /* Utility functions exported by gram.y (perhaps these should be elsewhere) */ 72 | extern List *SystemFuncName(char *name); 73 | extern TypeName *SystemTypeName(char *name); 74 | extern List *OracleSystemFuncName(char *name); 75 | extern TypeName *OracleSystemTypeName(char *name); 76 | 77 | 78 | #endif /* PARSER_H */ 79 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser17/include/utils/ora_compatible.h: -------------------------------------------------------------------------------- 1 | /*-------------------------------------------------------------------- 2 | * Copyright 2025 IvorySQL Global Development Team 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | * 16 | * ora_compatible.h 17 | * 18 | * Definition enumeration structure is fro supporting different compatibility modes. 19 | * 20 | * Portions Copyright (c) 2023-2025, IvorySQL Global Development Team 21 | * 22 | * src/include/utils/ora_compatible.h 23 | * 24 | * add the file for requirement "SQL PARSER" 25 | * 26 | *---------------------------------------------------------------------- 27 | */ 28 | 29 | #ifndef ORA_COMPATIBLE_H 30 | #define ORA_COMPATIBLE_H 31 | 32 | #define ORA_SEARCH_PATH "sys,\"$user\", public" 33 | #define DB_MODE_PARMATER "ivorysql.database_mode" 34 | 35 | #define CHAR_TYPE_LENGTH_MAX 2000 36 | 37 | typedef enum DBMode 38 | { 39 | DB_PG = 0, 40 | DB_ORACLE 41 | }DBMode; 42 | 43 | typedef enum DBParser 44 | { 45 | PG_PARSER = 0, 46 | ORA_PARSER 47 | }DBParser; 48 | 49 | typedef enum CaseSwitchMode 50 | { 51 | NORMAL = 0, 52 | INTERCHANGE, 53 | LOWERCASE 54 | }CaseSwitchMode; 55 | 56 | typedef enum 57 | { 58 | NLS_LENGTH_BYTE, 59 | NLS_LENGTH_CHAR 60 | } NlsLengthSemantics; 61 | 62 | /* oracle parser static parameters */ 63 | extern int compatible_db; 64 | extern int nls_length_semantics; 65 | extern bool identifier_case_from_pg_dump; 66 | extern bool enable_case_switch; 67 | extern bool enable_emptystring_to_NULL; 68 | extern int identifier_case_switch; 69 | 70 | char *identifier_case_transform(const char *ident, int len); 71 | char *downcase_identifier(const char *ident, int len, bool warn, bool truncate); 72 | char *upcase_identifier(const char *ident, int len, bool warn, bool truncate); 73 | 
void truncate_identifier(char *ident, int len, bool warn); 74 | bool is_all_upper(const char *src, int len); 75 | 76 | #endif /* ORA_COMPATIBLE_H */ 77 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser17/meson.build: -------------------------------------------------------------------------------- 1 | # Copyright 2025 IvorySQL Global Development Team 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | # Portions Copyright (c) 2023-2025, IvorySQL Global Development Team 16 | 17 | ora_parser_sources = files( 18 | 'liboracle_parser.c', 19 | 'ora_keywords.c', 20 | ) 21 | 22 | ora_parser_kwlist = custom_target('ora_kwlist', 23 | input: files('../../include/oracle_parser/ora_kwlist.h'), 24 | output: 'ora_kwlist_d.h', 25 | command: [perl, '-I', '@SOURCE_ROOT@/src/tools', files('../../tools/ora_gen_keywordlist.pl'), 26 | '--extern', '--output', '@OUTDIR@', '@INPUT@']) 27 | generated_sources += ora_parser_kwlist 28 | ora_parser_sources += ora_parser_kwlist 29 | 30 | 31 | ora_backend_scanner = custom_target('ora_scan', 32 | input: 'ora_scan.l', 33 | output: 'ora_scan.c', 34 | command: [flex_cmd, '--no-backup', '--fix-warnings', '--', '-CF', '-p', '-p'], 35 | ) 36 | generated_sources += ora_backend_scanner 37 | ora_parser_sources += ora_backend_scanner 38 | 39 | ora_backend_parser = custom_target('ora_gram', 40 | input: 'ora_gram.y', 41 | kwargs: bison_kw, 42 | ) 43 | generated_sources += ora_backend_parser.to_list() 44 | ora_parser_sources += ora_backend_parser 45 | 46 | 47 | liboracle_parser = shared_module('liboracle_parser', 48 | ora_parser_sources, 49 | c_pch: pch_postgres_h, 50 | include_directories: [postgres_inc, include_directories('.')], 51 | kwargs: pg_mod_args, 52 | ) 53 | backend_targets += liboracle_parser 54 | 55 | ora_parser = static_library('ora_parser', 56 | ora_parser_sources, 57 | c_pch: pch_postgres_h, 58 | include_directories: [postgres_inc, include_directories('.')], 59 | kwargs: pg_mod_args 60 | ) 61 | backend_link_with += ora_parser 62 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser17/ora_gramparse.h: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * 3 | * ora_gramparse.h 4 | * Shared definitions for the "raw" parser (flex and bison phases only) 5 | * 6 | * 
NOTE: this file is only meant to be included in the core parsing files, 7 | * ie, parser.c, ora_gram.y, ora_scan.l, and src/backend/oracle_parser/ora_keywords.c. 8 | * Definitions that are needed outside the core parser should be in parser.h. 9 | * 10 | * 11 | * Portions Copyright (c) 2023-2025, IvorySQL Global Development Team 12 | * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group 13 | * Portions Copyright (c) 1994, Regents of the University of California 14 | * 15 | * src/include/oracle_parser/ora_gramparse.h 16 | * 17 | * add the file for requirement "SQL PARSER" 18 | * 19 | *------------------------------------------------------------------------- 20 | */ 21 | 22 | #ifndef ORA_GRAMPARSE_H 23 | #define ORA_GRAMPARSE_H 24 | 25 | #include "nodes/parsenodes.h" 26 | #include "oracle_parser/ora_scanner.h" 27 | 28 | /* 29 | * NB: include ora_gram.h only AFTER including ora_scanner.h, because ora_scanner.h 30 | * is what #defines YYLTYPE. 31 | */ 32 | #include "ora_gram.h" 33 | 34 | typedef enum OraBodyStyle 35 | { 36 | OraBody_UNKOWN, 37 | OraBody_FUNC, 38 | OraBody_ANONYMOUS_BLOCK, 39 | OraBody_PACKAGE, 40 | OraBody_PACKAGEBODY, 41 | OraBody_MAYBE_ANONYMOUS_BLOCK_BEGIN, 42 | OraBody_MAYBE_ANONYMOUS_BLOCK_DECLARE 43 | }OraBodyStyle; 44 | 45 | /* Auxiliary data about a token (other than the token type) */ 46 | typedef struct 47 | { 48 | YYSTYPE lval; /* semantic information */ 49 | YYLTYPE lloc; /* offset in scanbuf */ 50 | int leng; /* length in bytes */ 51 | } TokenAuxData; 52 | 53 | /* Token pushback stack */ 54 | #define MAX_PUSHBACKS 16 55 | 56 | /* 57 | * The YY_EXTRA data that a flex scanner allows us to pass around. Private 58 | * state needed for raw parsing/lexing goes here. 59 | */ 60 | typedef struct ora_base_yy_extra_type 61 | { 62 | /* 63 | * Fields used by the core scanner. 64 | */ 65 | ora_core_yy_extra_type core_yy_extra; 66 | 67 | /* 68 | * State variables for ora_base_yylex(). 
69 | */ 70 | bool have_lookahead; /* is lookahead info valid? */ 71 | int lookahead_token; /* one-token lookahead */ 72 | ora_core_YYSTYPE lookahead_yylval; /* yylval for lookahead token */ 73 | YYLTYPE lookahead_yylloc; /* yylloc for lookahead token */ 74 | char *lookahead_end; /* end of current token */ 75 | char lookahead_hold_char; /* to be put back at *lookahead_end */ 76 | 77 | /* 78 | * State variables that belong to the grammar. 79 | */ 80 | List *parsetree; /* final parse result is delivered here */ 81 | 82 | /* 83 | * The native PG only cache one-token info include yylloc, yylval and token 84 | * number in yyextra, IvorySQL cache multiple tokens info using two arrays. 85 | */ 86 | int max_pushbacks; /* the maxsize of cache array */ 87 | int loc_pushback; /* # of used tokens */ 88 | int num_pushbacks; /* # of cached tokens */ 89 | int *pushback_token; /* token number array */ 90 | TokenAuxData *pushback_auxdata; /* auxdata array */ 91 | 92 | OraBodyStyle body_style; 93 | int body_start; 94 | int body_level; 95 | } ora_base_yy_extra_type; 96 | 97 | /* 98 | * In principle we should use yyget_extra() to fetch the yyextra field 99 | * from a yyscanner struct. However, flex always puts that field first, 100 | * and this is sufficiently performance-critical to make it seem worth 101 | * cheating a bit to use an inline macro. 
102 | */ 103 | #define pg_yyget_extra(yyscanner) (*((ora_base_yy_extra_type **) (yyscanner))) 104 | 105 | 106 | /* from libparser_oracle.c */ 107 | extern int ora_base_yylex(YYSTYPE *lvalp, YYLTYPE *llocp, 108 | ora_core_yyscan_t yyscanner); 109 | 110 | extern void set_oracle_plsql_body(ora_core_yyscan_t yyscanner, OraBodyStyle body_style); 111 | extern void set_oracle_plsql_bodystart(ora_core_yyscan_t yyscanner, int body_start, int body_level); 112 | 113 | /* from ora_gram.y */ 114 | extern void ora_parser_init(ora_base_yy_extra_type *yyext); 115 | extern int ora_base_yyparse(ora_core_yyscan_t yyscanner); 116 | 117 | #endif /* ORA_GRAMPARSE_H */ 118 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser17/ora_keywords.c: -------------------------------------------------------------------------------- 1 | /*------------------------------------------------------------------------- 2 | * 3 | * ora_keywords.c 4 | * IvorySQL's list of SQL keywords (Oracle Compatible) 5 | * 6 | * 7 | * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group 8 | * Portions Copyright (c) 1994, Regents of the University of California 9 | * Portions Copyright (c) 2023-2025, IvorySQL Global Development Team 10 | * 11 | * 12 | * IDENTIFICATION 13 | * src/backend/oracle_parser/ora_keywords.c 14 | * 15 | * add the file for requirement "SQL PARSER" 16 | * 17 | *------------------------------------------------------------------------- 18 | */ 19 | #include "c.h" 20 | 21 | #include "oracle_parser/ora_keywords.h" 22 | 23 | 24 | /* ScanKeywordList lookup data for SQL keywords */ 25 | 26 | #include "ora_kwlist_d.h" 27 | 28 | /* Keyword categories for SQL keywords */ 29 | 30 | #define PG_KEYWORD(kwname, value, category, collabel) category, 31 | 32 | const uint8 OraScanKeywordCategories[ORASCANKEYWORDS_NUM_KEYWORDS] = { 33 | #include "oracle_parser/ora_kwlist.h" 34 | }; 35 | 36 | #undef PG_KEYWORD 37 | 38 | /* Keyword 
can-be-bare-label flags for SQL keywords */ 39 | 40 | #define PG_KEYWORD(kwname, value, category, collabel) collabel, 41 | 42 | #define BARE_LABEL true 43 | #define AS_LABEL false 44 | 45 | const bool OraScanKeywordBareLabel[ORASCANKEYWORDS_NUM_KEYWORDS] = { 46 | #include "oracle_parser/ora_kwlist.h" 47 | }; 48 | 49 | #undef PG_KEYWORD 50 | #undef BARE_LABEL 51 | #undef AS_LABEL 52 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser_patches/Makefile.patch: -------------------------------------------------------------------------------- 1 | diff --git a/oracle_parser_orig/Makefile b/oracle_parser/Makefile 2 | index c7f9efb..493a7f2 100644 3 | --- a/oracle_parser_orig/Makefile 4 | +++ b/oracle_parser/Makefile 5 | @@ -25,15 +25,15 @@ 6 | # add the file for requirement "SQL PARSER" 7 | # 8 | #------------------------------------------------------------------------- 9 | -subdir = src/backend/oracle_parser 10 | -top_builddir = ../../.. 11 | +#subdir = contrib/oracle_parser 12 | +top_builddir = ../../../../../../ 13 | include $(top_builddir)/src/Makefile.global 14 | 15 | PGFILEDESC = "liboracle_parser - raw parser for oracle" 16 | NAME = liboracle_parser 17 | 18 | # include our headers first 19 | -override CPPFLAGS := -I. 
-I$(srcdir) $(CPPFLAGS) 20 | +override CPPFLAGS := -I./include -I$(srcdir) $(CPPFLAGS) 21 | rpath = 22 | 23 | OBJS = ora_keywords.o ora_gram.o ora_scan.o liboracle_parser.o $(WIN32RES) 24 | @@ -45,7 +45,7 @@ GEN_KEYWORDLIST_DEPS = $(TOOLSDIR)/ora_gen_keywordlist.pl $(TOOLSDIR)/PerfectHas 25 | 26 | all: all-shared-lib 27 | 28 | -include $(top_srcdir)/src/Makefile.shlib 29 | +include $(top_builddir)/src/Makefile.shlib 30 | 31 | install: all installdirs install-lib 32 | 33 | @@ -59,7 +59,7 @@ ora_gram.h: ora_gram.c 34 | touch $@ 35 | 36 | ora_gram.c: BISONFLAGS += -d 37 | -ora_gram.c: BISON_CHECK_CMD = $(PERL) $(srcdir)/../parser/check_keywords.pl $< $(srcdir)/../../include/oracle_parser/ora_kwlist.h 38 | +ora_gram.c: BISON_CHECK_CMD = $(PERL) $(srcdir)/check_keywords.pl $< $(srcdir)/include/oracle_parser/ora_kwlist.h 39 | 40 | ora_scan.c: FLEXFLAGS = -CF -p -p 41 | ora_scan.c: FLEX_NO_BACKUP=yes 42 | @@ -69,8 +69,8 @@ ora_scan.c: FLEX_FIX_WARNING=yes 43 | ora_scan.o ora_gram.o ora_keywords.o liboracle_parser.o: ora_gram.h 44 | 45 | # generate SQL keyword lookup table to be included into ora_keywords*.o. 46 | -ora_kwlist_d.h: $(top_srcdir)/src/include/oracle_parser/ora_kwlist.h $(GEN_KEYWORDLIST_DEPS) 47 | - $(GEN_KEYWORDLIST) --extern $< 48 | +#ora_kwlist_d.h: $(top_srcdir)/src/include/oracle_parser/ora_kwlist.h $(GEN_KEYWORDLIST_DEPS) 49 | +# $(GEN_KEYWORDLIST) --extern $< 50 | 51 | # Dependencies of ora_keywords*.o need to be managed explicitly to make sure 52 | # that you don't get broken parsing code, even in a non-enable-depend build. 
53 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser_patches/liboracle_parser.c.patch: -------------------------------------------------------------------------------- 1 | diff --git a/oracle_parser_orig/liboracle_parser.c b/oracle_parser/liboracle_parser.c 2 | index cfd1e80..c10cd6c 100644 3 | --- a/oracle_parser_orig/liboracle_parser.c 4 | +++ b/oracle_parser/liboracle_parser.c 5 | @@ -74,7 +74,8 @@ typedef struct JumbleState 6 | int highest_extern_param_id; 7 | } JumbleState; 8 | 9 | -/* saved hook value */ 10 | +/* saved hook value - not needed */ 11 | +/* 12 | static raw_parser_hook_type prev_raw_parser = NULL; 13 | static get_keywords_hook_type prev_pg_get_keywords = NULL; 14 | static fill_in_constant_lengths_hook_type prev_fill_in_contant_lengths = NULL; 15 | @@ -82,17 +83,19 @@ quote_identifier_hook_type prev_quote_identifier = NULL; 16 | 17 | void _PG_init(void); 18 | void _PG_fini(void); 19 | +*/ 20 | 21 | static int oracle_comp_location(const void *a, const void *b); 22 | 23 | -static List * oracle_raw_parser(const char *str, RawParseMode mode); 24 | +/* only export raw parser routine */ 25 | +extern PGDLLEXPORT List * oracle_raw_parser(const char *str, RawParseMode mode); 26 | + 27 | static Datum oracle_pg_get_keywords(PG_FUNCTION_ARGS); 28 | static void oracle_fill_in_constant_lengths(void *jjstate, const char *query, int query_loc); 29 | static const char * oracle_quote_identifier(const char *ident); 30 | 31 | +/* as a standalone raw parser there is no need for changing parser hooks */ 32 | /* 33 | - * Module load callback 34 | - */ 35 | void 36 | _PG_init(void) 37 | { 38 | @@ -107,9 +110,6 @@ _PG_init(void) 39 | quote_identifier_hook = oracle_quote_identifier; 40 | } 41 | 42 | -/* 43 | - * Module unload callback 44 | - */ 45 | void 46 | _PG_fini(void) 47 | { 48 | @@ -118,6 +118,7 @@ _PG_fini(void) 49 | fill_in_constant_lengths_hook = prev_fill_in_contant_lengths; 50 | 
quote_identifier_hook = prev_quote_identifier; 51 | } 52 | +*/ 53 | 54 | /* 55 | * comp_location: comparator for qsorting pgssLocationLen structs by location 56 | @@ -149,7 +150,7 @@ static char *str_udeescape(const char *str, char escape, 57 | * list have the form required by the specified RawParseMode. 58 | */ 59 | 60 | -static List * 61 | +List * 62 | oracle_raw_parser(const char *str, RawParseMode mode) 63 | { 64 | ora_core_yyscan_t yyscanner; 65 | @@ -1118,7 +1119,8 @@ invalid_pair: 66 | return NULL; /* keep compiler quiet */ 67 | } 68 | 69 | -static Datum 70 | + 71 | +__attribute__((unused)) static Datum 72 | oracle_pg_get_keywords(PG_FUNCTION_ARGS) 73 | { 74 | FuncCallContext *funcctx; 75 | @@ -1188,7 +1190,7 @@ oracle_pg_get_keywords(PG_FUNCTION_ARGS) 76 | SRF_RETURN_DONE(funcctx); 77 | } 78 | 79 | -static void 80 | +__attribute__((unused)) static void 81 | oracle_fill_in_constant_lengths(void *jjstate, const char *query, 82 | int query_loc) 83 | { 84 | @@ -1287,7 +1289,7 @@ oracle_fill_in_constant_lengths(void *jjstate, const char *query, 85 | ora_scanner_finish(yyscanner); 86 | } 87 | 88 | -static const char * 89 | +__attribute__((unused)) static const char * 90 | oracle_quote_identifier(const char *ident) 91 | { 92 | /* 93 | @@ -1364,3 +1366,144 @@ oracle_quote_identifier(const char *ident) 94 | 95 | return result; 96 | } 97 | + 98 | +/* to resolve standalone compile error */ 99 | +char * 100 | +downcase_identifier(const char *ident, int len, bool warn, bool truncate) 101 | +{ 102 | + char *result; 103 | + int i; 104 | + bool enc_is_single_byte; 105 | + 106 | + result = palloc(len + 1); 107 | + enc_is_single_byte = pg_database_encoding_max_length() == 1; 108 | + 109 | + /* 110 | + * SQL99 specifies Unicode-aware case normalization, which we don't yet 111 | + * have the infrastructure for. Instead we use tolower() to provide a 112 | + * locale-aware translation. 
However, there are some locales where this 113 | + * is not right either (eg, Turkish may do strange things with 'i' and 114 | + * 'I'). Our current compromise is to use tolower() for characters with 115 | + * the high bit set, as long as they aren't part of a multi-byte 116 | + * character, and use an ASCII-only downcasing for 7-bit characters. 117 | + */ 118 | + for (i = 0; i < len; i++) 119 | + { 120 | + unsigned char ch = (unsigned char) ident[i]; 121 | + 122 | + if (ch >= 'A' && ch <= 'Z') 123 | + ch += 'a' - 'A'; 124 | + else if (enc_is_single_byte && IS_HIGHBIT_SET(ch) && isupper(ch)) 125 | + ch = tolower(ch); 126 | + result[i] = (char) ch; 127 | + } 128 | + result[i] = '\0'; 129 | + 130 | + if (i >= NAMEDATALEN && truncate) 131 | + truncate_identifier(result, i, warn); 132 | + 133 | + return result; 134 | +} 135 | + 136 | +char * 137 | +upcase_identifier(const char *ident, int len, bool warn, bool truncate) 138 | +{ 139 | + char *result; 140 | + int i; 141 | + bool enc_is_single_byte; 142 | + 143 | + result = palloc(len + 1); 144 | + enc_is_single_byte = pg_database_encoding_max_length() == 1; 145 | + 146 | + /* 147 | + * SQL99 specifies Unicode-aware case normalization, which we don't yet 148 | + * have the infrastructure for. Instead we use toupper() to provide a 149 | + * locale-aware translation. However, there are some locales where this 150 | + * locale-aware translation. However, there are some locales where this 151 | + * is not right either (eg, Turkish may do strange things with 'i' and 152 | + * 'I'). Our current compromise is to use toupper() for characters with 153 | + * the high bit set, as long as they aren't part of a multi-byte 154 | + * character, and use an ASCII-only downcasing for 7-bit characters. 
155 | + */ 156 | + for (i = 0; i < len; i++) 157 | + { 158 | + unsigned char ch = (unsigned char) ident[i]; 159 | + 160 | + if (ch >= 'a' && ch <= 'z') 161 | + ch -= 'a' - 'A'; 162 | + else if (enc_is_single_byte && IS_HIGHBIT_SET(ch) && islower(ch)) 163 | + ch = toupper(ch); 164 | + result[i] = (char) ch; 165 | + } 166 | + result[i] = '\0'; 167 | + 168 | + if (i >= NAMEDATALEN && truncate) 169 | + truncate_identifier(result, i, warn); 170 | + 171 | + return result; 172 | +} 173 | + 174 | +char * 175 | +identifier_case_transform(const char *ident, int len) 176 | +{ 177 | + char *upper_ident = NULL, *lower_ident = NULL, *result = NULL; 178 | + 179 | + upper_ident = upcase_identifier(ident, len, true, true); 180 | + lower_ident = downcase_identifier(ident, len, true, true); 181 | + 182 | + if (strcmp(upper_ident, ident) == 0) 183 | + { 184 | + result = lower_ident; 185 | + pfree(upper_ident); 186 | + } 187 | + else if (strcmp(lower_ident, ident) == 0) 188 | + { 189 | + result = upper_ident; 190 | + pfree(lower_ident); 191 | + } 192 | + else 193 | + { 194 | + result = palloc0(len + 1); 195 | + memcpy(result, ident, len); 196 | + pfree(upper_ident); 197 | + pfree(lower_ident); 198 | + } 199 | + 200 | + return result; 201 | +} 202 | + 203 | +void 204 | +truncate_identifier(char *ident, int len, bool warn) 205 | +{ 206 | + if (len >= NAMEDATALEN) 207 | + { 208 | + len = pg_mbcliplen(ident, len, NAMEDATALEN - 1); 209 | + if (warn) 210 | + ereport(NOTICE, 211 | + (errcode(ERRCODE_NAME_TOO_LONG), 212 | + errmsg("identifier \"%s\" will be truncated to \"%.*s\"", 213 | + ident, len, ident))); 214 | + ident[len] = '\0'; 215 | + } 216 | +} 217 | + 218 | +/* 219 | + * Determine whether the letters in the string are all uppercase letters 220 | + */ 221 | +bool 222 | +is_all_upper(const char *src, int len) 223 | +{ 224 | + int i; 225 | + const char *s; 226 | + 227 | + s = src; 228 | + 229 | + for (i = 0; i < len; i++) 230 | + { 231 | + if (isalpha(*s) && islower(*s)) 232 | + 
return false; 233 | + s++; 234 | + } 235 | + 236 | + return true; 237 | +} 238 | -------------------------------------------------------------------------------- /src/backend/olr/oracle_parser_patches/ora_compatible.h.patch: -------------------------------------------------------------------------------- 1 | diff --git a/include_orig/utils/ora_compatible.h b/include/utils/ora_compatible.h 2 | index 9d4cfd3..3030bfa 100644 3 | --- a/include_orig/utils/ora_compatible.h 4 | +++ b/include/utils/ora_compatible.h 5 | @@ -59,4 +59,18 @@ typedef enum 6 | NLS_LENGTH_CHAR 7 | } NlsLengthSemantics; 8 | 9 | +/* oracle parser static parameters */ 10 | +extern int compatible_db; 11 | +extern int nls_length_semantics; 12 | +extern bool identifier_case_from_pg_dump; 13 | +extern bool enable_case_switch; 14 | +extern bool enable_emptystring_to_NULL; 15 | +extern int identifier_case_switch; 16 | + 17 | +char *identifier_case_transform(const char *ident, int len); 18 | +char *downcase_identifier(const char *ident, int len, bool warn, bool truncate); 19 | +char *upcase_identifier(const char *ident, int len, bool warn, bool truncate); 20 | +void truncate_identifier(char *ident, int len, bool warn); 21 | +bool is_all_upper(const char *src, int len); 22 | + 23 | #endif /* ORA_COMPATIBLE_H */ 24 | -------------------------------------------------------------------------------- /src/backend/utils/netio_utils.c: -------------------------------------------------------------------------------- 1 | /* 2 | * netio_utils.c 3 | * 4 | * Implementation of general network IO routines 5 | * 6 | * Copyright (c) Hornetlabs Technology, Inc. 
7 | * 8 | */ 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | #include "postgres.h" 23 | #include "utils/netio_utils.h" 24 | 25 | int 26 | netio_connect(NetioContext *ctx, const char *host, int port) 27 | { 28 | int flags, optval = 1, ret = -1; 29 | struct addrinfo hints = {0}, *res = NULL; 30 | char portstr[16]; 31 | 32 | snprintf(portstr, sizeof portstr, "%d", port); 33 | hints.ai_family = AF_UNSPEC; 34 | hints.ai_socktype = SOCK_STREAM; 35 | hints.ai_protocol = IPPROTO_TCP; 36 | 37 | ret = getaddrinfo(host, portstr, &hints, &res); 38 | if (ret != 0 || !res) 39 | { 40 | elog(WARNING, "getaddrinfo(%s:%s): %s", host, portstr, gai_strerror(ret)); 41 | return -1; 42 | } 43 | 44 | ctx->sockfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol); 45 | if (ctx->sockfd < 0) 46 | { 47 | return ret; 48 | } 49 | 50 | if (setsockopt(ctx->sockfd, SOL_SOCKET, SO_REUSEADDR, 51 | &optval, sizeof(optval)) < 0) 52 | { 53 | goto error; 54 | } 55 | 56 | 57 | if (setsockopt(ctx->sockfd, SOL_SOCKET, SO_KEEPALIVE, 58 | &optval, sizeof(optval)) < 0) 59 | { 60 | goto error; 61 | } 62 | 63 | 64 | if ((flags = fcntl(ctx->sockfd, F_GETFL, 0)) < 0 || 65 | fcntl(ctx->sockfd, F_SETFL, flags | O_NONBLOCK) < 0) 66 | { 67 | goto error; 68 | } 69 | 70 | if (connect(ctx->sockfd, res->ai_addr, res->ai_addrlen) < 0) 71 | { 72 | int errnum = errno; 73 | if (errnum == EINPROGRESS) 74 | { 75 | struct pollfd pfd = { .fd = ctx->sockfd, .events = POLLOUT }; 76 | if (poll(&pfd, 1, 5000) > 0) /* todo: configurable conn timeout */ 77 | { 78 | int err = 0; 79 | socklen_t len = sizeof(err); 80 | if (getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, &err, &len) == 0 && err == 0) 81 | { 82 | /* Connection established */ 83 | elog(DEBUG1, "OLR connected"); 84 | } 85 | else 86 | { 87 | /* Connection failed: err has errno-like value */ 88 | elog(WARNING, "connect failed: %s (errno=%d)", 
strerror(err), err); 89 | ret = -1; 90 | goto error; 91 | } 92 | } 93 | else 94 | { 95 | elog(WARNING, "connect timed out or poll error"); 96 | ret = -1; 97 | goto error; 98 | } 99 | } 100 | else 101 | { 102 | elog(WARNING, "connect failed errno = %d", errnum); 103 | ret = -1; 104 | goto error; 105 | } 106 | } 107 | 108 | strncpy(ctx->host, host, sizeof(ctx->host) - 1); 109 | ctx->port = port; 110 | ctx->is_connected = true; 111 | 112 | /* success if code reaches here */ 113 | ret = 0; 114 | error: 115 | /* close the socket on error */ 116 | if (ret) 117 | close(ctx->sockfd); 118 | return ret; 119 | } 120 | 121 | ssize_t 122 | netio_write(NetioContext *ctx, const void *buf, size_t len) 123 | { 124 | if (!ctx->is_connected) 125 | return -1; 126 | 127 | return send(ctx->sockfd, buf, len, 0); 128 | } 129 | 130 | ssize_t 131 | netio_read(NetioContext *ctx, StringInfoData * buf, int size) 132 | { 133 | fd_set readfds; 134 | struct timeval timeout = {2, 0}; 135 | char tmp[8192]; 136 | ssize_t total_read = 0; 137 | int sel = -1; 138 | 139 | if (!ctx || !ctx->is_connected || !buf) 140 | return -1; 141 | 142 | FD_ZERO(&readfds); 143 | FD_SET(ctx->sockfd, &readfds); 144 | 145 | sel = select(ctx->sockfd + 1, &readfds, NULL, NULL, 146 | &timeout); 147 | if (sel <= 0) 148 | { 149 | /* No data to read or error */ 150 | return -1; 151 | } 152 | 153 | /* unspecified size - read as much as possible until EAGAIN, EWOULDBLOCK or error */ 154 | if (size == -1) 155 | { 156 | while (true) 157 | { 158 | ssize_t n = recv(ctx->sockfd, tmp, sizeof(tmp), 0); 159 | if (n > 0) 160 | { 161 | appendBinaryStringInfo(buf, tmp, n); 162 | total_read += n; 163 | } 164 | else if (n == 0) 165 | { 166 | /* Peer closed connection */ 167 | elog(WARNING, "peer disconnected"); 168 | ctx->is_connected = false; 169 | break; 170 | } 171 | else 172 | { 173 | if (errno == EAGAIN || errno == EWOULDBLOCK) 174 | break; /* no more data to read or now */ 175 | if (errno == EINTR) 176 | continue; /* try again */ 177 | 
178 | /* recv error */ 179 | elog(WARNING, "recv error"); 180 | ctx->is_connected = false; 181 | return -1; 182 | } 183 | } 184 | } 185 | else 186 | { 187 | ssize_t remaining = size; 188 | while (remaining > 0) 189 | { 190 | ssize_t to_read = remaining < sizeof(tmp) ? remaining : sizeof(tmp); 191 | 192 | ssize_t n = recv(ctx->sockfd, tmp, to_read, 0); 193 | if (n > 0) 194 | { 195 | appendBinaryStringInfo(buf, tmp, n); 196 | total_read += n; 197 | remaining -= n; 198 | } 199 | else if (n == 0) 200 | { 201 | elog(WARNING, "peer disconnected"); 202 | ctx->is_connected = false; 203 | break; 204 | } 205 | else 206 | { 207 | if (errno == EAGAIN || errno == EWOULDBLOCK) 208 | break; 209 | if (errno == EINTR) 210 | continue; 211 | 212 | /* recv error */ 213 | elog(WARNING, "recv error"); 214 | ctx->is_connected = false; 215 | return -1; 216 | } 217 | } 218 | } 219 | 220 | if (total_read == 0) 221 | return -1; 222 | 223 | return total_read; 224 | } 225 | 226 | void 227 | netio_disconnect(NetioContext *ctx) 228 | { 229 | if (ctx->is_connected) 230 | { 231 | close(ctx->sockfd); 232 | ctx->is_connected = false; 233 | } 234 | } 235 | -------------------------------------------------------------------------------- /src/include/converter/debezium_event_handler.h: -------------------------------------------------------------------------------- 1 | /* 2 | * debezium_event_handler.h 3 | * 4 | * contains routines to process change events originated from 5 | * debezium connectors. 
6 | */ 7 | 8 | #ifndef SYNCHDB_SRC_INCLUDE_CONVERTER_DEBEZIUM_EVENT_HANDLER_H_ 9 | #define SYNCHDB_SRC_INCLUDE_CONVERTER_DEBEZIUM_EVENT_HANDLER_H_ 10 | 11 | /* 12 | * DbzType 13 | * 14 | * enum that represents how debezium could represent 15 | * a data value 16 | */ 17 | typedef enum _DbzType 18 | { 19 | /* DBZ types */ 20 | DBZTYPE_UNDEF = 0, 21 | DBZTYPE_FLOAT32, 22 | DBZTYPE_FLOAT64, 23 | DBZTYPE_FLOAT, 24 | DBZTYPE_DOUBLE, 25 | DBZTYPE_BYTES, 26 | DBZTYPE_INT8, 27 | DBZTYPE_INT16, 28 | DBZTYPE_INT32, 29 | DBZTYPE_INT64, 30 | DBZTYPE_STRUCT, 31 | DBZTYPE_STRING, 32 | } DbzType; 33 | 34 | int fc_processDBZChangeEvent(const char * event, SynchdbStatistics * myBatchStats, 35 | int flag, const char * name, bool isfirst, bool islast); 36 | 37 | 38 | #endif /* SYNCHDB_SRC_INCLUDE_CONVERTER_DEBEZIUM_EVENT_HANDLER_H_ */ 39 | -------------------------------------------------------------------------------- /src/include/converter/format_converter.h: -------------------------------------------------------------------------------- 1 | /* 2 | * format_converter.h 3 | * 4 | * Header file for the SynchDB format converter module 5 | * 6 | * This module provides structures and functions for processing 7 | * database change events in Debezium (DBZ) format and converting 8 | * them to a format suitable for SynchDB. 9 | * 10 | * Key components: 11 | * - Structures for representing DDL (Data Definition Language) events 12 | * - Structures for representing DML (Data Manipulation Language) events 13 | * - Functions for processing and converting DBZ change events 14 | * 15 | * Copyright (c) Hornetlabs Technology, Inc. 
 *
 */

#ifndef SYNCHDB_FORMAT_CONVERTER_H_
#define SYNCHDB_FORMAT_CONVERTER_H_

#include "utils/hsearch.h"
#include "nodes/pg_list.h"
#include "utils/jsonb.h"
#include "executor/replication_agent.h"
#include "synchdb/synchdb.h"

/* constants: rule-file section identifiers used when loading transform rules */
#define RULEFILE_DATATYPE_TRANSFORM 1
#define RULEFILE_OBJECTNAME_TRANSFORM 2
#define RULEFILE_EXPRESSION_TRANSFORM 3

/* structure to hold possible time representations in DBZ engine */
typedef enum _timeRep
{
	TIME_UNDEF = 0,
	TIME_DATE, /* number of days since epoch */
	TIME_TIME, /* number of milliseconds since epoch (NOTE(review): Debezium
				* documents io.debezium.time.Time as milliseconds past
				* midnight - confirm which is meant here) */
	TIME_MICROTIME, /* number of microseconds since midnight */
	TIME_NANOTIME, /* number of nanoseconds since midnight */
	TIME_TIMESTAMP, /* number of milliseconds since epoch */
	TIME_MICROTIMESTAMP, /* number of microseconds since epoch */
	TIME_NANOTIMESTAMP, /* number of nanoseconds since epoch */
	TIME_ZONEDTIMESTAMP, /* string representation of timestamp with timezone */
	TIME_MICRODURATION, /* duration expressed in microseconds */
	DATA_VARIABLE_SCALE, /* indication if scale is variable (for oracle) */
	DATA_GEOMETRY, /* indication of geometry data */
	DATA_ENUM, /* indication of enum data */
} TimeRep;

/* Structure to represent a column in a DDL event */
typedef struct dbz_ddl_column
{
	char * name;			/* column name */
	int length;				/* length / precision reported by the source */
	bool optional;			/* whether the column is nullable */
	int position;			/* ordinal position of the column */
	char * typeName;		/* source database data type name */
	char * enumValues;		/* enum value list, when the type is an enum */
	char * charsetName;		/* character set name, when applicable */
	bool autoIncremented;	/* whether the column auto-increments */
	char * defaultValueExpression;	/* default value expression, if any */
	int scale;				/* numeric scale, when applicable */
} DBZ_DDL_COLUMN;

/* another alias to the same DDL COLUMN struct for clarity */
typedef DBZ_DDL_COLUMN OLR_DDL_COLUMN;

/* Structure to represent a DDL event */
typedef struct dbz_ddl
{
	char * id;				/* identifier of the object the DDL applies to */
	DdlType type;			/* CREATE, DROP, ALTER... (see DdlType) */
	AlterSubType subtype;	/* subtype for ALTER TABLE operations */
	char * primaryKeyColumnNames;	/* names of the primary key columns */
	char * constraintName;	/* name of the constraint, if any */
	List * columns; /* list of DBZ_DDL_COLUMN */
	unsigned long long dbz_ts_ms; /* time(ms) when this DDL is processed by DBZ */
	unsigned long long src_ts_ms; /* time(ms) when this DDL is generated by source database */
} DBZ_DDL;

/* another alias to the same DDL struct for clarity */
typedef DBZ_DDL OLR_DDL;

/*
 * Hash entry keyed by column name carrying the column's PG type details
 * (presumably stored in DataCacheEntry->typeidhash below - confirm
 * against format_converter.c).
 */
typedef struct
{
	char name[NAMEDATALEN];
	Oid oid;
	int position;
	int typemod;
	bool ispk;
	char typcategory;
	bool typispreferred;
	char typname[NAMEDATALEN];
} NameOidEntry;

/*
 * Hash entry keyed by column name carrying the column's position and
 * type representation inside the incoming JSON change event (presumably
 * stored in DataCacheEntry->namejsonposhash below - confirm against
 * format_converter.c).
 */
typedef struct
{
	char name[NAMEDATALEN];
	int jsonpos;
	int dbztype;
	TimeRep timerep;
	int scale;
} NameJsonposEntry;

/* Structure to represent a column value in a DML event */
typedef struct dbz_dml_column_value
{
	char * name; /* name of the column field */
	char * remoteColumnName; /* original column name from remote server */
	char * value; /* expressed as string as taken from json */
	Oid datatype; /* data type Oid as defined by PostgreSQL */
	int position; /* position of this column value, start from 1 */
	int scale; /* location of decimal point - decimal type only */
	int timerep; /* how dbz represents time related fields */
	int typemod; /* extra data type modifier */
	bool ispk; /* indicate if this column is a primary key */
	int dbztype; /* data literal type as defined by dbz */
	char typcategory; /* type category defined by pg */
	bool typispreferred; /* whether type category is preferred by pg */
	char * typname; /* the name of the data type */
} DBZ_DML_COLUMN_VALUE;

/* Structure to represent a DML event */
typedef struct dbz_dml
{
	char op; /* operation code (presumably DBZ-style 'c'/'u'/'d' - confirm) */
	char * schema; /* source schema name */
	char * table; /* source table name */
	char * remoteObjectId; /* db.schema.table or db.table on remote side */
	char * mappedObjectId; /* schema.table, or just table on PG side */
	Oid tableoid; /* OID of the mapped PG table */
	int natts; /* number of columns of this pg table */
	List * columnValuesBefore; /* list of DBZ_DML_COLUMN_VALUE */
	List * columnValuesAfter; /* list of DBZ_DML_COLUMN_VALUE */
	unsigned long long dbz_ts_ms; /* time(ms) when this DML is processed by DBZ */
	unsigned long long src_ts_ms; /* time(ms) when this DML is generated by source database */
} DBZ_DML;

/* another alias to the same DML struct for clarity */
typedef DBZ_DML OLR_DML;

/* dml cache structure: caches per-table lookup state between DML events */
typedef struct dataCacheKey
{
	char schema[SYNCHDB_CONNINFO_DB_NAME_SIZE];
	char table[SYNCHDB_CONNINFO_DB_NAME_SIZE];
} DataCacheKey;
typedef struct dataCacheEntry
{
	DataCacheKey key;
	TupleDesc tupdesc;		/* cached tuple descriptor of the PG table */
	Oid tableoid;			/* OID of the PG table */
	HTAB * typeidhash;		/* hash of NameOidEntry - see above */
	HTAB * namejsonposhash;	/* hash of NameJsonposEntry - see above */
	int natts;				/* number of columns of this pg table */
} DataCacheEntry;

/* key for the data type mapping hash (source type -> PG type) */
typedef struct datatypeHashKey
{
	char extTypeName[SYNCHDB_DATATYPE_NAME_SIZE];
	bool autoIncremented;
} DatatypeHashKey;

typedef struct datatypeHashEntry
{
	DatatypeHashKey key;
	char pgsqlTypeName[SYNCHDB_DATATYPE_NAME_SIZE];
	int pgsqlTypeLength;
} DatatypeHashEntry;

/* key for the object name mapping hash (source object -> PG object) */
typedef struct objMapHashKey
{
	char extObjName[SYNCHDB_OBJ_NAME_SIZE];
	char extObjType[SYNCHDB_OBJ_TYPE_SIZE];
} ObjMapHashKey;

typedef struct objMapHashEntry
{
	ObjMapHashKey key;
	char pgsqlObjName[SYNCHDB_OBJ_NAME_SIZE];
} ObjMapHashEntry;

/* key for the transform expression hash (keyed by source object name) */
typedef struct transformExpressionHashKey
{
	char extObjName[SYNCHDB_OBJ_NAME_SIZE];
} TransformExpressionHashKey;

typedef struct transformExpressionHashEntry
{
	TransformExpressionHashKey key;
	char pgsqlTransExpress[SYNCHDB_TRANSFORM_EXPRESSION_SIZE];
} TransformExpressionHashEntry;

/* Function prototypes */
ConnectorType fc_get_connector_type(const char * connector); 197 | void fc_initFormatConverter(ConnectorType connectorType); 198 | void fc_deinitFormatConverter(ConnectorType connectorType); 199 | void fc_initDataCache(void); 200 | void fc_deinitDataCache(void); 201 | void fc_resetDataCache(void); 202 | bool fc_load_objmap(const char * name, ConnectorType connectorType); 203 | char * escapeSingleQuote(const char * in, bool addquote); 204 | int getPathElementString(Jsonb * jb, char * path, StringInfoData * strinfoout, bool removequotes); 205 | void remove_double_quotes(StringInfoData * str); 206 | bool find_exact_string_match(const char * line, const char * wordtofind); 207 | char * transform_object_name(const char * objid, const char * objtype); 208 | void splitIdString(char * id, char ** db, char ** schema, char ** table, bool usedb); 209 | int list_sort_cmp(const ListCell *a, const ListCell *b); 210 | PG_DDL * convert2PGDDL(DBZ_DDL * dbzddl, ConnectorType type); 211 | void updateSynchdbAttribute(DBZ_DDL * dbzddl, PG_DDL * pgddl, ConnectorType conntype, const char * name); 212 | PG_DML * convert2PGDML(DBZ_DML * dbzdml, ConnectorType type); 213 | 214 | #endif /* SYNCHDB_FORMAT_CONVERTER_H_ */ 215 | -------------------------------------------------------------------------------- /src/include/converter/olr_event_handler.h: -------------------------------------------------------------------------------- 1 | /* 2 | * olr_event_handler.h 3 | * 4 | * contains routines to process change events originated from 5 | * openlog replicator. 
6 | */ 7 | 8 | #ifndef SYNCHDB_SRC_INCLUDE_CONVERTER_OLR_EVENT_HANDLER_H_ 9 | #define SYNCHDB_SRC_INCLUDE_CONVERTER_OLR_EVENT_HANDLER_H_ 10 | 11 | #define DBZ_LOG_MINING_FLUSH_TABLE "LOG_MINING_FLUSH" 12 | 13 | /* 14 | * OlrType 15 | * 16 | * enum that represents how openlog replicator could represent 17 | * a data value 18 | */ 19 | typedef enum _OlrType 20 | { 21 | /* OLR types */ 22 | OLRTYPE_UNDEF, 23 | OLRTYPE_NUMBER, 24 | OLRTYPE_STRING 25 | } OlrType; 26 | 27 | int fc_processOLRChangeEvent(const char * event, SynchdbStatistics * myBatchStats, 28 | const char * name, bool * sendconfirm, bool isfirst, bool islast); 29 | 30 | void unload_oracle_parser(void); 31 | 32 | 33 | 34 | #endif /* SYNCHDB_SRC_INCLUDE_CONVERTER_OLR_EVENT_HANDLER_H_ */ 35 | -------------------------------------------------------------------------------- /src/include/executor/replication_agent.h: -------------------------------------------------------------------------------- 1 | /* 2 | * replication_agent.h 3 | * 4 | * Header file for the SynchDB replication agent 5 | * 6 | * This file defines the data structures and function prototypes 7 | * used by the replication agent to handle DDL and DML operations 8 | * in PostgreSQL format. 9 | * 10 | * Key components: 11 | * - Structures for representing DDL and DML operations 12 | * - Function prototypes for executing DDL and DML operations 13 | * 14 | * Copyright (c) 2024 Hornetlabs Technology, Inc. 
15 | * 16 | */ 17 | 18 | #ifndef SYNCHDB_REPLICATION_AGENT_H_ 19 | #define SYNCHDB_REPLICATION_AGENT_H_ 20 | 21 | #include "executor/tuptable.h" 22 | #include "synchdb/synchdb.h" 23 | 24 | /* Data structures representing PostgreSQL data formats */ 25 | typedef struct pg_ddl 26 | { 27 | char * ddlquery; /* to be fed into SPI*/ 28 | DdlType type; /* CREATE, DROP or ALTER...etc */ 29 | AlterSubType subtype; /* subtype for ALTER TABLE */ 30 | char * schema; /* name of PG schema */ 31 | char * tbname; /* name of PG table */ 32 | List * columns; /* list of PG_DDL_COLUMN */ 33 | } PG_DDL; 34 | 35 | /* 36 | * Structure to represent a PG column in a DDL event that is 37 | * sufficient to update the attribute table. It does not need 38 | * to contain full column information 39 | */ 40 | typedef struct pg_ddl_column 41 | { 42 | char * attname; 43 | char * atttype; 44 | int position; 45 | } PG_DDL_COLUMN; 46 | 47 | typedef struct pg_dml_column_value 48 | { 49 | char * value; /* string representation of column values that 50 | * is processed and ready to be used to built 51 | * into TupleTableSlot. 
52 | */ 53 | Oid datatype; 54 | int position; /* position of this value's attribute in tupdesc */ 55 | } PG_DML_COLUMN_VALUE; 56 | 57 | typedef struct pg_dml 58 | { 59 | char * dmlquery; /* to be fed into SPI */ 60 | 61 | char op; 62 | Oid tableoid; 63 | int natts; /* number of columns of this pg table */ 64 | List * columnValuesBefore; /* list of PG_DML_COLUMN_VALUE */ 65 | List * columnValuesAfter; /* list of PG_DML_COLUMN_VALUE */ 66 | } PG_DML; 67 | 68 | /* Function prototypes */ 69 | int ra_executePGDDL(PG_DDL * pgddl, ConnectorType type); 70 | int ra_executePGDML(PG_DML * pgdml, ConnectorType type, SynchdbStatistics * myBatchStats); 71 | int ra_getConninfoByName(const char * name, ConnectionInfo * conninfo, char ** connector); 72 | int ra_executeCommand(const char * query); 73 | int ra_listConnInfoNames(char ** out, int * numout); 74 | char * ra_transformDataExpression(char * data, char * wkb, char * srid, char * expression); 75 | int ra_listObjmaps(const char * name, ObjectMap ** out, int * numout); 76 | 77 | void destroyPGDDL(PG_DDL * ddlinfo); 78 | void destroyPGDML(PG_DML * dmlinfo); 79 | 80 | #endif /* SYNCHDB_REPLICATION_AGENT_H_ */ 81 | -------------------------------------------------------------------------------- /src/include/olr/olr_client.h: -------------------------------------------------------------------------------- 1 | /* 2 | * olr_client.h 3 | * 4 | * Implementation of Openlog Replicator Client 5 | * 6 | * Copyright (c) Hornetlabs Technology, Inc. 
7 | * 8 | */ 9 | 10 | #ifndef SYNCHDB_OLR_CLIENT_H_ 11 | #define SYNCHDB_OLR_CLIENT_H_ 12 | 13 | #include "postgres.h" 14 | #include "synchdb/synchdb.h" 15 | #include "utils/netio_utils.h" 16 | 17 | /** 18 | * RedoResponseCode - Enum representing response code from OLR 19 | */ 20 | typedef enum _ResponseCode 21 | { 22 | RES_READY = 0, 23 | RES_FAILED_START, 24 | RES_STARTING, 25 | RES_ALREADY_STARTED, 26 | RES_REPLICATE, 27 | RES_PAYLOAD, 28 | RES_INVALID_DATABASE, 29 | RES_INVALID_COMMAND 30 | } ResponseCode; 31 | 32 | int olr_client_start_or_cont_replication(char * source, bool which); 33 | int olr_client_init(const char * hostname, unsigned int port); 34 | void olr_client_shutdown(void); 35 | int olr_client_get_change(int myConnectorId, bool * dbzExitSignal, 36 | SynchdbStatistics * myBatchStats, bool * sendconfirm); 37 | void olr_client_set_scns(orascn scn, orascn c_scn, orascn c_idx); 38 | orascn olr_client_get_c_scn(void); 39 | orascn olr_client_get_scn(void); 40 | orascn olr_client_get_c_idx(void); 41 | int olr_client_confirm_scn(char * source); 42 | bool olr_client_write_scn_state(ConnectorType type, const char * name, const char * srcdb, bool force); 43 | bool olr_client_init_scn_state(ConnectorType type, const char * name, const char * srcdb); 44 | bool olr_client_get_connect_status(void); 45 | bool olr_client_write_snapshot_state(ConnectorType type, const char * name, const char * dstdb, bool done); 46 | bool olr_client_read_snapshot_state(ConnectorType type, const char * name, const char * dstdb, bool * done); 47 | 48 | #endif /* SYNCHDB_OLR_CLIENT_H_ */ 49 | -------------------------------------------------------------------------------- /src/include/utils/netio_utils.h: -------------------------------------------------------------------------------- 1 | /* 2 | * netio_utils.h 3 | * 4 | * Implementation of general network IO routines 5 | * 6 | * Copyright (c) Hornetlabs Technology, Inc. 
7 | * 8 | */ 9 | #ifndef SYNCHDB_NETIO_UTILS_H_ 10 | #define SYNCHDB_NETIO_UTILS_H_ 11 | 12 | #include "synchdb/synchdb.h" 13 | 14 | #define NETIO_NODATA -1 15 | #define NETIO_PEER_DISCONNECTED -2 16 | #define NETIO_FATAL_ERROR -3 17 | 18 | typedef struct 19 | { 20 | int sockfd; 21 | char host[SYNCHDB_CONNINFO_HOSTNAME_SIZE]; 22 | int port; 23 | bool is_connected; 24 | int errcode; 25 | } NetioContext; 26 | 27 | int netio_connect(NetioContext *ctx, const char *host, int port); 28 | ssize_t netio_write(NetioContext *ctx, const void *buf, size_t len); 29 | ssize_t netio_read(NetioContext *ctx, StringInfoData * buf, int size); 30 | void netio_disconnect(NetioContext *ctx); 31 | 32 | #endif /* SYNCHDB_NETIO_UTILS_H_ */ 33 | -------------------------------------------------------------------------------- /src/monitoring/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | services: 4 | prometheus: 5 | image: prom/prometheus:latest 6 | container_name: prometheus 7 | #network_mode: "host" 8 | volumes: 9 | - ./prometheus.yml:/etc/prometheus/prometheus.yml 10 | networks: 11 | - synchdbnet 12 | - pubnet 13 | ports: 14 | - "9090:9090" 15 | 16 | grafana: 17 | image: grafana/grafana:latest 18 | container_name: grafana 19 | volumes: 20 | - ./grafana-provisioning:/etc/grafana/provisioning 21 | networks: 22 | - synchdbnet 23 | - pubnet 24 | ports: 25 | - "3000:3000" 26 | depends_on: 27 | - prometheus 28 | 29 | networks: 30 | synchdbnet: 31 | name: synchdbnet 32 | driver: bridge 33 | internal: true 34 | pubnet: 35 | name: pubnet 36 | driver: bridge 37 | -------------------------------------------------------------------------------- /src/monitoring/grafana-provisioning/dashboards/dashboards.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | providers: 3 | - name: 'synchdb' 4 | orgId: 1 5 | folder: 'SynchDB' 6 | type: file 7 | disableDeletion: false 8 
| allowUiUpdates: true 9 | updateIntervalSeconds: 30 10 | options: 11 | # IMPORTANT: point to the folder where your JSONs live 12 | path: /etc/grafana/provisioning/dashboards 13 | foldersFromFilesStructure: true 14 | -------------------------------------------------------------------------------- /src/monitoring/grafana-provisioning/datasources/datasource.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | datasources: 3 | - name: Prometheus 4 | type: prometheus 5 | uid: aepv5908xcwe8f # <-- matches template's datasource uid 6 | access: proxy 7 | orgId: 1 8 | url: http://prometheus:9090 9 | isDefault: true 10 | editable: true 11 | 12 | -------------------------------------------------------------------------------- /src/monitoring/jmx-conf/jmxacc.file: -------------------------------------------------------------------------------- 1 | # jmxremote.access 2 | # Format: username access_level 3 | 4 | monitorRole readonly 5 | controlRole readwrite 6 | 7 | -------------------------------------------------------------------------------- /src/monitoring/jmx-conf/jmxexport.conf: -------------------------------------------------------------------------------- 1 | startDelaySeconds: 0 2 | ssl: false 3 | lowercaseOutputName: true 4 | lowercaseOutputLabelNames: true 5 | 6 | rules: 7 | - pattern: ".*" 8 | 9 | -------------------------------------------------------------------------------- /src/monitoring/jmx-conf/jmxpwd.file: -------------------------------------------------------------------------------- 1 | # jmxremote.password 2 | # Format: username password 3 | monitorRole mySecretPassword 4 | controlRole anotherSecretPassword 5 | -------------------------------------------------------------------------------- /src/monitoring/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s 3 | scrape_timeout: 10s 4 | evaluation_interval: 15s 5 | 6 | 
alerting: 7 | alertmanagers: 8 | - static_configs: 9 | - targets: [] 10 | 11 | scrape_configs: 12 | - job_name: 'prometheus' 13 | static_configs: 14 | - targets: 15 | - localhost:9090 16 | labels: 17 | app: prometheus 18 | 19 | - job_name: 'debezium-jvm' 20 | static_configs: 21 | - targets: 22 | - synchdb:9404 # replace with SynchDB MySQL connector endpoint 23 | - synchdb:9405 # replace with SynchDB SQL Server connector endpoint 24 | - synchdb:9406 # replace with SynchDB Oracle23ai connector endpoint 25 | - synchdb:9407 # replace with SynchDB Oracle19c connector endpoint 26 | -------------------------------------------------------------------------------- /src/test/pytests/hammerdb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Hornetlabs/synchdb/7433f60bba5133f17c33d29ac3dc5c705c3db63c/src/test/pytests/hammerdb/__init__.py -------------------------------------------------------------------------------- /src/test/pytests/hammerdb/conftest.py: -------------------------------------------------------------------------------- 1 | pytest_plugins = ["conftest"] 2 | -------------------------------------------------------------------------------- /src/test/pytests/synchdbtests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Hornetlabs/synchdb/7433f60bba5133f17c33d29ac3dc5c705c3db63c/src/test/pytests/synchdbtests/__init__.py -------------------------------------------------------------------------------- /src/test/pytests/synchdbtests/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import tempfile 4 | import time 5 | import shutil 6 | import psycopg2 7 | import pytest 8 | 9 | PG_PORT = "5432" 10 | PG_HOST = "127.0.0.1" 11 | OLRVER = "1.3.0" 12 | 13 | @pytest.fixture(scope="session") 14 | def pg_instance(request): 15 | #temp_dir = 
tempfile.mkdtemp(prefix="synchdb_pg_") 16 | temp_dir = "synchdb_testdir" 17 | data_dir = os.path.join(temp_dir, "data") 18 | log_file = os.path.join(temp_dir, "logfile") 19 | 20 | # remove dir if exists 21 | if os.path.isdir(temp_dir): 22 | shutil.rmtree(temp_dir) 23 | 24 | # Init DB 25 | subprocess.run(["initdb", "-D", data_dir], check=True, stdout=subprocess.DEVNULL) 26 | 27 | conf_file = os.path.join(data_dir, "postgresql.conf") 28 | with open(conf_file, "a") as f: 29 | # f.write("\nlog_min_messages = debug1\n") 30 | f.write("\nsynchdb.naptime = 10\n") 31 | f.write("\nsynchdb.dbz_batch_size= 16384\n") 32 | f.write("\nsynchdb.dbz_queue_size= 32768\n") 33 | f.write("\nsynchdb.jvm_max_heap_size= 2048\n") 34 | f.write("\nlog_min_messages = debug1\n") 35 | 36 | # Start Postgres 37 | #print("[setup] setting up postgresql for test...") 38 | subprocess.run([ 39 | "pg_ctl", "-D", data_dir, "-o", f"-p {PG_PORT}", "-l", log_file, "start" 40 | ], check=True) 41 | 42 | # Wait for startup 43 | for _ in range(10): 44 | try: 45 | conn = psycopg2.connect(host=PG_HOST, dbname="postgres", port=PG_PORT) 46 | conn.close() 47 | break 48 | except Exception as e: 49 | time.sleep(1) 50 | else: 51 | with open(log_file) as f: 52 | print(f.read()) 53 | raise RuntimeError("PostgreSQL failed to start") 54 | 55 | # Yield PostgreSQL runtime info 56 | yield { 57 | "host": PG_HOST, 58 | "port": PG_PORT, 59 | "dbname": "postgres", 60 | "temp_dir": temp_dir, 61 | "data_dir": data_dir, 62 | "log_file": log_file 63 | } 64 | 65 | # do not remove postgresql server dir if failed 66 | if request.session.testsfailed > 0: 67 | print(f"test failed: postgresql server dir and log retained at {data_dir} and {log_file}") 68 | subprocess.run(["pg_ctl", "-D", data_dir, "stop", "-m", "immediate"], check=True, stdout=subprocess.DEVNULL) 69 | else: 70 | subprocess.run(["pg_ctl", "-D", data_dir, "stop", "-m", "immediate"], check=True, stdout=subprocess.DEVNULL) 71 | shutil.rmtree(temp_dir) 72 | 73 | 
@pytest.fixture(scope="session") 74 | def pg_cursor(pg_instance): 75 | # Establish one shared connection + cursor 76 | conn = psycopg2.connect( 77 | host=pg_instance["host"], 78 | dbname=pg_instance["dbname"], 79 | port=pg_instance["port"] 80 | ) 81 | conn.autocommit = True 82 | cur = conn.cursor() 83 | 84 | # Create extension (only once) 85 | cur.execute("CREATE EXTENSION IF NOT EXISTS synchdb CASCADE;") 86 | 87 | yield cur 88 | #print("tearing down pg_cursor..") 89 | cur.close() 90 | conn.close() 91 | 92 | def pytest_addoption(parser): 93 | parser.addoption( 94 | "--dbvendor", action="store", default="mysql", 95 | help="Source database vendor to test against (mysql, sqlserver, oracle)" 96 | ) 97 | parser.addoption( 98 | "--tpccmode", action="store", default="serial", 99 | help="tpcc running mode, serial or parallel" 100 | ) 101 | 102 | @pytest.fixture(scope="session") 103 | def dbvendor(pytestconfig): 104 | return pytestconfig.getoption("dbvendor") 105 | 106 | @pytest.fixture(scope="session") 107 | def tpccmode(pytestconfig): 108 | return pytestconfig.getoption("tpccmode") 109 | 110 | @pytest.fixture(scope="session", autouse=True) 111 | def setup_remote_instance(dbvendor, request): 112 | env = os.environ.copy() 113 | env["DBTYPE"] = dbvendor 114 | env["WHICH"] = "n/a" 115 | env["OLRVER"] = OLRVER 116 | env["INTERNAL"] = "0" 117 | 118 | #print(f"[setup] setting up heterogeneous database {dbvendor}...") 119 | subprocess.run(["bash", "./ci/setup-remotedbs.sh"], check=True, env=env, stdout=subprocess.DEVNULL) 120 | 121 | yield 122 | 123 | teardown_remote_instance(dbvendor) 124 | 125 | @pytest.fixture(scope="session") 126 | def hammerdb(dbvendor): 127 | env = os.environ.copy() 128 | env["DBTYPE"] = "hammerdb" 129 | env["WHICH"] = dbvendor 130 | 131 | subprocess.run(["bash", "./ci/setup-remotedbs.sh"], check=True, env=env, stdout=subprocess.DEVNULL) 132 | subprocess.run(["docker", "network", "create", "tpccnet"], check=True, stdout=subprocess.DEVNULL) 133 | 
subprocess.run(["docker", "network", "connect", "tpccnet", f"{dbvendor}"], check=True, stdout=subprocess.DEVNULL) 134 | subprocess.run(["docker", "network", "connect", "tpccnet", "hammerdb"], check=True, stdout=subprocess.DEVNULL) 135 | 136 | yield 137 | 138 | subprocess.run(["docker", "network", "disconnect", "tpccnet", f"{dbvendor}"], check=True, stdout=subprocess.DEVNULL) 139 | subprocess.run(["docker", "network", "disconnect", "tpccnet", "hammerdb"], check=True, stdout=subprocess.DEVNULL) 140 | subprocess.run(["docker", "network", "rm", "tpccnet"], check=True, stdout=subprocess.DEVNULL) 141 | teardown_remote_instance("hammerdb") 142 | 143 | def teardown_remote_instance(dbvendor): 144 | env = os.environ.copy() 145 | env["DBTYPE"] = dbvendor 146 | 147 | subprocess.run(["bash", "./ci/teardown-remotedbs.sh"], check=True, env=env, stdout=subprocess.DEVNULL) 148 | 149 | 150 | 151 | -------------------------------------------------------------------------------- /src/test/pytests/synchdbtests/t/test_002_ddl.py: -------------------------------------------------------------------------------- 1 | import common 2 | from common import run_pg_query, run_pg_query_one, run_remote_query, create_synchdb_connector, getConnectorName, getDbname, verify_default_type_mappings 3 | 4 | def test_CreateTable(pg_cursor, dbvendor): 5 | assert True 6 | 7 | def test_CreateTableWithError(pg_cursor, dbvendor): 8 | assert True 9 | 10 | def test_CreateTableWithNoPK(pg_cursor, dbvendor): 11 | assert True 12 | 13 | def test_DropTable(pg_cursor, dbvendor): 14 | assert True 15 | 16 | def test_DropTableWithError(pg_cursor, dbvendor): 17 | assert True 18 | 19 | def test_AlterTableAlterColumn(pg_cursor, dbvendor): 20 | assert True 21 | 22 | def test_AlterTableAlterColumnWithError(pg_cursor, dbvendor): 23 | assert True 24 | 25 | def test_AlterTableAlterColumnAddPK(pg_cursor, dbvendor): 26 | assert True 27 | 28 | def test_AlterTableiAddColumn(pg_cursor, dbvendor): 29 | assert True 30 | 31 | def 
test_AlterTableiAddColumnWithError(pg_cursor, dbvendor): 32 | assert True 33 | 34 | def test_AlterTableAddColumnAddPK(pg_cursor, dbvendor): 35 | assert True 36 | 37 | def test_AlterTableDropColumn(pg_cursor, dbvendor): 38 | assert True 39 | 40 | def test_AlterTableDropColumnWithError(pg_cursor, dbvendor): 41 | assert True 42 | 43 | def test_AlterTableDropColumnDropPK(pg_cursor, dbvendor): 44 | assert True 45 | -------------------------------------------------------------------------------- /src/test/pytests/synchdbtests/t/test_004_dml.py: -------------------------------------------------------------------------------- 1 | import common 2 | import time 3 | from common import run_pg_query, run_pg_query_one, run_remote_query, create_synchdb_connector, getConnectorName, getDbname, create_and_start_synchdb_connector, stop_and_delete_synchdb_connector, drop_default_pg_schema 4 | 5 | def test_Insert(pg_cursor, dbvendor): 6 | name = getConnectorName(dbvendor) + "_insert" 7 | dbname = getDbname(dbvendor).lower() 8 | 9 | result = create_and_start_synchdb_connector(pg_cursor, dbvendor, name, "no_data") 10 | assert result == 0 11 | 12 | if dbvendor == "mysql": 13 | query = """ 14 | CREATE TABLE inserttable( 15 | a INT PRIMARY KEY, 16 | b VARCHAR(255)); 17 | """ 18 | elif dbvendor == "sqlserver": 19 | query = """ 20 | CREATE TABLE inserttable( 21 | a INT NOT NULL PRIMARY KEY, 22 | b VARCHAR(255)); 23 | EXEC sys.sp_cdc_enable_table @source_schema = 'dbo', 24 | @source_name = 'inserttable', @role_name = NULL, 25 | @supports_net_changes = 0; 26 | """ 27 | else: 28 | query = """ 29 | CREATE TABLE inserttable( 30 | a NUMBER PRIMARY KEY, 31 | b VARCHAR(255)); 32 | """ 33 | 34 | run_remote_query(dbvendor, query) 35 | if dbvendor == "oracle": 36 | time.sleep(30) 37 | else: 38 | time.sleep(10) 39 | 40 | out=run_remote_query(dbvendor, "INSERT INTO inserttable (a, b) VALUES (1, 'Hello')") 41 | out=run_remote_query(dbvendor, "COMMIT") 42 | if dbvendor == "oracle": 43 | time.sleep(75) 44 | 
else: 45 | time.sleep(15) 46 | 47 | extrows = run_remote_query(dbvendor, f"SELECT a, b FROM inserttable") 48 | rows = run_pg_query(pg_cursor, f"SELECT a, b FROM {dbname}.inserttable") 49 | assert len(extrows) > 0 50 | assert len(rows) > 0 51 | assert len(extrows) == len(rows) 52 | 53 | for row, extrow in zip(rows, extrows): 54 | assert int(row[0]) == int(extrow[0]) 55 | assert str(row[1]) == str(extrow[1]) 56 | 57 | extrows = run_remote_query(dbvendor, f"DROP TABLE inserttable") 58 | stop_and_delete_synchdb_connector(pg_cursor, name) 59 | drop_default_pg_schema(pg_cursor, dbvendor) 60 | 61 | def test_InsertWithError(pg_cursor, dbvendor): 62 | assert True 63 | 64 | def test_Update(pg_cursor, dbvendor): 65 | name = getConnectorName(dbvendor) + "_update" 66 | dbname = getDbname(dbvendor).lower() 67 | 68 | result = create_and_start_synchdb_connector(pg_cursor, dbvendor, name, "no_data") 69 | assert result == 0 70 | 71 | if dbvendor == "mysql": 72 | query = """ 73 | CREATE TABLE updatetable( 74 | a INT PRIMARY KEY, 75 | b VARCHAR(255)); 76 | """ 77 | elif dbvendor == "sqlserver": 78 | query = """ 79 | CREATE TABLE updatetable( 80 | a INT NOT NULL PRIMARY KEY, 81 | b VARCHAR(255)); 82 | EXEC sys.sp_cdc_enable_table @source_schema = 'dbo', 83 | @source_name = 'updatetable', @role_name = NULL, 84 | @supports_net_changes = 0; 85 | """ 86 | else: 87 | query = """ 88 | CREATE TABLE updatetable( 89 | a NUMBER PRIMARY KEY, 90 | b VARCHAR(255)); 91 | """ 92 | 93 | run_remote_query(dbvendor, query) 94 | if dbvendor == "oracle" or dbvendor == "olr": 95 | run_remote_query(dbvendor, "ALTER TABLE updatetable ADD SUPPLEMENTAL LOG DATA (ALL) COLUMNS") 96 | time.sleep(30) 97 | else: 98 | time.sleep(10) 99 | 100 | run_remote_query(dbvendor, "INSERT INTO updatetable (a, b) VALUES (1, 'Hello')") 101 | run_remote_query(dbvendor, "UPDATE updatetable SET a = 2") 102 | run_remote_query(dbvendor, "UPDATE updatetable SET b = 'olleH'") 103 | run_remote_query(dbvendor, "COMMIT") 104 | 105 | if 
dbvendor == "oracle": 106 | time.sleep(75) 107 | else: 108 | time.sleep(10) 109 | 110 | extrows = run_remote_query(dbvendor, f"SELECT a, b FROM updatetable") 111 | rows = run_pg_query(pg_cursor, f"SELECT a, b FROM {dbname}.updatetable") 112 | assert len(extrows) > 0 113 | assert len(rows) > 0 114 | assert len(extrows) == len(rows) 115 | 116 | for row, extrow in zip(rows, extrows): 117 | assert int(row[0]) == int(extrow[0]) 118 | assert str(row[1]) == str(extrow[1]) 119 | 120 | extrows = run_remote_query(dbvendor, f"DROP TABLE updatetable") 121 | stop_and_delete_synchdb_connector(pg_cursor, name) 122 | drop_default_pg_schema(pg_cursor, dbvendor) 123 | 124 | def test_UpdateWithError(pg_cursor, dbvendor): 125 | assert True 126 | 127 | def test_Delete(pg_cursor, dbvendor): 128 | name = getConnectorName(dbvendor) + "_delete" 129 | dbname = getDbname(dbvendor).lower() 130 | 131 | result = create_and_start_synchdb_connector(pg_cursor, dbvendor, name, "no_data") 132 | assert result == 0 133 | 134 | if dbvendor == "mysql": 135 | query = """ 136 | CREATE TABLE deletetable( 137 | a INT PRIMARY KEY, 138 | b VARCHAR(255)); 139 | """ 140 | elif dbvendor == "sqlserver": 141 | query = """ 142 | CREATE TABLE deletetable( 143 | a INT NOT NULL PRIMARY KEY, 144 | b VARCHAR(255)); 145 | EXEC sys.sp_cdc_enable_table @source_schema = 'dbo', 146 | @source_name = 'deletetable', @role_name = NULL, 147 | @supports_net_changes = 0; 148 | """ 149 | else: 150 | query = """ 151 | CREATE TABLE deletetable( 152 | a NUMBER PRIMARY KEY, 153 | b VARCHAR(255)); 154 | """ 155 | 156 | run_remote_query(dbvendor, query) 157 | if dbvendor == "oracle" or dbvendor == "olr": 158 | run_remote_query(dbvendor, "ALTER TABLE deletetable ADD SUPPLEMENTAL LOG DATA (ALL) COLUMNS") 159 | time.sleep(30) 160 | else: 161 | time.sleep(10) 162 | 163 | run_remote_query(dbvendor, "INSERT INTO deletetable (a, b) VALUES (1, 'Hello')") 164 | run_remote_query(dbvendor, "INSERT INTO deletetable (a, b) VALUES (2, 'SynchDB')") 165 
| run_remote_query(dbvendor, "INSERT INTO deletetable (a, b) VALUES (3, 'Pytest')") 166 | run_remote_query(dbvendor, "COMMIT") 167 | 168 | if dbvendor == "oracle": 169 | time.sleep(75) 170 | else: 171 | time.sleep(15) 172 | 173 | extrows = run_remote_query(dbvendor, f"SELECT a, b FROM deletetable") 174 | rows = run_pg_query(pg_cursor, f"SELECT a, b FROM {dbname}.deletetable") 175 | assert len(extrows) > 0 and len(extrows) == 3 176 | assert len(rows) > 0 and len(rows) == 3 177 | assert len(extrows) == len(rows) 178 | 179 | for row, extrow in zip(rows, extrows): 180 | assert int(row[0]) == int(extrow[0]) 181 | assert str(row[1]) == str(extrow[1]) 182 | 183 | run_remote_query(dbvendor, "DELETE FROM deletetable WHERE a = 2") 184 | if dbvendor == "oracle": 185 | time.sleep(75) 186 | else: 187 | time.sleep(15) 188 | 189 | extrows = run_remote_query(dbvendor, f"SELECT a, b FROM deletetable") 190 | rows = run_pg_query(pg_cursor, f"SELECT a, b FROM {dbname}.deletetable") 191 | assert len(rows) > 0 and len(rows) == 2 192 | assert len(extrows) > 0 and len(extrows) == 2 193 | assert len(extrows) == len(rows) 194 | 195 | for row, extrow in zip(rows, extrows): 196 | assert int(row[0]) == int(extrow[0]) 197 | assert str(row[1]) == str(extrow[1]) 198 | 199 | extrows = run_remote_query(dbvendor, f"DROP TABLE deletetable") 200 | stop_and_delete_synchdb_connector(pg_cursor, name) 201 | drop_default_pg_schema(pg_cursor, dbvendor) 202 | 203 | def test_DeleteWithError(pg_cursor, dbvendor): 204 | assert True 205 | 206 | def test_SPIInsert(pg_cursor, dbvendor): 207 | assert True 208 | 209 | def test_SPIInsertWithError(pg_cursor, dbvendor): 210 | assert True 211 | 212 | def test_SPIUpdate(pg_cursor, dbvendor): 213 | assert True 214 | 215 | def test_SPIUpdateWithError(pg_cursor, dbvendor): 216 | assert True 217 | 218 | def test_SPIDelete(pg_cursor, dbvendor): 219 | assert True 220 | 221 | def test_SPIDeleteWithError(pg_cursor, dbvendor): 222 | assert True 223 | 224 | 
-------------------------------------------------------------------------------- /src/test/pytests/synchdbtests/t/test_005_utility.py: -------------------------------------------------------------------------------- 1 | import common 2 | from common import run_pg_query, run_pg_query_one, run_remote_query, create_synchdb_connector, getConnectorName, getDbname, verify_default_type_mappings 3 | 4 | def test_ConnectorPause(pg_cursor, dbvendor): 5 | assert True 6 | 7 | def test_ConnectorPauseWithError(pg_cursor, dbvendor): 8 | assert True 9 | 10 | def test_ConnectorResume(pg_cursor, dbvendor): 11 | assert True 12 | 13 | def test_ConnectorResumeWithError(pg_cursor, dbvendor): 14 | assert True 15 | 16 | def test_GetStateView(pg_cursor, dbvendor): 17 | assert True 18 | 19 | def test_GetStatsView(pg_cursor, dbvendor): 20 | assert True 21 | 22 | def test_ResetStatsView(pg_cursor, dbvendor): 23 | assert True 24 | 25 | def test_SetOffset(pg_cursor, dbvendor): 26 | assert True 27 | 28 | def test_LogJavaMeminfo(pg_cursor, dbvendor): 29 | assert True 30 | 31 | def test_CreateConnectorInfoMax(pg_cursor, dbvendor): 32 | assert True 33 | 34 | def test_OffsetFileRemoval(pg_cursor, dbvendor): 35 | assert True 36 | 37 | def test_SchemaHistoryFileRemoval(pg_cursor, dbvendor): 38 | assert True 39 | -------------------------------------------------------------------------------- /src/test/regress/results/regression.diffs: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Hornetlabs/synchdb/7433f60bba5133f17c33d29ac3dc5c705c3db63c/src/test/regress/results/regression.diffs -------------------------------------------------------------------------------- /src/test/regress/sql/synchdb.sql: -------------------------------------------------------------------------------- 1 | CREATE EXTENSION synchdb CASCADE; 2 | 3 | \d 4 | 5 | SELECT synchdb_add_conninfo('mysqlconn','127.0.0.1', 3306, 'mysqluser', 'mysqlpwd', 'inventory', 
'postgres', 'inventory.orders,inventory.customers', 'null', 'mysql'); 6 | SELECT synchdb_add_conninfo('sqlserverconn','127.0.0.1', 1433, 'sa', 'Password!', 'testDB', 'postgres', 'null', 'null', 'sqlserver'); 7 | SELECT synchdb_add_conninfo('oracleconn','127.0.0.1', 1521, 'c##dbzuser', 'dbz', 'mydb', 'postgres', 'null', 'null', 'oracle'); 8 | SELECT synchdb_add_conninfo('errorconn','127.0.0.1', 1521, 'c##dbzuser', 'dbz', 'mydb', 'postgres', 'null', 'null', 'nonexist'); 9 | 10 | SELECT name, isactive, data->'hostname' AS hostname, data->'port' AS port, data->'user' AS user, data->'srcdb' AS srcdb, data->'table' AS table, data->'snapshottable' AS snapshottable, data->'connector' AS connector FROM synchdb_conninfo; 11 | 12 | SELECT synchdb_add_extra_conninfo('mysqlconn', 'verufy_ca', 'keystore1', 'keystorepass', 'truststore1', 'truststorepass'); 13 | SELECT synchdb_add_extra_conninfo('sqlserverconn', 'verufy_ca', 'keystore2', 'keystorepass', 'truststore2', 'truststorepass'); 14 | SELECT synchdb_add_extra_conninfo('oracleconn', 'verufy_ca', 'keystore3', 'keystorepass', 'truststore3', 'truststorepass'); 15 | 16 | SELECT synchdb_del_extra_conninfo('mysqlconn'); 17 | SELECT synchdb_del_extra_conninfo('sqlserverconn'); 18 | SELECT synchdb_del_extra_conninfo('oracleconn'); 19 | 20 | SELECT name, isactive, data->'hostname' AS hostname, data->'port' AS port, data->'user' AS user, data->'srcdb' AS srcdb, data->'table' AS table, data->'snapshottable' AS snapshottable, data->'connector' AS connector FROM synchdb_conninfo; 21 | 22 | SELECT data->'ssl_mode' AS ssl_mode, data->'ssl_keystore' AS ssl_keystore, data->'ssl_truststore' AS ssl_truststore FROM synchdb_conninfo; 23 | 24 | SELECT synchdb_add_objmap('mysqlconn', 'table', 'ext_db1.ext_table1', 'pg_table1'); 25 | SELECT synchdb_add_objmap('mysqlconn', 'column', 'ext_db1.ext_table1.ext_column1', 'pg_column1'); 26 | SELECT synchdb_add_objmap('mysqlconn', 'datatype', 'int', 'bigint'); 27 | SELECT synchdb_add_objmap('mysqlconn', 
'datatype', 'ext_db1.ext_table1.ext_column1', 'text'); 28 | SELECT synchdb_add_objmap('mysqlconn', 'transform', 'ext_db1.ext_table1.ext_column1', '''>>>>>'' || ''%d'' || ''<<<<<'''); 29 | 30 | SELECT synchdb_add_objmap('sqlserverconn', 'table', 'ext_db1.ext_table2', 'pg_table2'); 31 | SELECT synchdb_add_objmap('sqlserverconn', 'column', 'ext_db1.ext_table2.ext_column1', 'pg_column2'); 32 | SELECT synchdb_add_objmap('sqlserverconn', 'datatype', 'nchar', 'test'); 33 | SELECT synchdb_add_objmap('sqlserverconn', 'datatype', 'ext_db1.ext_table2.ext_column1', 'datetime'); 34 | SELECT synchdb_add_objmap('sqlserverconn', 'transform', 'ext_db1.ext_table2.ext_column1', '''>>>>>'' || ''%d'' || ''<<<<<'''); 35 | 36 | SELECT synchdb_add_objmap('oracleconn', 'table', 'ext_db1.ext_table3', 'pg_table3'); 37 | SELECT synchdb_add_objmap('oracleconn', 'column', 'ext_db1.ext_table3.ext_column1', 'pg_column3'); 38 | SELECT synchdb_add_objmap('oracleconn', 'datatype', 'number', 'bigint'); 39 | SELECT synchdb_add_objmap('oracleconn', 'datatype', 'ext_db1.ext_table3.ext_column1', 'varchar'); 40 | SELECT synchdb_add_objmap('oracleconn', 'transform', 'ext_db1.ext_table3.ext_column1', '''>>>>>'' || ''%d'' || ''<<<<<'''); 41 | 42 | SELECT synchdb_add_objmap('oracleconn', 'notexit', 'notexist', 'notexist'); 43 | 44 | SELECT * FROM synchdb_objmap; 45 | 46 | SELECT synchdb_del_objmap('mysqlconn', 'table', 'ext_db1.ext_table1'); 47 | SELECT synchdb_del_objmap('mysqlconn', 'column', 'ext_db1.ext_table1.ext_column1'); 48 | SELECT synchdb_del_objmap('mysqlconn', 'datatype', 'int'); 49 | SELECT synchdb_del_objmap('mysqlconn', 'datatype', 'ext_db1.ext_table1.ext_column1'); 50 | SELECT synchdb_del_objmap('mysqlconn', 'transform', 'ext_db1.ext_table1.ext_column1'); 51 | 52 | SELECT synchdb_del_objmap('sqlserverconn', 'table', 'ext_db1.ext_table2'); 53 | SELECT synchdb_del_objmap('sqlserverconn', 'column', 'ext_db1.ext_table2.ext_column1'); 54 | SELECT synchdb_del_objmap('sqlserverconn', 'datatype', 
'nchar'); 55 | SELECT synchdb_del_objmap('sqlserverconn', 'datatype', 'ext_db1.ext_table2.ext_column1'); 56 | SELECT synchdb_del_objmap('sqlserverconn', 'transform', 'ext_db1.ext_table2.ext_column1'); 57 | 58 | SELECT synchdb_del_objmap('oracleconn', 'table', 'ext_db1.ext_table3'); 59 | SELECT synchdb_del_objmap('oracleconn', 'column', 'ext_db1.ext_table3.ext_column1'); 60 | SELECT synchdb_del_objmap('oracleconn', 'datatype', 'number'); 61 | SELECT synchdb_del_objmap('oracleconn', 'datatype', 'ext_db1.ext_table3.ext_column1'); 62 | SELECT synchdb_del_objmap('oracleconn', 'transform', 'ext_db1.ext_table3.ext_column1'); 63 | 64 | SELECT * FROM synchdb_objmap; 65 | 66 | SELECT synchdb_del_conninfo('mysqlconn'); 67 | SELECT synchdb_del_conninfo('sqlserverconn'); 68 | SELECT synchdb_del_conninfo('oracleconn'); 69 | 70 | SELECT name, isactive, data->'hostname' AS hostname, data->'port' AS port, data->'user' AS user, data->'srcdb' AS srcdb, data->'table' AS table, data->'snapshottable' AS snapshottable, data->'connector' AS connector FROM synchdb_conninfo; 71 | SELECT * FROM synchdb_objmap; 72 | 73 | DROP EXTENSION synchdb; 74 | -------------------------------------------------------------------------------- /src/test/scripts/db_config.conf: -------------------------------------------------------------------------------- 1 | #----------------------------------------------------------------------------- 2 | # MySQL Connection Settings 3 | #----------------------------------------------------------------------------- 4 | # Database user credentials and connection details 5 | DB_USER="root" # MySQL username (root or user with sufficient privileges) 6 | DB_PASS="mysqlpwdroot" # MySQL password for authentication 7 | DB_HOST="127.0.0.1" # Database host (localhost for local Docker container) 8 | DB_PORT="3306" # MySQL port (default is 3306) 9 | DB_NAME="localtest" # Target database name 10 | 11 | #----------------------------------------------------------------------------- 12 | 
# Performance Configuration 13 | #----------------------------------------------------------------------------- 14 | # Parallel Processing Settings 15 | NUM_INSTANCES=10 # Number of parallel processes to run 16 | # Recommended: Leave 2 cores free for system operations 17 | # Example: On a 12-core system, use 10 instances 18 | 19 | # Table Creation Settings 20 | TABLES_PER_INSTANCE=100 # Number of tables each instance will create 21 | # Total tables = NUM_INSTANCES × TABLES_PER_INSTANCE 22 | # Example: 10 instances × 100 tables = 1,000 total tables 23 | 24 | # Data Population Settings 25 | RECORDS_PER_TABLE=100 # Number of records to insert into each table 26 | # Total records = Total tables × RECORDS_PER_TABLE 27 | # Example: 1,000 tables × 100 records = 100,000 total records 28 | 29 | # Batch Processing Settings 30 | BATCH_SIZE=500 # Number of operations to group in one SQL transaction 31 | # Larger batch size = faster but uses more memory 32 | # Recommended: 500-1000 for optimal performance 33 | 34 | #----------------------------------------------------------------------------- 35 | # Database Management 36 | #----------------------------------------------------------------------------- 37 | RECREATE_DATABASE=false # Database initialization option: 38 | # true = Drop and recreate database if it exists 39 | # false = Use existing database if it exists 40 | 41 | RECREATE_TABLES=false # Table initialization option: 42 | # true = Drop and recreate tables if they exist 43 | # false = Skip existing tables and create only new ones 44 | 45 | #----------------------------------------------------------------------------- 46 | # Performance Impact Guidelines 47 | #----------------------------------------------------------------------------- 48 | # 1. NUM_INSTANCES: Affects CPU usage and parallel processing capability 49 | # - Higher value = faster processing but more system load 50 | # - Lower value = slower processing but less system load 51 | # 52 | # 2. 
BATCH_SIZE: Affects memory usage and transaction speed 53 | # - Higher value = faster but more memory intensive 54 | # - Lower value = slower but less memory intensive 55 | # 56 | # 3. RECORDS_PER_TABLE: Affects total execution time and database size 57 | # - Consider available disk space and time constraints 58 | # 59 | # 4. Total Resource Calculation: 60 | # - Total Tables = NUM_INSTANCES × TABLES_PER_INSTANCE 61 | # - Total Records = Total Tables × RECORDS_PER_TABLE 62 | # - Estimated Size = Total Records × ~100 bytes per record -------------------------------------------------------------------------------- /synchdb--1.0.sql: -------------------------------------------------------------------------------- 1 | --complain if script is sourced in psql, rather than via CREATE EXTENSION 2 | \echo Use "CREATE EXTENSION synchdb" to load this file. \quit 3 | 4 | CREATE OR REPLACE FUNCTION synchdb_start_engine_bgw(name) RETURNS int 5 | AS '$libdir/synchdb', 'synchdb_start_engine_bgw' 6 | LANGUAGE C IMMUTABLE STRICT; 7 | 8 | CREATE OR REPLACE FUNCTION synchdb_start_engine_bgw(name, name) RETURNS int 9 | AS '$libdir/synchdb', 'synchdb_start_engine_bgw_snapshot_mode' 10 | LANGUAGE C IMMUTABLE STRICT; 11 | 12 | CREATE OR REPLACE FUNCTION synchdb_stop_engine_bgw(name) RETURNS int 13 | AS '$libdir/synchdb' 14 | LANGUAGE C IMMUTABLE STRICT; 15 | 16 | CREATE OR REPLACE FUNCTION synchdb_get_state() RETURNS SETOF record 17 | AS '$libdir/synchdb' 18 | LANGUAGE C IMMUTABLE STRICT; 19 | 20 | CREATE VIEW synchdb_state_view AS SELECT * FROM synchdb_get_state() AS (name text, connector_type text, pid int, stage text, state text, err text, last_dbz_offset text); 21 | 22 | CREATE OR REPLACE FUNCTION synchdb_pause_engine(name) RETURNS int 23 | AS '$libdir/synchdb' 24 | LANGUAGE C IMMUTABLE STRICT; 25 | 26 | CREATE OR REPLACE FUNCTION synchdb_resume_engine(name) RETURNS int 27 | AS '$libdir/synchdb' 28 | LANGUAGE C IMMUTABLE STRICT; 29 | 30 | CREATE OR REPLACE FUNCTION 
synchdb_set_offset(name, text) RETURNS int 31 | AS '$libdir/synchdb' 32 | LANGUAGE C IMMUTABLE STRICT; 33 | 34 | CREATE OR REPLACE FUNCTION synchdb_add_conninfo(name, text, int, text, text, text, text, text, text, text) RETURNS int 35 | AS '$libdir/synchdb' 36 | LANGUAGE C IMMUTABLE STRICT; 37 | 38 | CREATE OR REPLACE FUNCTION synchdb_restart_connector(name, name) RETURNS int 39 | AS '$libdir/synchdb' 40 | LANGUAGE C IMMUTABLE STRICT; 41 | 42 | CREATE OR REPLACE FUNCTION synchdb_log_jvm_meminfo(name) RETURNS int 43 | AS '$libdir/synchdb' 44 | LANGUAGE C IMMUTABLE STRICT; 45 | 46 | CREATE OR REPLACE FUNCTION synchdb_get_stats() RETURNS SETOF record 47 | AS '$libdir/synchdb' 48 | LANGUAGE C IMMUTABLE STRICT; 49 | 50 | CREATE OR REPLACE FUNCTION synchdb_reset_stats(name) RETURNS int 51 | AS '$libdir/synchdb' 52 | LANGUAGE C IMMUTABLE STRICT; 53 | 54 | CREATE VIEW synchdb_stats_view AS SELECT * FROM synchdb_get_stats() AS (name text, ddls bigint, dmls bigint, reads bigint, creates bigint, updates bigint, deletes bigint, bad_events bigint, total_events bigint, batches_done bigint, avg_batch_size bigint, first_src_ts bigint, first_dbz_ts bigint, first_pg_ts bigint, last_src_ts bigint, last_dbz_ts bigint, last_pg_ts bigint); 55 | 56 | CREATE TABLE IF NOT EXISTS synchdb_conninfo(name TEXT PRIMARY KEY, isactive BOOL, data JSONB); 57 | 58 | CREATE TABLE IF NOT EXISTS synchdb_attribute ( 59 | name name, 60 | type name, 61 | attrelid oid, 62 | attnum smallint, 63 | ext_tbname name, 64 | ext_attname name, 65 | ext_atttypename name, 66 | PRIMARY KEY (name, type, attrelid, attnum) 67 | ); 68 | 69 | CREATE OR REPLACE FUNCTION synchdb_add_objmap(name, name, name, text) RETURNS int 70 | AS '$libdir/synchdb' 71 | LANGUAGE C IMMUTABLE STRICT; 72 | 73 | CREATE OR REPLACE FUNCTION synchdb_reload_objmap(name) RETURNS int 74 | AS '$libdir/synchdb' 75 | LANGUAGE C IMMUTABLE STRICT; 76 | 77 | CREATE TABLE IF NOT EXISTS synchdb_objmap ( 78 | name name, 79 | objtype name, 80 | enabled bool, 
81 | srcobj name, 82 | dstobj text, 83 | PRIMARY KEY (name, objtype, srcobj) 84 | ); 85 | 86 | CREATE VIEW synchdb_att_view AS 87 | SELECT 88 | name, 89 | type, 90 | synchdb_attribute.attnum, 91 | ext_tbname, 92 | (SELECT n.nspname || '.' || c.relname AS table_full_name FROM pg_class c JOIN pg_namespace n ON c.relnamespace = n.oid WHERE c.oid=pg_attribute.attrelid) AS pg_tbname, 93 | synchdb_attribute.ext_attname, 94 | pg_attribute.attname AS pg_attname, 95 | synchdb_attribute.ext_atttypename, 96 | format_type(pg_attribute.atttypid, NULL) AS pg_atttypename, 97 | (SELECT dstobj FROM synchdb_objmap WHERE synchdb_objmap.objtype='transform' AND synchdb_objmap.enabled=true AND synchdb_objmap.srcobj = synchdb_attribute.ext_tbname || '.' || synchdb_attribute.ext_attname) AS transform 98 | FROM synchdb_attribute 99 | LEFT JOIN pg_attribute 100 | ON synchdb_attribute.attrelid = pg_attribute.attrelid 101 | AND synchdb_attribute.attnum = pg_attribute.attnum 102 | ORDER BY (name, type, ext_tbname, synchdb_attribute.attnum); 103 | 104 | CREATE OR REPLACE FUNCTION synchdb_add_extra_conninfo(name, name, text, text, text, text) RETURNS int 105 | AS '$libdir/synchdb' 106 | LANGUAGE C IMMUTABLE STRICT; 107 | 108 | CREATE OR REPLACE FUNCTION synchdb_del_extra_conninfo(name) RETURNS int 109 | AS '$libdir/synchdb' 110 | LANGUAGE C IMMUTABLE STRICT; 111 | 112 | CREATE OR REPLACE FUNCTION synchdb_del_conninfo(name) RETURNS int 113 | AS '$libdir/synchdb' 114 | LANGUAGE C IMMUTABLE STRICT; 115 | 116 | CREATE OR REPLACE FUNCTION synchdb_del_objmap(name, name, name) RETURNS int 117 | AS '$libdir/synchdb' 118 | LANGUAGE C IMMUTABLE STRICT; 119 | 120 | CREATE OR REPLACE FUNCTION synchdb_add_jmx_conninfo(name, text, int, text, int, bool, text, text, bool, text, text, text, text) RETURNS int 121 | AS '$libdir/synchdb' 122 | LANGUAGE C IMMUTABLE STRICT; 123 | 124 | CREATE OR REPLACE FUNCTION synchdb_del_jmx_conninfo(name) RETURNS int 125 | AS '$libdir/synchdb' 126 | LANGUAGE C IMMUTABLE STRICT; 
127 | 128 | CREATE OR REPLACE FUNCTION synchdb_add_jmx_exporter_conninfo(name, text, int, text) RETURNS int 129 | AS '$libdir/synchdb' 130 | LANGUAGE C IMMUTABLE STRICT; 131 | 132 | CREATE OR REPLACE FUNCTION synchdb_del_jmx_exporter_conninfo(name) RETURNS int 133 | AS '$libdir/synchdb' 134 | LANGUAGE C IMMUTABLE STRICT; 135 | 136 | CREATE OR REPLACE FUNCTION synchdb_add_olr_conninfo(name, text, int, text) RETURNS int 137 | AS '$libdir/synchdb' 138 | LANGUAGE C IMMUTABLE STRICT; 139 | 140 | CREATE OR REPLACE FUNCTION synchdb_del_olr_conninfo(name) RETURNS int 141 | AS '$libdir/synchdb' 142 | LANGUAGE C IMMUTABLE STRICT; 143 | 144 | CREATE OR REPLACE FUNCTION synchdb_add_infinispan(name, name, int) RETURNS int 145 | AS '$libdir/synchdb' 146 | LANGUAGE C IMMUTABLE STRICT; 147 | 148 | CREATE OR REPLACE FUNCTION synchdb_del_infinispan(name) RETURNS int 149 | AS '$libdir/synchdb' 150 | LANGUAGE C IMMUTABLE STRICT; 151 | -------------------------------------------------------------------------------- /synchdb.control: -------------------------------------------------------------------------------- 1 | # synchdb postgresql extension 2 | comment = 'synchdb extension' 3 | default_version = '1.0' 4 | module_pathname = '$libdir/synchdb' 5 | relocatable = true 6 | requires = 'pgcrypto' 7 | 8 | -------------------------------------------------------------------------------- /testenv/hammerdb/mysql_buildschema.tcl: -------------------------------------------------------------------------------- 1 | dbset db mysql 2 | diset connection mysql_host mysql 3 | diset connection mysql_port 3306 4 | diset tpcc mysql_user mysqluser 5 | diset tpcc mysql_pass mysqlpwd 6 | diset tpcc mysql_dbase tpcc 7 | diset tpcc mysql_count_ware 1 8 | diset tpcc mysql_num_vu 1 9 | buildschema 10 | exit 11 | -------------------------------------------------------------------------------- /testenv/hammerdb/mysql_runtpcc.tcl: -------------------------------------------------------------------------------- 1 
| dbset db mysql 2 | diset connection mysql_host mysql 3 | diset connection mysql_port 3306 4 | diset tpcc mysql_user mysqluser 5 | diset tpcc mysql_pass mysqlpwd 6 | diset tpcc mysql_dbase tpcc 7 | vuset vu 1 8 | vurun 9 | exit 10 | 11 | -------------------------------------------------------------------------------- /testenv/hammerdb/oracle_buildschema.tcl: -------------------------------------------------------------------------------- 1 | dbset db ora 2 | diset connection system_user c##dbzuser 3 | diset connection system_password dbz 4 | diset connection instance "oracle:1521/FREE" 5 | diset tpcc tpcc_user c##dbzuser 6 | diset tpcc tpcc_pass dbz 7 | diset tpcc tpcc_def_tab LOGMINER_TBS 8 | buildschema 9 | exit 10 | -------------------------------------------------------------------------------- /testenv/hammerdb/oracle_runtpcc.tcl: -------------------------------------------------------------------------------- 1 | dbset db ora 2 | diset connection system_user c##dbzuser 3 | diset connection system_password dbz 4 | diset connection instance "oracle:1521/FREE" 5 | diset tpcc tpcc_user c##dbzuser 6 | diset tpcc tpcc_pass dbz 7 | diset tpcc tpcc_def_tab LOGMINER_TBS 8 | vurun 9 | exit 10 | 11 | -------------------------------------------------------------------------------- /testenv/hammerdb/sqlserver_buildschema.tcl: -------------------------------------------------------------------------------- 1 | dbset db mssqls 2 | diset connection mssqls_server sqlserver 3 | diset connection mssqls_linux_server sqlserver 4 | diset connection mssqls_port 1433 5 | diset connection mssqls_uid sa 6 | diset connection mssqls_pass Password! 
7 | diset connection mssqls_encrypt_connection false 8 | diset tpcc mssqls_count_ware 1 9 | diset tpcc mssqls_num_vu 1 10 | buildschema 11 | exit 12 | -------------------------------------------------------------------------------- /testenv/hammerdb/sqlserver_runtpcc.tcl: -------------------------------------------------------------------------------- 1 | diset connection mssqls_server sqlserver 2 | diset connection mssqls_linux_server sqlserver 3 | diset connection mssqls_port 1433 4 | diset connection mssqls_uid sa 5 | diset connection mssqls_pass Password! 6 | diset connection mssqls_encrypt_connection false 7 | diset tpcc mssqls_count_ware 1 8 | diset tpcc mssqls_num_vu 1 9 | vuset vu 1 10 | vurun 11 | exit 12 | 13 | -------------------------------------------------------------------------------- /testenv/mysql/synchdb-mysql-test-internal.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # this yaml deploys mysql and connects it to internal docker network 3 | # for internal testing 4 | # 5 | 6 | services: 7 | mysql: 8 | container_name: mysql 9 | image: quay.io/debezium/example-mysql:2.6 10 | environment: 11 | - MYSQL_ROOT_PASSWORD=mysqlpwdroot 12 | - MYSQL_USER=mysqluser 13 | - MYSQL_PASSWORD=mysqlpwd 14 | networks: 15 | - synchdbnet 16 | 17 | networks: 18 | synchdbnet: 19 | name: synchdbnet 20 | driver: bridge 21 | internal: true 22 | -------------------------------------------------------------------------------- /testenv/mysql/synchdb-mysql-test.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # this yaml deploys mysql and exposes service ports 3 | # 4 | 5 | services: 6 | mysql: 7 | container_name: mysql 8 | image: quay.io/debezium/example-mysql:2.6 9 | ports: 10 | - 3306:3306 11 | environment: 12 | - MYSQL_ROOT_PASSWORD=mysqlpwdroot 13 | - MYSQL_USER=mysqluser 14 | - MYSQL_PASSWORD=mysqlpwd 15 | 
-------------------------------------------------------------------------------- /testenv/olr/1.3.0/OpenLogReplicator.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.3.0", 3 | "source": [ 4 | { 5 | "alias": "SOURCE", 6 | "name": "ORACLE", 7 | "reader": { 8 | "type": "online", 9 | "user": "DBZUSER", 10 | "password": "dbz", 11 | "server": "//ora19c:1521/FREE" 12 | }, 13 | "format": { 14 | "type": "json", 15 | "column": 2, 16 | "db": 3, 17 | "interval-dts": 9, 18 | "interval-ytm": 4, 19 | "message": 2, 20 | "rid": 1, 21 | "schema": 7, 22 | "timestamp-all": 1, 23 | "scn-all": 1 24 | }, 25 | "memory": { 26 | "min-mb": 64, 27 | "max-mb": 1024 28 | }, 29 | "filter": { 30 | "table": [ 31 | {"owner": "DBZUSER", "table": ".*"} 32 | ] 33 | }, 34 | "flags": 32 35 | } 36 | ], 37 | "target": [ 38 | { 39 | "alias": "DEBEZIUM", 40 | "source": "SOURCE", 41 | "writer": { 42 | "type": "network", 43 | "uri": "0.0.0.0:7070" 44 | } 45 | } 46 | ] 47 | } 48 | -------------------------------------------------------------------------------- /testenv/olr/1.7.0/OpenLogReplicator.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.7.0", 3 | "source": [ 4 | { 5 | "alias": "SOURCE", 6 | "name": "ORACLE", 7 | "reader": { 8 | "type": "online", 9 | "user": "DBZUSER", 10 | "password": "dbz", 11 | "server": "//ora19c:1521/FREE" 12 | }, 13 | "format": { 14 | "type": "json", 15 | "column": 2, 16 | "db": 3, 17 | "interval-dts": 9, 18 | "interval-ytm": 4, 19 | "message": 2, 20 | "rid": 1, 21 | "schema": 7, 22 | "timestamp-all": 1, 23 | "scn-all": 1 24 | }, 25 | "memory": { 26 | "min-mb": 64, 27 | "max-mb": 1024 28 | }, 29 | "filter": { 30 | "table": [ 31 | {"owner": "DBZUSER", "table": ".*"} 32 | ] 33 | }, 34 | "flags": 32 35 | } 36 | ], 37 | "target": [ 38 | { 39 | "alias": "DEBEZIUM", 40 | "source": "SOURCE", 41 | "writer": { 42 | "type": "network", 43 | "uri": "0.0.0.0:7070" 44 | } 
45 | } 46 | ] 47 | } 48 | -------------------------------------------------------------------------------- /testenv/olr/1.8.5/OpenLogReplicator.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.8.5", 3 | "source": [ 4 | { 5 | "alias": "SOURCE", 6 | "name": "ORACLE", 7 | "reader": { 8 | "type": "online", 9 | "user": "DBZUSER", 10 | "password": "dbz", 11 | "server": "//ora19c:1521/FREE" 12 | }, 13 | "format": { 14 | "type": "json", 15 | "column": 2, 16 | "db": 3, 17 | "interval-dts": 9, 18 | "interval-ytm": 4, 19 | "message": 2, 20 | "rid": 1, 21 | "schema": 7, 22 | "timestamp-all": 1, 23 | "scn-type": 1 24 | }, 25 | "memory": { 26 | "min-mb": 256, 27 | "max-mb": 512, 28 | "swap-path": "/opt/OpenLogReplicator/olrswap" 29 | }, 30 | "filter": { 31 | "table": [ 32 | {"owner": "DBZUSER", "table": ".*"} 33 | ] 34 | }, 35 | "flags": 32 36 | } 37 | ], 38 | "target": [ 39 | { 40 | "alias": "DEBEZIUM", 41 | "source": "SOURCE", 42 | "writer": { 43 | "type": "network", 44 | "uri": "0.0.0.0:7070" 45 | } 46 | } 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /testenv/olr/synchdb-olr-test.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # this yaml deploys oracle and exposes service ports 3 | # 4 | 5 | services: 6 | openlogreplicator: 7 | container_name: OpenLogReplicator 8 | image: hgneon/openlogreplicator:${OLRVER} 9 | ports: 10 | - 7070:7070 11 | user: "54321:54321" 12 | volumes: 13 | - /opt/fast-recovery-area:/opt/fast-recovery-area 14 | - ./${OLRVER}:/opt/OpenLogReplicator/scripts 15 | - ./checkpoint:/opt/OpenLogReplicator/checkpoint 16 | - ./oradata:/opt/oracle/oradata 17 | - ./olrswap:/opt/OpenLogReplicator/olrswap 18 | 19 | networks: 20 | - synchdbnet 21 | 22 | networks: 23 | synchdbnet: 24 | name: synchdbnet 25 | driver: bridge 26 | internal: true 27 | 28 | 
-------------------------------------------------------------------------------- /testenv/ora19c/synchdb-ora19c-test-internal.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # this yaml deploys ora19c and connects it to internal docker network for internal testing. 3 | # 4 | 5 | services: 6 | ora19c: 7 | container_name: ora19c 8 | image: doctorkirk/oracle-19c 9 | environment: 10 | - ORACLE_SID=FREE 11 | - ORACLE_PWD=oracle 12 | - ORACLE_CHARACTERSET=AL32UTF8 13 | 14 | networks: 15 | - synchdbnet 16 | 17 | networks: 18 | synchdbnet: 19 | name: synchdbnet 20 | driver: bridge 21 | internal: true 22 | 23 | -------------------------------------------------------------------------------- /testenv/ora19c/synchdb-ora19c-test-olr-internal.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # this yaml deploys ora19c and connects it to internal docker network for internal testing. 3 | # 4 | 5 | services: 6 | ora19c: 7 | container_name: ora19c 8 | image: doctorkirk/oracle-19c 9 | environment: 10 | - ORACLE_SID=FREE 11 | - ORACLE_PWD=oracle 12 | - ORACLE_CHARACTERSET=AL32UTF8 13 | networks: 14 | - synchdbnet 15 | volumes: 16 | - ../olr/oradata:/opt/oracle/oradata 17 | 18 | networks: 19 | synchdbnet: 20 | name: synchdbnet 21 | driver: bridge 22 | internal: true 23 | 24 | -------------------------------------------------------------------------------- /testenv/ora19c/synchdb-ora19c-test-olr.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # this yaml deploys ora19c and exposes service ports. 
3 | # 4 | 5 | services: 6 | ora19c: 7 | container_name: ora19c 8 | image: doctorkirk/oracle-19c 9 | ports: 10 | - 1521:1521 11 | environment: 12 | - ORACLE_SID=FREE 13 | - ORACLE_PWD=oracle 14 | - ORACLE_CHARACTERSET=AL32UTF8 15 | volumes: 16 | - ../olr/oradata:/opt/oracle/oradata 17 | networks: 18 | - synchdbnet 19 | 20 | networks: 21 | synchdbnet: 22 | name: synchdbnet 23 | driver: bridge 24 | internal: true 25 | -------------------------------------------------------------------------------- /testenv/ora19c/synchdb-ora19c-test.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # this yaml deploys ora19c and exposes service ports. 3 | # 4 | 5 | services: 6 | ora19c: 7 | container_name: ora19c 8 | image: doctorkirk/oracle-19c 9 | ports: 10 | - 1521:1521 11 | environment: 12 | - ORACLE_SID=FREE 13 | - ORACLE_PWD=oracle 14 | - ORACLE_CHARACTERSET=AL32UTF8 15 | -------------------------------------------------------------------------------- /testenv/oracle/synchdb-oracle-test-internal.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # this yaml deploys oracle and connects it to internal 3 | # docker network for internal testing 4 | # 5 | 6 | services: 7 | oracle: 8 | container_name: oracle 9 | image: hgneon/testdb-oracle:23ai 10 | networks: 11 | - synchdbnet 12 | 13 | networks: 14 | synchdbnet: 15 | name: synchdbnet 16 | driver: bridge 17 | internal: true 18 | -------------------------------------------------------------------------------- /testenv/oracle/synchdb-oracle-test.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # this yaml deploys oracle and exposes service ports 3 | # 4 | 5 | services: 6 | oracle: 7 | container_name: oracle 8 | image: hgneon/testdb-oracle:23ai 9 | ports: 10 | - 1521:1521 11 | -------------------------------------------------------------------------------- /testenv/sqlserver/inventory.sql: 
-------------------------------------------------------------------------------- 1 | -- Create the test database 2 | CREATE DATABASE testDB; 3 | GO 4 | USE testDB; 5 | EXEC sys.sp_cdc_enable_db; 6 | 7 | -- Create and populate our products using a single insert with many rows 8 | CREATE TABLE products ( 9 | id INTEGER IDENTITY(101,1) NOT NULL PRIMARY KEY, 10 | name VARCHAR(255) NOT NULL, 11 | description VARCHAR(512), 12 | weight FLOAT 13 | ); 14 | INSERT INTO products(name,description,weight) 15 | VALUES ('scooter','Small 2-wheel scooter',3.14); 16 | INSERT INTO products(name,description,weight) 17 | VALUES ('car battery','12V car battery',8.1); 18 | INSERT INTO products(name,description,weight) 19 | VALUES ('12-pack drill bits','12-pack of drill bits with sizes ranging from #40 to #3',0.8); 20 | INSERT INTO products(name,description,weight) 21 | VALUES ('hammer','12oz carpenter''s hammer',0.75); 22 | INSERT INTO products(name,description,weight) 23 | VALUES ('hammer','14oz carpenter''s hammer',0.875); 24 | INSERT INTO products(name,description,weight) 25 | VALUES ('hammer','16oz carpenter''s hammer',1.0); 26 | INSERT INTO products(name,description,weight) 27 | VALUES ('rocks','box of assorted rocks',5.3); 28 | INSERT INTO products(name,description,weight) 29 | VALUES ('jacket','water resistent black wind breaker',0.1); 30 | INSERT INTO products(name,description,weight) 31 | VALUES ('spare tire','24 inch spare tire',22.2); 32 | EXEC sys.sp_cdc_enable_table @source_schema = 'dbo', @source_name = 'products', @role_name = NULL, @supports_net_changes = 0; 33 | -- Create and populate the products on hand using multiple inserts 34 | CREATE TABLE products_on_hand ( 35 | product_id INTEGER NOT NULL PRIMARY KEY, 36 | quantity INTEGER NOT NULL, 37 | FOREIGN KEY (product_id) REFERENCES products(id) 38 | ); 39 | INSERT INTO products_on_hand VALUES (101,3); 40 | INSERT INTO products_on_hand VALUES (102,8); 41 | INSERT INTO products_on_hand VALUES (103,18); 42 | INSERT INTO 
products_on_hand VALUES (104,4); 43 | INSERT INTO products_on_hand VALUES (105,5); 44 | INSERT INTO products_on_hand VALUES (106,0); 45 | INSERT INTO products_on_hand VALUES (107,44); 46 | INSERT INTO products_on_hand VALUES (108,2); 47 | INSERT INTO products_on_hand VALUES (109,5); 48 | EXEC sys.sp_cdc_enable_table @source_schema = 'dbo', @source_name = 'products_on_hand', @role_name = NULL, @supports_net_changes = 0; 49 | -- Create some customers ... 50 | CREATE TABLE customers ( 51 | id INTEGER IDENTITY(1001,1) NOT NULL PRIMARY KEY, 52 | first_name VARCHAR(255) NOT NULL, 53 | last_name VARCHAR(255) NOT NULL, 54 | email VARCHAR(255) NOT NULL UNIQUE 55 | ); 56 | INSERT INTO customers(first_name,last_name,email) 57 | VALUES ('Sally','Thomas','sally.thomas@acme.com'); 58 | INSERT INTO customers(first_name,last_name,email) 59 | VALUES ('George','Bailey','gbailey@foobar.com'); 60 | INSERT INTO customers(first_name,last_name,email) 61 | VALUES ('Edward','Walker','ed@walker.com'); 62 | INSERT INTO customers(first_name,last_name,email) 63 | VALUES ('Anne','Kretchmar','annek@noanswer.org'); 64 | EXEC sys.sp_cdc_enable_table @source_schema = 'dbo', @source_name = 'customers', @role_name = NULL, @supports_net_changes = 0; 65 | -- Create some very simple orders 66 | CREATE TABLE orders ( 67 | order_number INTEGER IDENTITY(10001,1) NOT NULL PRIMARY KEY, 68 | order_date DATE NOT NULL, 69 | purchaser INTEGER NOT NULL, 70 | quantity INTEGER NOT NULL, 71 | product_id INTEGER NOT NULL, 72 | FOREIGN KEY (purchaser) REFERENCES customers(id), 73 | FOREIGN KEY (product_id) REFERENCES products(id) 74 | ); 75 | INSERT INTO orders(order_date,purchaser,quantity,product_id) 76 | VALUES ('16-JAN-2016', 1001, 1, 102); 77 | INSERT INTO orders(order_date,purchaser,quantity,product_id) 78 | VALUES ('17-JAN-2016', 1002, 2, 105); 79 | INSERT INTO orders(order_date,purchaser,quantity,product_id) 80 | VALUES ('19-FEB-2016', 1002, 2, 106); 81 | INSERT INTO 
orders(order_date,purchaser,quantity,product_id) 82 | VALUES ('21-FEB-2016', 1003, 1, 107); 83 | EXEC sys.sp_cdc_enable_table @source_schema = 'dbo', @source_name = 'orders', @role_name = NULL, @supports_net_changes = 0; 84 | GO 85 | -------------------------------------------------------------------------------- /testenv/sqlserver/synchdb-combined-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC4kfnufjmvZ5qA 3 | hdWn8TKOHcNRmxVe9zZeLdgLrsC4VNjS+ZbMFCZcDU/LcRaQqqP0oH/yIqc5rxsi 4 | lDtxj168U7Sv56vE6tITsFMbYaveLPt9uNxgy2yekiwEHuViIS5p2Tv0XStH/7sg 5 | TvyVoUSF+Mww4mkN6kUUXtAV+MBS08fGczXJoE1CigY4LylJBzj96SXDZZvHfDiR 6 | YuYKeaDv/nT1JPOsCZIsgLASeKGvi0L+DX4HPoXiRszduImibsm7MxH10wu1fQtO 7 | hVy/dIJgWIz4tqqNeNInNMFpVMX3E3m4AGeWihboACfD/U7280xykDySAm1KDpuD 8 | rIWhDmH3AgMBAAECggEACfBKXg5L+ucASdVrGN0DFOYDY3yPXQrA0bd0P8TMOeDP 9 | wCSSezDGlARffJv9VBLsOzr9SMOW76M/OdBzlvzaZBe5zhMSi8r9PvQqtXm/8HnJ 10 | nQ58R3YMxkkmt54WSf9xaV+6O8FcmEbsA6RdM414jCJGlIdMkuQ/jgZA/D3l1U/c 11 | q8T6uMQEaa3l6YcaN4TO6AJppoH3zsNOUm3JtueIbG2fVi0aUDWgHM7bVFdPHUMX 12 | PPE0PkydpMjtvoEvJSP2oIHXHcmlVJh5Vv5KPPnTGrweYnsbKQvGcnA0D0W69h05 13 | ggkiGL2ToLy/zlbOFjw+jrNGwl533UfaCNF6vxUa+QKBgQDlImTbT/YVIMrkt+KI 14 | HuX50Eb6se5hJLuOdOeH1GrylpIcIdLAY4k8QUMI/9nIbr3ryVoWbVXOSM9t2JSq 15 | zAu8OGUJssH+PxphsJdnZHfKPDp8EYxU8CSF42D42jG1o9OKGuwbtIeI6wD/WS7c 16 | Zfq6g9vPXAQDHrbG5wz1NKKsYwKBgQDONfaGKfXeDpKxP4S5DuzEqxuOKoU07KoZ 17 | DAHHDm58JziRtn145pMHLftEVDl5h3Fk12FGH3f9ynaI6vUtz9ms7HbdRYzfdBQI 18 | LXGtcnx1pvd3EiWlLpAJXa1x+3UE3/ejpLjiV/yUWWM55LRyWIJqHGsNsveBnlep 19 | nlO3jinWXQKBgQC1mWW/34bURfIoBYk3gu2X6TdoFz8rpLY1eHfL0lGUxDs2qqPi 20 | sMCijcK4TjZnwUUqq6GHps3buR8VGxLwrtkaKsQBRLi1bYPWKuREIH9EPCnKDQpg 21 | gxcfOg+wCxZctGPC0AWTC6hjnoVi22blbLmjuz+p/256nowT5ikB6d6HqwKBgQDC 22 | q2TggUpP6HgJhopYXcOdYl8wFeoM7YMjBBfFgFVDNY+M17eZ5CpW88yqU8yycu/U 23 | 
oBN/rIONfo3qcc0mA3teaRlJiLgV1zJ2CdiwQk1GkGo76VDKoKtvfBUabqWpBXp1 24 | 7TJw01lp/vzSkZrb+7yaSduyYgqQPhhJ02L6/OV4LQKBgQCiePcJjVDQOqyFkM+E 25 | blqn8R6OwjhnFEfATC50eGzThse/9PuE0+88HClIqHF6vsX0YcFELCDB5RsxvY93 26 | yYFSb7CEwAUuKUC6SmCdbbcMH0apIqYxLW1VkcbpdNtA/6fMbTzt1woDU12X6who 27 | SWa4Xc/ULy/nKZrUT/dogyIMhA== 28 | -----END PRIVATE KEY----- 29 | -----BEGIN CERTIFICATE----- 30 | MIIDBTCCAe2gAwIBAgIUbXGjZ6TAEbaASjOtqvGWA2P80lUwDQYJKoZIhvcNAQEL 31 | BQAwEjEQMA4GA1UEAwwHc3luY2hkYjAeFw0yNDA4MTMxNzU4MjVaFw0yNTA4MTMx 32 | NzU4MjVaMBIxEDAOBgNVBAMMB3N5bmNoZGIwggEiMA0GCSqGSIb3DQEBAQUAA4IB 33 | DwAwggEKAoIBAQC4kfnufjmvZ5qAhdWn8TKOHcNRmxVe9zZeLdgLrsC4VNjS+ZbM 34 | FCZcDU/LcRaQqqP0oH/yIqc5rxsilDtxj168U7Sv56vE6tITsFMbYaveLPt9uNxg 35 | y2yekiwEHuViIS5p2Tv0XStH/7sgTvyVoUSF+Mww4mkN6kUUXtAV+MBS08fGczXJ 36 | oE1CigY4LylJBzj96SXDZZvHfDiRYuYKeaDv/nT1JPOsCZIsgLASeKGvi0L+DX4H 37 | PoXiRszduImibsm7MxH10wu1fQtOhVy/dIJgWIz4tqqNeNInNMFpVMX3E3m4AGeW 38 | ihboACfD/U7280xykDySAm1KDpuDrIWhDmH3AgMBAAGjUzBRMB0GA1UdDgQWBBQ6 39 | PTr9xHjpcZaL9WAge10Klq9V+zAfBgNVHSMEGDAWgBQ6PTr9xHjpcZaL9WAge10K 40 | lq9V+zAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCeMlQyEup5 41 | 9nrNZyMB7trN5HsmNcSiC9vBKIiUouMD/n9c9H1W2//yRqwAzoXcE2OhWtJr6JBs 42 | Y45EPkcLKV77fEpp0wusG++0yFUV7fZ4AsnJwlDgixLFXrCAsAugQU4bXxr/6mfL 43 | GRdxcYqNCKDDlcFqqS1jnG9Qh3fK+FseFFsySJRFzJn8VkmLQ2eDKqfWLQ7cu/nW 44 | lD784TVTaFFK7dzgkH2KZgtcs9piOLdboGD+RY5aNpgTkTconERoTbsN52Reh4DV 45 | BdB/S4mENzjgOlCvtTQ8/ehYTsF7bcTGR/f4q7c9mMEBdJDIA3KTe/3l8FPqRT17 46 | HVXQ9b/AEIDU 47 | -----END CERTIFICATE----- 48 | -------------------------------------------------------------------------------- /testenv/sqlserver/synchdb-private.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC4kfnufjmvZ5qA 3 | hdWn8TKOHcNRmxVe9zZeLdgLrsC4VNjS+ZbMFCZcDU/LcRaQqqP0oH/yIqc5rxsi 4 | lDtxj168U7Sv56vE6tITsFMbYaveLPt9uNxgy2yekiwEHuViIS5p2Tv0XStH/7sg 5 | 
TvyVoUSF+Mww4mkN6kUUXtAV+MBS08fGczXJoE1CigY4LylJBzj96SXDZZvHfDiR 6 | YuYKeaDv/nT1JPOsCZIsgLASeKGvi0L+DX4HPoXiRszduImibsm7MxH10wu1fQtO 7 | hVy/dIJgWIz4tqqNeNInNMFpVMX3E3m4AGeWihboACfD/U7280xykDySAm1KDpuD 8 | rIWhDmH3AgMBAAECggEACfBKXg5L+ucASdVrGN0DFOYDY3yPXQrA0bd0P8TMOeDP 9 | wCSSezDGlARffJv9VBLsOzr9SMOW76M/OdBzlvzaZBe5zhMSi8r9PvQqtXm/8HnJ 10 | nQ58R3YMxkkmt54WSf9xaV+6O8FcmEbsA6RdM414jCJGlIdMkuQ/jgZA/D3l1U/c 11 | q8T6uMQEaa3l6YcaN4TO6AJppoH3zsNOUm3JtueIbG2fVi0aUDWgHM7bVFdPHUMX 12 | PPE0PkydpMjtvoEvJSP2oIHXHcmlVJh5Vv5KPPnTGrweYnsbKQvGcnA0D0W69h05 13 | ggkiGL2ToLy/zlbOFjw+jrNGwl533UfaCNF6vxUa+QKBgQDlImTbT/YVIMrkt+KI 14 | HuX50Eb6se5hJLuOdOeH1GrylpIcIdLAY4k8QUMI/9nIbr3ryVoWbVXOSM9t2JSq 15 | zAu8OGUJssH+PxphsJdnZHfKPDp8EYxU8CSF42D42jG1o9OKGuwbtIeI6wD/WS7c 16 | Zfq6g9vPXAQDHrbG5wz1NKKsYwKBgQDONfaGKfXeDpKxP4S5DuzEqxuOKoU07KoZ 17 | DAHHDm58JziRtn145pMHLftEVDl5h3Fk12FGH3f9ynaI6vUtz9ms7HbdRYzfdBQI 18 | LXGtcnx1pvd3EiWlLpAJXa1x+3UE3/ejpLjiV/yUWWM55LRyWIJqHGsNsveBnlep 19 | nlO3jinWXQKBgQC1mWW/34bURfIoBYk3gu2X6TdoFz8rpLY1eHfL0lGUxDs2qqPi 20 | sMCijcK4TjZnwUUqq6GHps3buR8VGxLwrtkaKsQBRLi1bYPWKuREIH9EPCnKDQpg 21 | gxcfOg+wCxZctGPC0AWTC6hjnoVi22blbLmjuz+p/256nowT5ikB6d6HqwKBgQDC 22 | q2TggUpP6HgJhopYXcOdYl8wFeoM7YMjBBfFgFVDNY+M17eZ5CpW88yqU8yycu/U 23 | oBN/rIONfo3qcc0mA3teaRlJiLgV1zJ2CdiwQk1GkGo76VDKoKtvfBUabqWpBXp1 24 | 7TJw01lp/vzSkZrb+7yaSduyYgqQPhhJ02L6/OV4LQKBgQCiePcJjVDQOqyFkM+E 25 | blqn8R6OwjhnFEfATC50eGzThse/9PuE0+88HClIqHF6vsX0YcFELCDB5RsxvY93 26 | yYFSb7CEwAUuKUC6SmCdbbcMH0apIqYxLW1VkcbpdNtA/6fMbTzt1woDU12X6who 27 | SWa4Xc/ULy/nKZrUT/dogyIMhA== 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /testenv/sqlserver/synchdb-sqlserver-test-internal.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # this yaml deploys sqlserver and connects it to internal 3 | # docker network for internal testing 4 | # 5 | 6 | services: 7 | sqlserver: 8 | container_name: sqlserver 
9 | image: mcr.microsoft.com/mssql/server:2019-latest 10 | environment: 11 | - ACCEPT_EULA=Y 12 | - MSSQL_PID=Standard 13 | - SA_PASSWORD=Password! 14 | - MSSQL_AGENT_ENABLED=true 15 | networks: 16 | - synchdbnet 17 | 18 | networks: 19 | synchdbnet: 20 | name: synchdbnet 21 | driver: bridge 22 | internal: true 23 | 24 | -------------------------------------------------------------------------------- /testenv/sqlserver/synchdb-sqlserver-test.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # this yaml deploys sqlserver and exposes service ports 3 | # 4 | 5 | services: 6 | sqlserver: 7 | container_name: sqlserver 8 | image: mcr.microsoft.com/mssql/server:2019-latest 9 | ports: 10 | - 1433:1433 11 | environment: 12 | - ACCEPT_EULA=Y 13 | - MSSQL_PID=Standard 14 | - SA_PASSWORD=Password! 15 | - MSSQL_AGENT_ENABLED=true 16 | -------------------------------------------------------------------------------- /testenv/sqlserver/synchdb-sqlserver-withssl-test.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | sqlserver-ssl-2019: 3 | image: mcr.microsoft.com/mssql/server:2019-latest 4 | platform: linux/amd64 5 | ports: 6 | - 1433:1433 7 | environment: 8 | - ACCEPT_EULA=Y 9 | - MSSQL_PID=Standard 10 | - SA_PASSWORD=Password! 
11 | - MSSQL_AGENT_ENABLED=true 12 | - MSSQL_TLS_CERT=/etc/ssl/certs/synchdb-combined-cert.pem 13 | - MSSQL_TLS_KEY=/etc/ssl/private/synchdb-private.key 14 | volumes: 15 | - ./synchdb-combined-cert.pem:/etc/ssl/certs/synchdb-combined-cert.pem:ro 16 | - ./synchdb-private.key:/etc/ssl/private/synchdb-private.key:ro 17 | -------------------------------------------------------------------------------- /testenv/synchdb/Dockerfile: -------------------------------------------------------------------------------- 1 | ############################################# 2 | # DOCKER FILE FOR UBUNTU BIONIC 3 | ############################################# 4 | FROM ubuntu:24.04 5 | 6 | # We don't want a prompt during installation 7 | ENV DEBIAN_FRONTEND=noninteractive 8 | ENV POSTGRES_VERSION=17 9 | ENV JAVA_VERSION=17 10 | ENV SYNCHDB_BRANCH=synchdb-devel 11 | ENV LIBPROTOBUF_C_VERSION=v1.5.2 12 | ENV POSTGRES_BRANCH=REL_17_6 13 | 14 | # Install necessary 15 | RUN apt-get update 16 | RUN apt-get install -y vim wget gawk net-tools expect apt-utils openssh-client 17 | RUN apt-get install -y zlib1g-dev libxml2-utils xsltproc ccache pkg-config 18 | RUN apt-get install -y build-essential git lcov bison flex 19 | RUN apt-get install -y openjdk-${JAVA_VERSION}-jdk maven 20 | RUN apt-get install -y libkrb5-dev libssl-dev libldap-dev libpam-dev 21 | RUN apt-get install -y gettext libxml2-dev libxslt-dev 22 | RUN apt-get install -y libreadline-dev libedit-dev 23 | RUN apt-get install -y uuid-dev libossp-uuid-dev 24 | RUN apt-get install -y libipc-run-perl libtime-hires-perl libtest-simple-perl 25 | RUN apt-get install -y cppcheck 26 | RUN apt-get install -y chrpath sudo 27 | RUN apt-get install -y autoconf automake libtool 28 | RUN apt-get install -y protobuf-compiler libprotobuf-dev libprotoc-dev 29 | 30 | #RUN apt install -y postgresql-common 31 | #RUN echo -ne "\n" | sudo /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh 32 | #RUN apt-get install -y 
postgresql-client-${POSTGRES_VERSION} 33 | #RUN apt-get install -y postgresql-${POSTGRES_VERSION} 34 | #RUN apt-get install -y postgresql-server-dev-${POSTGRES_VERSION} 35 | 36 | # Restore environment for manual usage 37 | ENV DEBIAN_FRONTEND= 38 | 39 | # Arguments with default values 40 | ARG USERNAME=ubuntu 41 | ARG GROUPNAME=ubuntu 42 | #ARG UID=1000 43 | #ARG GID=1001 44 | ARG USER_HOME=/home/ubuntu 45 | 46 | # Exposing interfaces 47 | #VOLUME ["/outputs"] 48 | 49 | # We want a non-root user to validate certain build conditions 50 | #RUN groupadd -g $GID $USERNAME 51 | #RUN useradd -G $GROUPNAME -N -m -s /bin/bash $USERNAME 52 | RUN echo 'root:root' | chpasswd 53 | RUN echo "$USERNAME:$USERNAME" | chpasswd 54 | RUN adduser ubuntu sudo 55 | 56 | # Gitlab/Github access 57 | RUN mkdir -p $USER_HOME/.ssh 58 | 59 | RUN ssh-keyscan github.com >> $USER_HOME/.ssh/known_hosts 60 | #ADD keys/github/* $USER_HOME/.ssh/ 61 | 62 | RUN chown -R $USERNAME:$GROUPNAME $USER_HOME/.ssh 63 | RUN chmod 700 $USER_HOME/.ssh 64 | RUN chmod 600 $USER_HOME/.ssh/* 65 | 66 | # dependency for Openlog Replicator Connector 67 | WORKDIR $USER_HOME 68 | RUN git clone https://github.com/protobuf-c/protobuf-c.git --branch ${LIBPROTOBUF_C_VERSION} 69 | 70 | WORKDIR $USER_HOME/protobuf-c 71 | RUN ./autogen.sh && \ 72 | ./configure && \ 73 | make && \ 74 | make install && \ 75 | ldconfig 76 | 77 | # Copy the build script 78 | WORKDIR $USER_HOME 79 | RUN git clone https://github.com/postgres/postgres.git --branch ${POSTGRES_BRANCH} 80 | RUN cd postgres && \ 81 | ./configure --prefix=/usr/lib/postgresql/${POSTGRES_VERSION} \ 82 | --enable-cassert \ 83 | --enable-rpath \ 84 | --enable-injection-points \ 85 | --with-libedit-preferred \ 86 | --with-libxml \ 87 | --with-libxslt \ 88 | --with-icu \ 89 | --with-ssl=openssl && \ 90 | make && \ 91 | make install 92 | 93 | RUN cd postgres/contrib && \ 94 | make && \ 95 | make install 96 | 97 | RUN cd $USER_HOME/postgres/contrib && \ 98 | git clone 
https://github.com/Hornetlabs/synchdb.git --branch ${SYNCHDB_BRANCH} 99 | 100 | WORKDIR $USER_HOME/postgres/contrib/synchdb 101 | RUN make oracle_parser && make install_oracle_parser 102 | RUN make WITH_OLR=1 build_dbz 103 | RUN make WITH_OLR=1 104 | RUN make WITH_OLR=1 install 105 | RUN make WITH_OLR=1 install_dbz 106 | #RUN make oracle_parser && make install_oracle_parser 107 | #RUN USE_PGXS=1 make WITH_OLR=1 build_dbz PG_CONFIG=/usr/lib/postgresql/${POSTGRES_VERSION}/bin/pg_config 108 | #RUN USE_PGXS=1 make WITH_OLR=1 PG_CONFIG=/usr/lib/postgresql/${POSTGRES_VERSION}/bin/pg_config 109 | #RUN USE_PGXS=1 make WITH_OLR=1 install PG_CONFIG=/usr/lib/postgresql/${POSTGRES_VERSION}/bin/pg_config 110 | #RUN USE_PGXS=1 make WITH_OLR=1 install_dbz PG_CONFIG=/usr/lib/postgresql/${POSTGRES_VERSION}/bin/pg_config 111 | 112 | ENV PATH="/usr/lib/postgresql/${POSTGRES_VERSION}/bin:${PATH}" 113 | COPY init-synchdb.sh /usr/local/bin 114 | COPY jmx_prometheus_javaagent-1.3.0.jar $USER_HOME 115 | COPY jmxexport.conf $USER_HOME 116 | RUN chmod +x /usr/local/bin/init-synchdb.sh 117 | 118 | RUN mkdir -p /var/run/postgresql \ 119 | && chown $USERNAME:$USERNAME /var/run/postgresql \ 120 | && chmod 755 /var/run/postgresql 121 | 122 | RUN echo "/usr/lib/jvm/java-${JAVA_VERSION}-openjdk-amd64/lib/server" >> /etc/ld.so.conf.d/x86_64-linux-gnu.conf \ 123 | && ldconfig 124 | 125 | USER $USERNAME 126 | WORKDIR $USER_HOME 127 | 128 | ENTRYPOINT ["/usr/local/bin/init-synchdb.sh"] 129 | #ENTRYPOINT ["tail", "-f", "/dev/null"] 130 | CMD ["bash"] 131 | 132 | -------------------------------------------------------------------------------- /testenv/synchdb/init-synchdb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PGDATA=${HOME}/synchdb-test 4 | PGLOG=${HOME}/logfile 5 | SOCKDIR=/var/run/postgresql 6 | 7 | if [ ! 
-d $SOCKDIR ]; then 8 | mkdir -p $SOCKDIR 9 | chown ubuntu:ubuntu /var/run/postgresql 10 | chmod 775 /var/run/postgresql 11 | fi 12 | 13 | if [ -d "$PGDATA" ]; then 14 | echo "synchdb is already initialized" 15 | #pg_ctl -D $PGDATA -l $PGLOG start 16 | exec postgres -D $PGDATA 17 | exit 0 18 | fi 19 | 20 | initdb -D $PGDATA 21 | #echo "unix_socket_directories = '/home/ubuntu/tmp'" >> $PGDATA/postgresql.conf 22 | echo "listen_addresses ='*'" >> $PGDATA/postgresql.conf 23 | #pg_ctl -D $PGDATA -l $PGLOG start 24 | exec postgres -D $PGDATA 25 | -------------------------------------------------------------------------------- /testenv/synchdb/jmx_prometheus_javaagent-1.3.0.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Hornetlabs/synchdb/7433f60bba5133f17c33d29ac3dc5c705c3db63c/testenv/synchdb/jmx_prometheus_javaagent-1.3.0.jar -------------------------------------------------------------------------------- /testenv/synchdb/jmxexport.conf: -------------------------------------------------------------------------------- 1 | startDelaySeconds: 0 2 | ssl: false 3 | lowercaseOutputName: true 4 | lowercaseOutputLabelNames: true 5 | 6 | rules: 7 | - pattern: ".*" 8 | 9 | -------------------------------------------------------------------------------- /testenv/synchdb/synchdb-test.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # this yaml deploys pre-compiled synchdb and connects it 3 | # to internal docker network for internal testing 4 | # 5 | 6 | services: 7 | synchdb: 8 | container_name: synchdb 9 | image: hgneon/synchdbtest:pg17 10 | networks: 11 | - synchdbnet 12 | 13 | networks: 14 | synchdbnet: 15 | name: synchdbnet 16 | driver: bridge 17 | internal: true 18 | --------------------------------------------------------------------------------