├── .github
└── workflows
│ ├── go.yml
│ └── golangci-lint.yml
├── .gitignore
├── CHANGELOG.md
├── LICENSE
├── Makefile
├── README.md
├── build.sh
├── cmd
├── ccr_syncer
│ ├── ccr_syncer.go
│ ├── job_collector.go
│ ├── monitor.go
│ └── signal_mux.go
├── get_binlog
│ └── get_binlog.go
├── get_lag
│ └── get_lag.go
├── get_master_token
│ └── get_master_token.go
├── get_meta
│ └── get_meta.go
├── ingest_binlog
│ └── ingest_binlog.go
├── json_t
│ └── json_t.go
├── metrics
│ └── metrics_demo.go
├── rows_parse
│ └── rows_parse.go
├── spec_checker
│ └── spec_checker.go
└── thrift_get_meta
│ └── thrift_get_meta.go
├── dashboard
└── ccr_dashboard.json
├── devtools
├── ccr_db.sh
├── ccr_lightningschemachange.sh
├── ccr_partition.sh
├── ccr_t1.sh
├── clean.sh
├── get_lag.sh
├── issue_test
│ └── priv.sh
├── pause_job.sh
├── resume_job.sh
├── run-regression-test.sh
├── status.sh
├── test_alter_job.sh
├── test_ccr_db_table_alias.sh
├── test_ccr_many_rows.sh
├── test_ccr_table.sh
├── test_ccr_table_alias.sh
├── test_ccr_truncate_table.sh
├── test_limit_speed.sh
├── test_rollup.sh
└── update_job.sh
├── doc
├── dashboard.md
├── db_enable_binlog.md
├── notes.md
├── operations.md
├── pic
│ ├── dashboard-1.jpeg
│ ├── dashboard-2.jpeg
│ ├── dashboard-3.jpeg
│ ├── dashboard-4.jpeg
│ └── framework.png
├── pprof.md
├── run-regression-test-en.md
├── run-regression-test-zh.md
├── start_syncer.md
└── stop_syncer.md
├── go.mod
├── go.sum
├── pkg
├── ccr
│ ├── base
│ │ ├── backend.go
│ │ ├── extra_info.go
│ │ ├── pool.go
│ │ ├── spec.go
│ │ ├── spec_test.go
│ │ ├── specer.go
│ │ └── specer_factory.go
│ ├── be_mock.go
│ ├── checker.go
│ ├── errors.go
│ ├── factory.go
│ ├── fe_mock.go
│ ├── handle
│ │ ├── add_partition.go
│ │ ├── alter_job_v2.go
│ │ ├── alter_view.go
│ │ ├── create_table.go
│ │ ├── drop_partition.go
│ │ ├── drop_rollup.go
│ │ ├── drop_table.go
│ │ ├── dummy.go
│ │ ├── idempotent.go
│ │ ├── index_change_job.go
│ │ ├── modify_distribution_bucket_num.go
│ │ ├── modify_distribution_type.go
│ │ ├── modify_property.go
│ │ ├── recover_info.go
│ │ ├── rename_partition.go
│ │ ├── rename_rollup.go
│ │ └── replace_table.go
│ ├── ingest_binlog_job.go
│ ├── job.go
│ ├── job_factory.go
│ ├── job_handle.go
│ ├── job_manager.go
│ ├── job_pipeline.go
│ ├── job_progress.go
│ ├── job_test.go
│ ├── jober.go
│ ├── label.go
│ ├── meta.go
│ ├── metaer.go
│ ├── metaer_factory.go
│ ├── metaer_factory_mock.go
│ ├── metaer_mock.go
│ ├── record
│ │ ├── add_partition.go
│ │ ├── alter_job_v2.go
│ │ ├── alter_view.go
│ │ ├── barrier_log.go
│ │ ├── create_table.go
│ │ ├── drop_partition.go
│ │ ├── drop_rollup.go
│ │ ├── drop_table.go
│ │ ├── dummy.go
│ │ ├── index.go
│ │ ├── index_change_job.go
│ │ ├── modify_comment.go
│ │ ├── modify_distribution_bucket_num.go
│ │ ├── modify_distribution_type.go
│ │ ├── modify_property.go
│ │ ├── modify_table_add_or_drop_columns.go
│ │ ├── modify_table_add_or_drop_inverted_indices.go
│ │ ├── record.go
│ │ ├── recover_info.go
│ │ ├── rename_column.go
│ │ ├── rename_partition.go
│ │ ├── rename_rollup.go
│ │ ├── rename_table.go
│ │ ├── replace_partition.go
│ │ ├── replace_table.go
│ │ ├── table_type.go
│ │ ├── truncate_table.go
│ │ └── upsert.go
│ ├── rpc_factory_mock.go
│ ├── snapshot_job.go
│ ├── specer_factory_mock.go
│ ├── specer_mock.go
│ ├── thrift_meta.go
│ └── utils.go
├── rpc
│ ├── Makefile
│ ├── be.go
│ ├── build.sh
│ ├── concurrency.go
│ ├── error.go
│ ├── fe.go
│ ├── kitex_gen
│ │ ├── agentservice
│ │ │ ├── AgentService.go
│ │ │ ├── k-AgentService.go
│ │ │ └── k-consts.go
│ │ ├── backendservice
│ │ │ ├── BackendService.go
│ │ │ ├── backendservice
│ │ │ │ ├── backendservice.go
│ │ │ │ ├── client.go
│ │ │ │ ├── invoker.go
│ │ │ │ └── server.go
│ │ │ ├── k-BackendService.go
│ │ │ └── k-consts.go
│ │ ├── data
│ │ │ ├── Data.go
│ │ │ ├── k-Data.go
│ │ │ └── k-consts.go
│ │ ├── datasinks
│ │ │ ├── DataSinks.go
│ │ │ ├── k-DataSinks.go
│ │ │ └── k-consts.go
│ │ ├── descriptors
│ │ │ ├── Descriptors.go
│ │ │ ├── k-Descriptors.go
│ │ │ └── k-consts.go
│ │ ├── dorisexternalservice
│ │ │ ├── DorisExternalService.go
│ │ │ ├── k-DorisExternalService.go
│ │ │ ├── k-consts.go
│ │ │ └── tdorisexternalservice
│ │ │ │ ├── client.go
│ │ │ │ ├── invoker.go
│ │ │ │ ├── server.go
│ │ │ │ └── tdorisexternalservice.go
│ │ ├── exprs
│ │ │ ├── Exprs.go
│ │ │ ├── k-Exprs.go
│ │ │ └── k-consts.go
│ │ ├── frontendservice
│ │ │ ├── FrontendService.go
│ │ │ ├── frontendservice
│ │ │ │ ├── client.go
│ │ │ │ ├── frontendservice.go
│ │ │ │ ├── invoker.go
│ │ │ │ └── server.go
│ │ │ ├── k-FrontendService.go
│ │ │ └── k-consts.go
│ │ ├── heartbeatservice
│ │ │ ├── HeartbeatService.go
│ │ │ ├── heartbeatservice
│ │ │ │ ├── client.go
│ │ │ │ ├── heartbeatservice.go
│ │ │ │ ├── invoker.go
│ │ │ │ └── server.go
│ │ │ ├── k-HeartbeatService.go
│ │ │ └── k-consts.go
│ │ ├── masterservice
│ │ │ ├── MasterService.go
│ │ │ ├── k-MasterService.go
│ │ │ └── k-consts.go
│ │ ├── metrics
│ │ │ ├── Metrics.go
│ │ │ ├── k-Metrics.go
│ │ │ └── k-consts.go
│ │ ├── opcodes
│ │ │ ├── Opcodes.go
│ │ │ ├── k-Opcodes.go
│ │ │ └── k-consts.go
│ │ ├── palointernalservice
│ │ │ ├── PaloInternalService.go
│ │ │ ├── k-PaloInternalService.go
│ │ │ └── k-consts.go
│ │ ├── paloservice
│ │ │ ├── PaloService.go
│ │ │ ├── k-PaloService.go
│ │ │ └── k-consts.go
│ │ ├── partitions
│ │ │ ├── Partitions.go
│ │ │ ├── k-Partitions.go
│ │ │ └── k-consts.go
│ │ ├── planner
│ │ │ ├── Planner.go
│ │ │ ├── k-Planner.go
│ │ │ └── k-consts.go
│ │ ├── plannodes
│ │ │ ├── PlanNodes.go
│ │ │ ├── k-PlanNodes.go
│ │ │ └── k-consts.go
│ │ ├── querycache
│ │ │ ├── QueryCache.go
│ │ │ ├── k-QueryCache.go
│ │ │ └── k-consts.go
│ │ ├── runtimeprofile
│ │ │ ├── RuntimeProfile.go
│ │ │ ├── k-RuntimeProfile.go
│ │ │ └── k-consts.go
│ │ ├── status
│ │ │ ├── Status.go
│ │ │ ├── k-Status.go
│ │ │ └── k-consts.go
│ │ └── types
│ │ │ ├── Types.go
│ │ │ ├── k-Types.go
│ │ │ └── k-consts.go
│ ├── rpc_factory.go
│ └── thrift
│ │ ├── AgentService.thrift
│ │ ├── BackendService.thrift
│ │ ├── Data.thrift
│ │ ├── DataSinks.thrift
│ │ ├── Ddl.thrift
│ │ ├── Descriptors.thrift
│ │ ├── DorisExternalService.thrift
│ │ ├── Exprs.thrift
│ │ ├── FrontendService.thrift
│ │ ├── HeartbeatService.thrift
│ │ ├── Makefile
│ │ ├── MasterService.thrift
│ │ ├── MetricDefs.thrift
│ │ ├── Metrics.thrift
│ │ ├── NetworkTest.thrift
│ │ ├── Normalization.thrift
│ │ ├── Opcodes.thrift
│ │ ├── PaloBrokerService.thrift
│ │ ├── PaloInternalService.thrift
│ │ ├── PaloService.thrift
│ │ ├── Partitions.thrift
│ │ ├── PlanNodes.thrift
│ │ ├── Planner.thrift
│ │ ├── QueryCache.thrift
│ │ ├── QueryPlanExtra.thrift
│ │ ├── RuntimeProfile.thrift
│ │ ├── Status.thrift
│ │ ├── Types.thrift
│ │ └── parquet.thrift
├── service
│ └── http_service.go
├── storage
│ ├── db.go
│ ├── mysql.go
│ ├── postgresql.go
│ ├── sqlite.go
│ └── utils.go
├── test_util
│ └── db_mock.go
├── utils
│ ├── array.go
│ ├── failpoint.go
│ ├── gzip.go
│ ├── job_hook.go
│ ├── log.go
│ ├── map.go
│ ├── map_test.go
│ ├── math.go
│ ├── observer.go
│ ├── slice.go
│ ├── sql.go
│ └── thrift_wrapper.go
├── version
│ └── version.go
├── xerror
│ ├── stack.go
│ ├── withMessage.go
│ ├── withstack.go
│ ├── xerror.go
│ └── xerror_test.go
└── xmetrics
│ ├── tags.go
│ └── xmetrics.go
├── regression-test
├── common
│ └── helper.groovy
├── data
│ ├── db_sync
│ │ ├── partition
│ │ │ ├── drop_1
│ │ │ │ └── test_ds_part_drop_1.out
│ │ │ ├── recover
│ │ │ │ └── test_ds_part_recover.out
│ │ │ └── recover1
│ │ │ │ └── test_ds_part_recover_new.out
│ │ └── table
│ │ │ ├── recover
│ │ │ └── test_ds_tbl_drop_recover.out
│ │ │ ├── recover1
│ │ │ └── test_ds_tbl_drop_recover_new.out
│ │ │ ├── recover2
│ │ │ └── test_ds_tbl_drop_recover2.out
│ │ │ └── recover3
│ │ │ └── test_ds_tbl_drop_recover3.out
│ └── table_sync
│ │ ├── dml
│ │ └── insert_overwrite
│ │ │ └── test_ts_dml_insert_overwrite.out
│ │ ├── partition
│ │ ├── recover
│ │ │ └── test_tbl_part_recover.out
│ │ └── recover1
│ │ │ └── test_tbl_part_recover_new.out
│ │ └── table
│ │ └── res_inverted_idx
│ │ └── test_ts_tbl_res_inverted_idx.out
└── suites
│ ├── cross_ds
│ ├── drop
│ │ └── alter_job
│ │ │ └── test_cds_drop_alter_job.groovy
│ ├── fullsync
│ │ ├── alt_prop
│ │ │ └── bloom_filter
│ │ │ │ └── test_ds_alt_prop_bf_fullsync.groovy
│ │ ├── alt_view
│ │ │ └── test_ds_view_alt_fullsync.groovy
│ │ ├── tbl_drop_create
│ │ │ └── test_cds_fullsync_tbl_drop_create.groovy
│ │ ├── tbl_drop_create_1
│ │ │ └── test_cds_tbl_drop_create_1.groovy
│ │ ├── upsert_drop_cre
│ │ │ └── test_cds_upsert_drop_create.groovy
│ │ ├── with_alias
│ │ │ └── test_cds_fullsync_with_alias.groovy
│ │ └── with_alias_1
│ │ │ └── test_cds_fullsync_with_alias_1.groovy
│ ├── part
│ │ └── drop
│ │ │ ├── add
│ │ │ └── test_cds_part_add_drop.groovy
│ │ │ └── replace
│ │ │ └── test_cds_part_replace_drop.groovy
│ ├── partialsync
│ │ ├── tbl_drop
│ │ │ └── test_cds_ps_tbl_drop.groovy
│ │ ├── tbl_drop_create
│ │ │ └── test_cds_ps_tbl_drop_create.groovy
│ │ ├── tbl_drop_recover
│ │ │ └── test_cds_ps_tbl_recover.groovy
│ │ ├── tbl_rename
│ │ │ └── test_cds_ps_tbl_rename.groovy
│ │ ├── tbl_rename_create
│ │ │ └── test_cds_ps_tbl_rename_create.groovy
│ │ ├── tbl_replace
│ │ │ └── test_cds_ps_tbl_replace.groovy
│ │ ├── tbl_replace_1
│ │ │ └── test_cds_ps_tbl_replace_1.groovy
│ │ ├── tbl_replace_swap
│ │ │ └── test_cds_ps_tbl_replace_swap.groovy
│ │ └── tbl_replace_swap_1
│ │ │ └── test_cds_ps_tbl_replace_swap_1.groovy
│ ├── table
│ │ ├── backup
│ │ │ └── create_drop
│ │ │ │ └── test_cds_tbl_backup_create_drop.groovy
│ │ ├── create_vars
│ │ │ └── test_cds_tbl_create_vars.groovy
│ │ ├── drop
│ │ │ ├── alter
│ │ │ │ └── test_cds_tbl_alter_drop.groovy
│ │ │ ├── alter_create
│ │ │ │ └── test_cds_tbl_alter_drop_create.groovy
│ │ │ ├── create
│ │ │ │ └── test_cds_tbl_create_drop.groovy
│ │ │ └── idx_inverted
│ │ │ │ └── test_cds_tbl_idx_inverted_drop.groovy
│ │ ├── rename
│ │ │ ├── alter
│ │ │ │ └── test_cds_tbl_rename_alter.groovy
│ │ │ ├── alter_create
│ │ │ │ └── test_cds_tbl_rename_alter_create.groovy
│ │ │ ├── alter_create_1
│ │ │ │ └── test_cds_tbl_rename_alter_create_1.groovy
│ │ │ └── create
│ │ │ │ └── test_cds_tbl_rename_create.groovy
│ │ ├── replace
│ │ │ ├── alter
│ │ │ │ └── test_cds_tbl_alter_replace.groovy
│ │ │ ├── alter_create
│ │ │ │ └── test_cds_tbl_alter_replace_create.groovy
│ │ │ ├── alter_create_1
│ │ │ │ └── test_cds_tbl_alter_replace_create_1.groovy
│ │ │ ├── alter_swap
│ │ │ │ └── test_cds_tbl_alter_replace_swap.groovy
│ │ │ ├── recover
│ │ │ │ └── test_cds_tbl_recover_replace.groovy
│ │ │ └── recover_swap
│ │ │ │ └── test_cds_tbl_recover_replace_swap.groovy
│ │ └── signature_not_matched
│ │ │ └── test_cds_tbl_signature_not_matched.groovy
│ ├── upsert_index
│ │ └── test_cds_upsert_index.groovy
│ └── view
│ │ ├── alter_view
│ │ └── test_cds_view_alter_view.groovy
│ │ ├── drop_col
│ │ └── test_cds_view_drop_col.groovy
│ │ ├── signature_not_matched
│ │ └── test_cds_view_signature_not_matched.groovy
│ │ └── sync_twice
│ │ └── test_cds_view_sync_twice.groovy
│ ├── cross_ts
│ ├── fullsync
│ │ └── replace
│ │ │ └── test_cts_fullsync_replace.groovy
│ ├── keyword_name
│ │ └── test_cts_keyword.groovy
│ └── table
│ │ └── alter_replace
│ │ └── test_cts_tbl_alter_replace.groovy
│ ├── db_ps_inc
│ ├── add_partition
│ │ └── test_db_partial_sync_inc_add_partition.groovy
│ ├── alter
│ │ └── test_db_partial_sync_inc_alter.groovy
│ ├── cache
│ │ └── test_db_partial_sync_cache.groovy
│ ├── drop_partition
│ │ └── test_db_partial_sync_inc_drop_partition.groovy
│ ├── drop_table
│ │ └── test_db_psi_drop_table.groovy
│ ├── lightning_sc
│ │ └── test_db_partial_sync_inc_lightning_sc.groovy
│ ├── merge
│ │ └── test_db_partial_sync_merge.groovy
│ ├── replace_partition
│ │ └── test_db_partial_sync_inc_replace_partition.groovy
│ ├── truncate_table
│ │ └── test_db_partial_sync_inc_trunc_table.groovy
│ └── upsert
│ │ └── test_db_partial_sync_inc_upsert.groovy
│ ├── db_sync
│ ├── alt_prop
│ │ ├── bloom_filter
│ │ │ └── test_ds_alt_prop_bloom_filter.groovy
│ │ ├── bucket
│ │ │ └── test_ds_alt_prop_bucket.groovy
│ │ ├── colocate
│ │ │ └── test_ds_alt_prop_colocate_with.groovy
│ │ ├── comment
│ │ │ └── test_ds_alt_prop_comment.groovy
│ │ ├── compaction
│ │ │ └── test_ds_alt_prop_compaction.groovy
│ │ ├── distribution_num
│ │ │ └── test_ds_alt_prop_distr_num.groovy
│ │ ├── distribution_type
│ │ │ └── test_ds_alt_prop_distr_type.groovy
│ │ ├── dy_part
│ │ │ └── test_ds_alt_prop_dy_part.groovy
│ │ ├── dynamic_partition
│ │ │ └── test_ds_alt_prop_dy_part_sp.groovy
│ │ ├── light_schema_change
│ │ │ └── test_ds_alt_prop_light_schema_change.groovy
│ │ ├── row_store
│ │ │ └── test_ds_alt_prop_row_store.groovy
│ │ ├── sequence
│ │ │ └── test_ds_alt_prop_seq.groovy
│ │ ├── skip_bitmap_column
│ │ │ └── test_ds_alt_prop_skip_bitmap_column.groovy
│ │ ├── storage_policy
│ │ │ └── test_ds_alt_prop_stor_policy.groovy
│ │ └── synced
│ │ │ └── test_ds_alt_prop_synced.groovy
│ ├── async_mv
│ │ └── test_ds_async_mv_filter.groovy
│ ├── auto_increment
│ │ ├── duplicate
│ │ │ └── test_ds_auto_incerment_duplicate.groovy
│ │ ├── unique
│ │ │ └── test_ds_auto_incerment_unique.groovy
│ │ └── update
│ │ │ ├── key
│ │ │ └── test_ds_auto_incer_update_key.groovy
│ │ │ └── val
│ │ │ └── test_ds_auto_incer_update_val.groovy
│ ├── cdt
│ │ ├── column
│ │ │ ├── add
│ │ │ │ └── test_ds_cdt_col_add.groovy
│ │ │ ├── drop
│ │ │ │ └── test_ds_cdt_col_drop.groovy
│ │ │ └── rename
│ │ │ │ └── test_ds_cdt_col_rename.groovy
│ │ └── dml
│ │ │ └── test_ds_cdt_dml_insert.groovy
│ ├── column
│ │ ├── alter_comment
│ │ │ └── test_ds_col_alter_comment.groovy
│ │ ├── alter_type
│ │ │ └── test_ds_col_alter_type.groovy
│ │ ├── basic
│ │ │ └── test_ds_col_basic.groovy
│ │ ├── default_value
│ │ │ └── test_ds_col_default_value.groovy
│ │ ├── drop_key
│ │ │ └── test_ds_col_drop_key_col.groovy
│ │ ├── drop_val
│ │ │ └── test_ds_col_drop_val_col.groovy
│ │ ├── order_by
│ │ │ └── test_ds_col_order_by.groovy
│ │ └── rename
│ │ │ └── test_ds_col_rename.groovy
│ ├── common
│ │ └── test_ds_common.groovy
│ ├── delete
│ │ ├── mor
│ │ │ └── test_ds_delete_mor.groovy
│ │ └── mow
│ │ │ └── test_ds_delete_mow.groovy
│ ├── dml
│ │ ├── delete
│ │ │ └── test_ds_dml_delete.groovy
│ │ ├── insert
│ │ │ └── test_ds_dml_insert.groovy
│ │ ├── insert_overwrite
│ │ │ └── test_ds_dml_insert_overwrite.groovy
│ │ └── update_unique
│ │ │ └── test_ds_dml_update_unique.groovy
│ ├── idx_bf
│ │ ├── add_drop
│ │ │ └── test_ds_idx_bf_add_drop.groovy
│ │ └── fpp
│ │ │ └── test_ds_idx_bf_fpp.groovy
│ ├── idx_ngbf
│ │ └── add_drop
│ │ │ └── test_ds_idx_ngbf_add_drop.groovy
│ ├── inverted
│ │ └── add_drop
│ │ │ └── test_ds_idx_inverted_add_build_drop.groovy
│ ├── mv
│ │ ├── basic
│ │ │ └── test_ds_mv_basic.groovy
│ │ └── create_drop
│ │ │ └── test_ds_mv_create_drop.groovy
│ ├── partition
│ │ ├── alter
│ │ │ └── test_ds_part_alter.groovy
│ │ ├── default
│ │ │ └── test_ds_partition_default_list_insert.groovy
│ │ ├── drop
│ │ │ └── test_ds_part_drop.groovy
│ │ ├── drop_1
│ │ │ └── test_ds_part_drop_1.groovy
│ │ ├── recover
│ │ │ └── test_ds_part_recover.groovy
│ │ ├── recover1
│ │ │ └── test_ds_part_recover_new.groovy
│ │ ├── rename
│ │ │ └── test_ds_part_rename.groovy
│ │ ├── replace
│ │ │ └── test_ds_part_replace.groovy
│ │ ├── replace_1
│ │ │ └── test_ds_part_replace_1.groovy
│ │ └── replace_2
│ │ │ └── test_ds_part_replace_2.groovy
│ ├── prop
│ │ ├── auto_bucket
│ │ │ └── test_ds_prop_auto_bucket.groovy
│ │ ├── auto_compaction
│ │ │ └── test_ds_prop_auto_compaction.groovy
│ │ ├── auto_increment
│ │ │ └── test_ds_prop_auto_increment.groovy
│ │ ├── binlog
│ │ │ └── test_ds_prop_binlog.groovy
│ │ ├── bloom_filter
│ │ │ └── test_ds_prop_bloom_filter.groovy
│ │ ├── colocate_with
│ │ │ └── test_ds_prop_colocate_with.groovy
│ │ ├── compaction_policy
│ │ │ └── test_ds_prop_compaction_policy.groovy
│ │ ├── compression
│ │ │ └── test_ds_prop_compression.groovy
│ │ ├── dynamic_partition
│ │ │ └── test_ds_prop_dynamic_partition.groovy
│ │ ├── generated_column
│ │ │ └── test_ds_prop_generated_column.groovy
│ │ ├── group_commit
│ │ │ └── test_ds_prop_group_commit.groovy
│ │ ├── index
│ │ │ └── test_ds_prop_index.groovy
│ │ ├── repi_alloc
│ │ │ └── test_ds_prop_repli_alloc.groovy
│ │ ├── row_store
│ │ │ └── test_ds_prop_row_store.groovy
│ │ ├── schema_change
│ │ │ └── test_ds_prop_schema_change.groovy
│ │ ├── seq_col
│ │ │ └── test_ds_prop_seq_col.groovy
│ │ ├── single_compact
│ │ │ └── test_ds_prop_single_compact.groovy
│ │ ├── storage_medium
│ │ │ └── test_ds_prop_storage_medium.groovy
│ │ ├── storage_policy
│ │ │ └── test_ds_prop_storage_policy.groovy
│ │ ├── tm_compact
│ │ │ └── test_ds_prop_tm_compact.groovy
│ │ ├── unique_key_mow
│ │ │ └── test_ds_prop_unique_key_mow.groovy
│ │ └── variant_nested
│ │ │ └── test_ds_prop_variant_nested.groovy
│ ├── prop_incrsync
│ │ ├── auto_bucket
│ │ │ └── test_ds_prop_incrsync_auto_bucket.groovy
│ │ ├── auto_compaction
│ │ │ └── test_ds_prop_incrsync_auto_compaction.groovy
│ │ ├── auto_increment
│ │ │ └── test_ds_prop_incrsync_auto_increment.groovy
│ │ ├── binlog
│ │ │ └── test_ds_prop_incrsync_binlog.groovy
│ │ ├── bloom_filter
│ │ │ └── test_ds_prop_incrsync_bloom_filter.groovy
│ │ ├── colocate_with
│ │ │ └── test_ds_prop_incrsync_colocate_with.groovy
│ │ ├── compaction_policy
│ │ │ └── test_ds_prop_incrsync_compaction_policy.groovy
│ │ ├── compression
│ │ │ └── test_ds_prop_incrsync_compression.groovy
│ │ ├── dynamic_partition
│ │ │ └── test_ds_prop_incrsync_dynamic_partition.groovy
│ │ ├── generated_column
│ │ │ └── test_ds_prop_incrsync_generated_column.groovy
│ │ ├── group_commit
│ │ │ └── test_ds_prop_incrsync_group_commit.groovy
│ │ ├── index
│ │ │ └── test_ds_prop_incrsync_index.groovy
│ │ ├── repi_alloc
│ │ │ └── test_ds_prop_incrsync_repli_alloc.groovy
│ │ ├── row_store
│ │ │ └── test_ds_prop_incrsync_row_store.groovy
│ │ ├── schema_change
│ │ │ └── test_ds_prop_incrsync_schema_change.groovy
│ │ ├── seq_col
│ │ │ └── test_ds_prop_incrsync_seq_col.groovy
│ │ ├── single_compact
│ │ │ └── test_ds_prop_incrsync_single_compact.groovy
│ │ ├── storage_medium
│ │ │ └── test_ds_prop_incrsync_storage_medium.groovy
│ │ ├── storage_policy
│ │ │ └── test_ds_prop_incrsync_storage_policy.groovy
│ │ ├── tm_compact
│ │ │ └── test_ds_prop_incrsync_tm_compact.groovy
│ │ ├── unique_key_mow
│ │ │ └── test_ds_prop_incrsync_unique_key_mow.groovy
│ │ └── variant_nested
│ │ │ └── test_ds_prop_incrsync_variant_nested.groovy
│ ├── rollup
│ │ ├── add_drop
│ │ │ └── test_ds_rollup_add_drop.groovy
│ │ └── rename
│ │ │ └── test_ds_rollup_rename.groovy
│ ├── rollup_col
│ │ ├── add
│ │ │ └── test_ds_rollup_col_add.groovy
│ │ ├── drop
│ │ │ └── test_ds_rollup_col_drop.groovy
│ │ └── order_by
│ │ │ └── test_ds_rollup_col_order_by.groovy
│ ├── sequence
│ │ ├── column
│ │ │ └── test_ds_sequence_column.groovy
│ │ └── type
│ │ │ └── test_ds_sequence_type.groovy
│ ├── table
│ │ ├── aggregate_key
│ │ │ └── test_ds_tbl_aggregate_key.groovy
│ │ ├── clean_restore
│ │ │ └── test_ds_clean_restore.groovy
│ │ ├── create_comment
│ │ │ └── test_ds_tbl_create_comment.groovy
│ │ ├── create_drop
│ │ │ └── test_ds_tbl_create_drop.groovy
│ │ ├── create_index
│ │ │ └── test_ds_tbl_create_index.groovy
│ │ ├── create_resource
│ │ │ └── test_ds_tbl_create_resource.groovy
│ │ ├── create_rollup
│ │ │ └── test_ds_tbl_create_rollup.groovy
│ │ ├── diff_schema
│ │ │ └── test_ds_tbl_diff_schema.groovy
│ │ ├── drop_create
│ │ │ └── test_ds_tbl_drop_create.groovy
│ │ ├── duplicate
│ │ │ └── test_ds_tbl_duplicate.groovy
│ │ ├── modify_comment
│ │ │ └── test_ds_table_modify_comment.groovy
│ │ ├── part_bucket
│ │ │ └── test_ds_tbl_part_bucket.groovy
│ │ ├── recover
│ │ │ └── test_ds_tbl_drop_recover.groovy
│ │ ├── recover1
│ │ │ └── test_ds_tbl_drop_recover_new.groovy
│ │ ├── recover2
│ │ │ └── test_ds_tbl_drop_recover2.groovy
│ │ ├── recover3
│ │ │ └── test_ds_tbl_drop_recover3.groovy
│ │ ├── rename
│ │ │ └── test_ds_tbl_rename.groovy
│ │ ├── rename_dep
│ │ │ └── test_ds_tbl_rename_dep.groovy
│ │ ├── replace
│ │ │ └── test_ds_tbl_replace.groovy
│ │ ├── replace_different
│ │ │ └── test_ds_tbl_replace_different.groovy
│ │ ├── res_agg_state
│ │ │ └── test_ds_tbl_res_agg_state.groovy
│ │ ├── truncate
│ │ │ └── test_ds_tbl_truncate.groovy
│ │ └── unique_key
│ │ │ └── test_ds_tbl_unique_key.groovy
│ ├── txn_insert
│ │ └── test_ds_txn_insert.groovy
│ └── view
│ │ ├── alter
│ │ └── test_ds_view_alter.groovy
│ │ ├── basic
│ │ └── test_ds_view_basic.groovy
│ │ ├── create_func
│ │ └── test_ds_view_create_func.groovy
│ │ ├── drop_create
│ │ └── test_ds_view_drop_create.groovy
│ │ ├── drop_delete_create
│ │ └── test_ds_view_drop_delete_create.groovy
│ │ └── modify_comment
│ │ └── test_ds_view_modify_comment.groovy
│ ├── db_sync_absorb
│ ├── auto_incre
│ │ ├── dup
│ │ │ └── test_ds_absorb_auto_incre_dup.groovy
│ │ └── uni
│ │ │ └── test_ds_absorb_auto_incre_uni.groovy
│ ├── col
│ │ ├── add_key
│ │ │ └── test_ds_absorb_col_add_key.groovy
│ │ ├── add_key_drop
│ │ │ ├── key
│ │ │ │ └── test_ds_absorb_col_add_key_drop_key.groovy
│ │ │ └── val
│ │ │ │ └── test_ds_absorb_col_add_key_drop_val.groovy
│ │ ├── add_key_mod
│ │ │ └── test_ds_absorb_col_add_key_mod.groovy
│ │ ├── add_val
│ │ │ └── test_ds_absorb_col_add_value.groovy
│ │ ├── add_val_drop
│ │ │ ├── key
│ │ │ │ └── test_ds_absorb_col_add_val_drop_key.groovy
│ │ │ └── val
│ │ │ │ └── test_ds_absorb_col_add_val_drop_val.groovy
│ │ ├── add_val_mod
│ │ │ └── test_ds_absorb_col_add_val_mod.groovy
│ │ ├── drop_key
│ │ │ └── test_ds_absorb_col_drop_key.groovy
│ │ ├── drop_key_add
│ │ │ └── test_ds_absorb_col_drop_key_add.groovy
│ │ ├── drop_val_add
│ │ │ └── test_ds_absorb_col_drop_val_add.groovy
│ │ ├── drop_value
│ │ │ └── test_ds_absorb_col_drop_value.groovy
│ │ ├── modify_comm
│ │ │ └── test_ds_absorb_col_modify_comment.groovy
│ │ ├── order_by
│ │ │ └── test_ds_absorb_col_order_by.groovy
│ │ └── rename
│ │ │ └── test_ds_absorb_col_rename.groovy
│ ├── delete
│ │ ├── mor
│ │ │ └── test_ds_absorb_delete_mor.groovy
│ │ └── mow
│ │ │ └── test_ds_absorb_delete_mow.groovy
│ ├── dml
│ │ └── insert
│ │ │ └── test_ds_absorb_dml_insert.groovy
│ ├── index
│ │ ├── add_bf
│ │ │ └── test_ds_absorb_idx_add_bf.groovy
│ │ ├── add_inverted
│ │ │ └── test_ds_absorb_idx_add_inverted.groovy
│ │ ├── add_ng_bf
│ │ │ └── test_ds_absorb_idx_add_ng_bf.groovy
│ │ ├── drop_bf
│ │ │ └── test_ds_absorb_idx_drop_bf.groovy
│ │ ├── drop_inverted
│ │ │ └── test_ds_absorb_idx_drop_inverted.groovy
│ │ └── drop_ng_bf
│ │ │ └── test_ds_absorb_idx_drop_ng_bf.groovy
│ ├── mv
│ │ ├── create
│ │ │ └── test_ds_absorb_mv_create.groovy
│ │ └── drop
│ │ │ └── test_ds_absorb_mv_drop.groovy
│ ├── partition
│ │ ├── add
│ │ │ └── test_ds_absorb_part_add.groovy
│ │ ├── add_drop
│ │ │ └── test_ds_absorb_part_add_drop.groovy
│ │ ├── add_rename
│ │ │ └── test_ds_absorb_part_add_rename.groovy
│ │ ├── drop
│ │ │ └── test_ds_absorb_part_drop.groovy
│ │ ├── drop_add
│ │ │ └── test_ds_absorb_part_drop_add.groovy
│ │ ├── rename
│ │ │ └── test_ds_absorb_part_rename.groovy
│ │ └── replace
│ │ │ └── test_ds_absorb_part_replace.groovy
│ ├── rollup
│ │ ├── add
│ │ │ └── test_ds_absorb_rollup_add.groovy
│ │ ├── add_col
│ │ │ └── test_ds_absorb_rollup_add_col.groovy
│ │ ├── drop
│ │ │ └── test_ds_absorb_rollup_drop.groovy
│ │ ├── drop_col
│ │ │ └── test_ds_absorb_rollup_drop_col.groovy
│ │ ├── modify_col
│ │ │ └── test_ds_absorb_rollup_modify_col.groovy
│ │ ├── rename
│ │ │ └── test_ds_absorb_rollup_rename.groovy
│ │ └── rename_col
│ │ │ └── test_ds_absorb_rollup_rename_col.groovy
│ ├── sequence
│ │ └── create
│ │ │ ├── col
│ │ │ └── test_ds_absorb_sequence_create_col.groovy
│ │ │ └── type
│ │ │ └── test_ds_absorb_sequence_create_type.groovy
│ ├── tbl
│ │ ├── alt_comment
│ │ │ └── test_ds_absorb_tbl_alt_comment.groovy
│ │ ├── alt_prop
│ │ │ ├── bfilter
│ │ │ │ └── test_ds_absorb_tbl_alt_prop_bloom_filter.groovy
│ │ │ ├── bucket
│ │ │ │ └── test_ds_absorb_tbl_alt_prop_bucket.groovy
│ │ │ ├── colocate
│ │ │ │ └── test_ds_absorb_tbl_alt_prop_colocate.groovy
│ │ │ ├── comm
│ │ │ │ └── test_ds_absorb_tbl_alt_prop_comment.groovy
│ │ │ ├── compact
│ │ │ │ └── test_ds_absorb_tbl_alt_prop_compaction.groovy
│ │ │ ├── dist_num
│ │ │ │ └── test_ds_absorb_tbl_alt_prop_distr_num.groovy
│ │ │ ├── dist_tp
│ │ │ │ └── test_ds_absorb_tbl_alt_prop_distr_type.groovy
│ │ │ ├── dy_part
│ │ │ │ └── test_ds_absorb_tbl_alt_prop_dy_part.groovy
│ │ │ ├── dy_pt_sp
│ │ │ │ └── test_ds_absorb_tbl_alt_prop_dynamic_partition_sp.groovy
│ │ │ ├── lgt_schema
│ │ │ │ └── test_ds_absorb_tbl_alt_prop_light_schema_change.groovy
│ │ │ ├── row_store
│ │ │ │ └── test_ds_absorb_tbl_alt_prop_row_store.groovy
│ │ │ ├── sequence
│ │ │ │ └── test_ds_absorb_alt_prop_seq.groovy
│ │ │ ├── sk_bitm_col
│ │ │ │ └── test_ds_absorb_tbl_alt_prop_skip_bitmap_column.groovy
│ │ │ ├── stor_poli
│ │ │ │ └── test_ds_absorb_tbl_alt_prop_storage_policy.groovy
│ │ │ └── synced
│ │ │ │ └── test_ds_absorb_tbl_alt_prop_synced.groovy
│ │ ├── create_alter
│ │ │ ├── bfilter
│ │ │ │ └── test_ds_absorb_tbl_create_alt_bloom_filter.groovy
│ │ │ ├── bucket
│ │ │ │ └── test_ds_absorb_tbl_create_alt_bucket.groovy
│ │ │ ├── colocate
│ │ │ │ └── test_ds_absorb_tbl_create_alt_colocate.groovy
│ │ │ ├── comm
│ │ │ │ └── test_ds_absorb_tbl_create_alt_comment.groovy
│ │ │ ├── compact
│ │ │ │ └── test_ds_absorb_tbl_create_alt_compaction.groovy
│ │ │ ├── dist_num
│ │ │ │ └── test_ds_absorb_tbl_create_alt_distr_num.groovy
│ │ │ ├── dist_tp
│ │ │ │ └── test_ds_absorb_tbl_create_alt_distr_type.groovy
│ │ │ ├── dy_part
│ │ │ │ └── test_ds_absorb_tbl_create_alt_dy_part.groovy
│ │ │ ├── dy_pt_sp
│ │ │ │ └── test_ds_absorb_tbl_create_alt_dynamic_partition_sp.groovy
│ │ │ ├── lgt_schema
│ │ │ │ └── test_ds_absorb_tbl_create_alt_light_schema_change.groovy
│ │ │ ├── row_store
│ │ │ │ └── test_ds_absorb_tbl_create_alt_row_store.groovy
│ │ │ ├── sk_bitm_col
│ │ │ │ └── test_ds_absorb_tbl_create_alt_skip_bitmap_column.groovy
│ │ │ ├── stor_poli
│ │ │ │ └── test_ds_absorb_tbl_create_alt_storage_policy.groovy
│ │ │ └── synced
│ │ │ │ └── test_ds_absorb_tbl_create_alt_synced.groovy
│ │ ├── create_drop
│ │ │ └── test_ds_absorb_tbl_create_drop.groovy
│ │ ├── drop_cre
│ │ │ └── sch_consis
│ │ │ │ └── test_ds_absorb_tbl_drop_create_schema_consist.groovy
│ │ ├── rename
│ │ │ └── test_ds_absorb_tbl_rename.groovy
│ │ └── replace
│ │ │ ├── false
│ │ │ └── test_ds_absorb_tbl_replace_false.groovy
│ │ │ └── true
│ │ │ └── test_ds_absorb_tbl_replace_true.groovy
│ └── view
│ │ ├── alter
│ │ └── test_ds_absorb_view_alter.groovy
│ │ ├── create
│ │ └── test_ds_absorb_view_create.groovy
│ │ ├── create_drop
│ │ └── test_ds_absorb_view_create_drop.groovy
│ │ ├── drop
│ │ └── test_ds_absorb_view_drop.groovy
│ │ └── drop_create
│ │ └── test_ds_absorb_view_drop_create.groovy
│ ├── db_sync_dep
│ ├── column
│ │ ├── add_key
│ │ │ └── test_dsd_column_add_key.groovy
│ │ ├── add_value
│ │ │ └── test_dsd_column_add_value.groovy
│ │ ├── drop_key
│ │ │ └── test_dsd_column_drop_key.groovy
│ │ ├── drop_value
│ │ │ └── test_dsd_column_drop_value.groovy
│ │ ├── order_by
│ │ │ └── test_dsd_column_order_by.groovy
│ │ └── rename_column
│ │ │ └── test_dsd_column_rename_column.groovy
│ ├── dml
│ │ └── insert
│ │ │ └── test_dsd_dml_insert.groovy
│ ├── index
│ │ ├── add_bf
│ │ │ └── test_dsd_index_add_bf.groovy
│ │ ├── add_inverted
│ │ │ └── test_dsd_index_add_inverted.groovy
│ │ ├── add_ng_bf
│ │ │ └── test_dsd_index_add_ng_bf.groovy
│ │ ├── drop_bf
│ │ │ └── test_dsd_index_drop_bf.groovy
│ │ ├── drop_inverted
│ │ │ └── test_dsd_index_drop_inverted.groovy
│ │ └── drop_ng_bf
│ │ │ └── test_dsd_index_drop_ng_bf.groovy
│ ├── mv
│ │ ├── create
│ │ │ └── test_dsd_mv_create.groovy
│ │ └── drop
│ │ │ └── test_dsd_mv_drop.groovy
│ ├── partition
│ │ ├── drop
│ │ │ └── test_ds_dep_part_drop.groovy
│ │ ├── rename
│ │ │ └── test_ds_dep_part_rename.groovy
│ │ └── replace
│ │ │ └── test_ds_dep_part_replace.groovy
│ ├── rollup
│ │ ├── add
│ │ │ └── test_dsd_rollup_add.groovy
│ │ ├── add_column
│ │ │ └── test_dsd_rollup_add_column.groovy
│ │ ├── drop
│ │ │ └── test_dsd_rollup_drop.groovy
│ │ ├── drop_1
│ │ │ └── test_dsd_rollup_drop_1.groovy
│ │ ├── drop_2
│ │ │ └── test_dsd_rollup_drop_2.groovy
│ │ ├── drop_column
│ │ │ └── test_dsd_rollup_drop_column.groovy
│ │ ├── modify_column
│ │ │ └── test_dsd_rollup_modify_column.groovy
│ │ └── rename
│ │ │ └── test_dsd_rollup_rename.groovy
│ ├── table
│ │ ├── drop
│ │ │ └── test_ds_dep_table_drop.groovy
│ │ ├── rename
│ │ │ └── test_ds_dep_table_rename.groovy
│ │ ├── replace_false
│ │ │ └── test_ds_dep_table_replace_false.groovy
│ │ ├── replace_true
│ │ │ └── test_ds_dep_table_replace_true.groovy
│ │ └── truncate
│ │ │ └── test_ds_dep_table_truncate.groovy
│ └── view
│ │ ├── alter
│ │ └── test_dsd_view_alter.groovy
│ │ ├── create
│ │ └── test_dsd_view_create.groovy
│ │ └── drop
│ │ └── test_dsd_view_drop.groovy
│ ├── db_sync_idem
│ ├── add_key_column
│ │ └── test_ds_idem_add_key_column.groovy
│ ├── add_partition
│ │ └── test_ds_idem_add_partition.groovy
│ ├── add_value_column
│ │ └── test_ds_idem_add_value_column.groovy
│ ├── alter_tbl_property
│ │ └── test_ds_idem_alter_tbl_property.groovy
│ ├── create_table
│ │ └── test_ds_idem_create_table.groovy
│ ├── distribution_num
│ │ └── test_ds_idem_distribution_num.groovy
│ ├── drop_partition
│ │ └── test_ds_idem_drop_partition.groovy
│ ├── drop_rollup
│ │ └── test_ds_idem_drop_rollup.groovy
│ ├── drop_table
│ │ └── test_ds_idem_drop_table.groovy
│ ├── drop_value_column
│ │ └── test_ds_idem_drop_value_column.groovy
│ ├── inverted_idx
│ │ └── test_ds_idem_inverted_idx.groovy
│ ├── modify_comment
│ │ └── test_ds_idem_modify_comment.groovy
│ ├── modify_view
│ │ └── test_ds_idem_modify_view_def.groovy
│ ├── recover_info
│ │ └── test_ds_idem_recover_info.groovy
│ ├── rename_column
│ │ └── test_ds_idem_rename_column.groovy
│ ├── rename_partition
│ │ └── test_ds_idem_rename_partition.groovy
│ ├── rename_rollup
│ │ └── test_ds_idem_rename_rollup.groovy
│ ├── rename_table
│ │ └── test_ds_idem_rename_table.groovy
│ ├── replace_partition
│ │ └── test_ds_idem_replace_partition.groovy
│ └── truncate_table
│ │ └── test_ds_idem_truncate_table.groovy
│ ├── syncer
│ ├── failover
│ │ └── test_syncer_failover.groovy
│ ├── get_lag
│ │ └── test_syncer_get_lag.groovy
│ ├── skip_binlog
│ │ ├── fullsync
│ │ │ └── test_syncer_skip_binlog_fullsync.groovy
│ │ ├── partialsync
│ │ │ └── test_syncer_skip_binlog_partialsync.groovy
│ │ └── silence
│ │ │ └── test_syncer_skip_binlog_silence.groovy
│ └── ts_allow_table_exists
│ │ └── test_syncer_ts_allow_table_exists.groovy
│ ├── table_ps_inc
│ ├── basic
│ │ └── test_tbl_ps_inc_basic.groovy
│ └── cache
│ │ └── test_tbl_ps_inc_cache.groovy
│ ├── table_sync
│ ├── alt_prop
│ │ ├── bloom_filter
│ │ │ └── test_ts_alt_prop_bloom_filter.groovy
│ │ ├── bucket
│ │ │ └── test_ts_alt_prop_bucket.groovy
│ │ ├── colocate
│ │ │ └── test_ts_alt_prop_colocate_with.groovy
│ │ ├── comment
│ │ │ └── test_ts_alt_prop_comment.groovy
│ │ ├── compaction
│ │ │ └── test_ts_alt_prop_compaction.groovy
│ │ ├── distribution_num
│ │ │ └── test_ts_alt_prop_distr_num.groovy
│ │ ├── distribution_type
│ │ │ └── test_ts_alt_prop_distr_type.groovy
│ │ ├── dy_part
│ │ │ └── test_ts_alt_prop_dy_pary.groovy
│ │ ├── light_schema_change
│ │ │ └── test_ts_alt_prop_light_schema_change.groovy
│ │ ├── row_store
│ │ │ └── test_ts_alt_prop_row_store.groovy
│ │ ├── sequence
│ │ │ └── test_ts_alt_prop_seq.groovy
│ │ ├── skip_bitmap_column
│ │ │ └── test_ts_alt_prop_skip_bitmap_column.groovy
│ │ ├── storage_policy
│ │ │ └── test_ts_alt_prop_stor_policy.groovy
│ │ └── synced
│ │ │ └── test_ts_alt_prop_synced.groovy
│ ├── auto_increment
│ │ ├── duplicate
│ │ │ └── test_ts_auto_incerment_duplicate.groovy
│ │ ├── unique
│ │ │ └── test_ts_auto_incerment_unique.groovy
│ │ └── update
│ │ │ ├── key
│ │ │ └── test_ts_auto_incer_update_key.groovy
│ │ │ └── val
│ │ │ └── test_ts_auto_incer_update_val.groovy
│ ├── column
│ │ ├── add
│ │ │ └── test_ts_col_add.groovy
│ │ ├── add_agg
│ │ │ └── test_ts_col_add_agg.groovy
│ │ ├── add_many
│ │ │ └── test_ts_col_add_many.groovy
│ │ ├── alter_comment
│ │ │ └── test_ts_col_alter_comment.groovy
│ │ ├── alter_type
│ │ │ └── test_ts_col_alter_type.groovy
│ │ ├── basic
│ │ │ └── test_ts_col_basic.groovy
│ │ ├── comment
│ │ │ └── test_ts_col_default_value.groovy
│ │ ├── drop_key
│ │ │ └── test_ts_col_drop_key.groovy
│ │ ├── drop_val
│ │ │ └── test_ts_col_drop_val.groovy
│ │ ├── filter_dropped_indexes
│ │ │ └── test_ts_col_filter_dropped_indexes.groovy
│ │ ├── order_by
│ │ │ └── test_ts_col_order_by.groovy
│ │ └── rename
│ │ │ └── test_ts_col_rename.groovy
│ ├── common
│ │ └── test_ts_common.groovy
│ ├── delete
│ │ ├── mor
│ │ │ └── test_ts_delete_mor.groovy
│ │ └── mow
│ │ │ └── test_ts_delete_mow.groovy
│ ├── dml
│ │ ├── delete
│ │ │ └── test_ts_dml_delete.groovy
│ │ ├── insert_overwrite
│ │ │ └── test_ts_dml_insert_overwrite.groovy
│ │ └── update_unique
│ │ │ └── test_ts_dml_update_unique.groovy
│ ├── idx_bf
│ │ ├── add_drop
│ │ │ └── test_ts_idx_bf_add_drop.groovy
│ │ └── fpp
│ │ │ └── test_ts_idx_bf_fpp.groovy
│ ├── idx_bitmap
│ │ └── add
│ │ │ └── test_ts_idx_bitmap_add.groovy
│ ├── idx_inverted
│ │ ├── add_build_drop
│ │ │ └── test_ts_idx_inverted_add_build_drop.groovy
│ │ ├── add_drop_multi
│ │ │ └── test_ts_idx_inverted_add_drop_multi.groovy
│ │ ├── build_with_part
│ │ │ └── test_ts_idx_inverted_build_with_part.groovy
│ │ ├── create_drop
│ │ │ └── test_ts_idx_inverted_create_drop.groovy
│ │ └── match
│ │ │ └── test_ts_idx_inverted_match.groovy
│ ├── idx_ngbf
│ │ └── add_drop
│ │ │ └── test_ts_idx_ngbf_add_drop.groovy
│ ├── mv
│ │ └── create_drop
│ │ │ └── test_ts_mv_create_drop.groovy
│ ├── partition
│ │ ├── add
│ │ │ └── test_ts_part_add.groovy
│ │ ├── add_drop
│ │ │ └── test_tbl_part_add_drop.groovy
│ │ ├── alter
│ │ │ └── test_ts_part_alter.groovy
│ │ ├── clean_restore
│ │ │ └── test_ts_part_clean_restore.groovy
│ │ ├── recover
│ │ │ └── test_tbl_part_recover.groovy
│ │ ├── recover1
│ │ │ └── test_tbl_part_recover_new.groovy
│ │ ├── rename
│ │ │ └── test_ts_part_rename.groovy
│ │ ├── replace
│ │ │ └── test_ts_part_replace.groovy
│ │ └── replace_partial
│ │ │ └── test_ts_part_replace_partial.groovy
│ ├── prop
│ │ ├── auto_bucket
│ │ │ └── test_ts_prop_auto_bucket.groovy
│ │ ├── auto_compaction
│ │ │ └── test_ts_prop_auto_compaction.groovy
│ │ ├── auto_increment
│ │ │ └── test_ts_prop_auto_increment.groovy
│ │ ├── binlog
│ │ │ └── test_ts_prop_binlog.groovy
│ │ ├── bloom_filter
│ │ │ └── test_ts_prop_bloom_filter.groovy
│ │ ├── colocate_with
│ │ │ └── test_ts_prop_colocate_with.groovy
│ │ ├── compaction_policy
│ │ │ └── test_ts_prop_compaction_policy.groovy
│ │ ├── compression
│ │ │ └── test_ts_prop_compression.groovy
│ │ ├── dynamic_partition
│ │ │ └── test_ts_prop_dynamic_partition.groovy
│ │ ├── generated_column
│ │ │ └── test_ts_prop_generated_column.groovy
│ │ ├── group_commit
│ │ │ └── test_ts_prop_group_commit.groovy
│ │ ├── index
│ │ │ └── test_ts_prop_index.groovy
│ │ ├── light_schema_change
│ │ │ └── test_ts_prop_light_schema_change.groovy
│ │ ├── repli_alloc
│ │ │ └── test_ts_prop_repli_alloc.groovy
│ │ ├── row_store
│ │ │ └── test_ts_prop_row_store.groovy
│ │ ├── seq_col
│ │ │ └── test_ts_prop_seq_col.groovy
│ │ ├── single_replica_compaction
│ │ │ └── test_ts_prop_single_repli_compact.groovy
│ │ ├── storage_medium
│ │ │ └── test_ts_prop_storage_medium.groovy
│ │ ├── storage_policy
│ │ │ └── test_ts_prop_storage_policy.groovy
│ │ ├── time_series_compaction
│ │ │ └── test_ts_prop_tm_series_compact.groovy
│ │ ├── unique_key_mow
│ │ │ └── test_ts_prop_unique_key_mow.groovy
│ │ └── variant_nested
│ │ │ └── test_ts_prop_variant_nested.groovy
│ ├── rollup
│ │ ├── add_drop
│ │ │ └── test_ts_rollup_add_drop.groovy
│ │ └── rename
│ │ │ └── test_ts_rollup_rename.groovy
│ ├── rollup_col
│ │ ├── add
│ │ │ └── test_ts_rollup_col_add.groovy
│ │ ├── drop
│ │ │ └── test_ts_rollup_col_drop.groovy
│ │ └── order_by
│ │ │ └── test_ts_rollup_col_order_by.groovy
│ ├── sequence
│ │ ├── column
│ │ │ └── test_ts_sequence_column.groovy
│ │ └── type
│ │ │ └── test_ts_sequence_type.groovy
│ └── table
│ │ ├── aggregate_key
│ │ └── test_ts_tbl_aggregate_key.groovy
│ │ ├── duplicate
│ │ └── test_ts_tbl_duplicate.groovy
│ │ ├── modify_comment
│ │ └── test_ts_table_modify_comment.groovy
│ │ ├── part_bucket
│ │ └── test_ts_tbl_part_bucket.groovy
│ │ ├── rename
│ │ └── test_ts_tbl_rename.groovy
│ │ ├── replace
│ │ └── test_ts_tbl_replace.groovy
│ │ ├── res_inverted_idx
│ │ └── test_ts_tbl_res_inverted_idx.groovy
│ │ ├── res_mow
│ │ └── test_ts_table_res_mow.groovy
│ │ ├── res_row_storage
│ │ └── test_ts_tbl_res_row_storage.groovy
│ │ ├── res_variant
│ │ └── test_ts_tbl_res_variant.groovy
│ │ ├── truncate
│ │ └── test_ts_tbl_truncate.groovy
│ │ ├── txn_insert
│ │ └── test_ts_tbl_txn_insert.groovy
│ │ └── unique_key
│ │ └── test_ts_tbl_unique_key.groovy
│ ├── table_sync_alias
│ ├── alt_prop
│ │ ├── bloom_filter
│ │ │ └── test_tsa_alt_prop_bloom_filter.groovy
│ │ ├── bucket
│ │ │ └── test_tsa_alt_prop_bucket.groovy
│ │ ├── colocate
│ │ │ └── test_tsa_alt_prop_colocate_with.groovy
│ │ ├── comment
│ │ │ └── test_tsa_alt_prop_comment.groovy
│ │ ├── compaction
│ │ │ └── test_tsa_alt_prop_compaction.groovy
│ │ ├── distribution_num
│ │ │ └── test_tsa_alt_prop_distr_num.groovy
│ │ ├── distribution_type
│ │ │ └── test_tsa_alt_prop_distr_type.groovy
│ │ ├── dy_part
│ │ │ └── test_tsa_alt_prop_dy_pary.groovy
│ │ ├── light_sc
│ │ │ └── test_tsa_alt_prop_light_schema_change.groovy
│ │ ├── row_store
│ │ │ └── test_tsa_alt_prop_row_store.groovy
│ │ ├── sequence
│ │ │ └── test_tsa_alt_prop_seq.groovy
│ │ ├── skip_bitmap_col
│ │ │ └── test_tsa_alt_prop_skip_bitmap_col.groovy
│ │ ├── storage_policy
│ │ │ └── test_tsa_alt_prop_stor_policy.groovy
│ │ └── synced
│ │ │ └── test_tsa_alt_prop_synced.groovy
│ ├── auto_increment
│ │ ├── duplicate
│ │ │ └── test_tsa_auto_incerment_duplicate.groovy
│ │ ├── unique
│ │ │ └── test_tsa_auto_incerment_unique.groovy
│ │ └── update
│ │ │ ├── key
│ │ │ └── test_tsa_auto_incer_update_key.groovy
│ │ │ └── val
│ │ │ └── test_tsa_auto_incer_update_val.groovy
│ ├── column
│ │ ├── add_value
│ │ │ └── test_tsa_column_add.groovy
│ │ ├── alter_comment
│ │ │ └── test_tsa_col_alter_comment.groovy
│ │ ├── alter_type
│ │ │ └── test_tsa_col_alter_type.groovy
│ │ ├── basic
│ │ │ └── test_tsa_col_basic.groovy
│ │ ├── drop_key
│ │ │ └── test_tsa_col_drop_key_col.groovy
│ │ ├── drop_val
│ │ │ └── test_tsa_col_drop_val_col.groovy
│ │ ├── order_by
│ │ │ └── test_tsa_col_order_by.groovy
│ │ ├── rename
│ │ │ └── test_tsa_col_rename.groovy
│ │ ├── rollup
│ │ │ ├── add_drop
│ │ │ │ └── test_tsa_rollup_add_drop.groovy
│ │ │ └── rename
│ │ │ │ └── test_tsa_rollup_rename.groovy
│ │ └── rollup_col
│ │ │ ├── add
│ │ │ └── test_tsa_rollup_col_add.groovy
│ │ │ ├── drop
│ │ │ └── test_tsa_rollup_col_drop.groovy
│ │ │ └── order_by
│ │ │ └── test_tsa_rollup_col_order_by.groovy
│ ├── delete
│ │ ├── mor
│ │ │ └── test_tsa_delete_mor.groovy
│ │ └── mow
│ │ │ └── test_tsa_delete_mow.groovy
│ ├── dml
│ │ ├── delete
│ │ │ └── test_tsa_dml_delete.groovy
│ │ ├── insert_overwrite
│ │ │ └── test_tsa_dml_insert_overwrite.groovy
│ │ └── update_unique
│ │ │ └── test_tsa_dml_update_unique.groovy
│ ├── idx_bf
│ │ ├── add_drop
│ │ │ └── test_tsa_idx_bf_add_drop.groovy
│ │ └── fpp
│ │ │ └── test_tsa_idx_bf_fpp.groovy
│ ├── idx_ngbf
│ │ └── add_drop
│ │ │ └── test_tsa_idx_ngbf_add_drop.groovy
│ ├── index
│ │ └── create_drop_inverted
│ │ │ └── test_tsa_index_create_drop_inverted.groovy
│ ├── inverted
│ │ └── add_drop
│ │ │ └── test_tsa_idx_inverted_add_build_drop.groovy
│ ├── mv
│ │ └── create_drop
│ │ │ └── test_tsa_mv_create_drop.groovy
│ ├── partition
│ │ ├── add_drop
│ │ │ └── test_tsa_part_add_drop.groovy
│ │ ├── alter
│ │ │ └── test_tsa_part_alter.groovy
│ │ └── rename
│ │ │ └── test_tsa_part_rename.groovy
│ ├── sequence
│ │ ├── column
│ │ │ └── test_tsa_sequence_column.groovy
│ │ └── type
│ │ │ └── test_tsa_sequence_type.groovy
│ └── table
│ │ ├── aggregate_key
│ │ └── test_tsa_tbl_aggregate_key.groovy
│ │ ├── duplicate
│ │ └── test_tsa_tbl_duplicate.groovy
│ │ ├── modify_comment
│ │ └── test_tsa_table_modify_comment.groovy
│ │ ├── part_bucket
│ │ └── test_tsa_tbl_part_bucket.groovy
│ │ ├── replace
│ │ └── test_tsa_tbl_replace.groovy
│ │ ├── res_mow
│ │ └── test_tsa_table_res_mow.groovy
│ │ ├── res_row_storage
│ │ └── test_tsa_tbl_res_row_storage.groovy
│ │ ├── res_variant
│ │ └── test_tsa_tbl_res_variant.groovy
│ │ ├── truncate
│ │ └── test_tsa_tbl_truncate.groovy
│ │ └── unique_key
│ │ └── test_tsa_tbl_unique_key.groovy
│ ├── table_sync_alias_dep
│ ├── column
│ │ ├── add_key
│ │ │ └── test_tsad_column_add_key.groovy
│ │ ├── add_value
│ │ │ └── test_tsad_column_add_value.groovy
│ │ ├── drop_key
│ │ │ └── test_tsad_column_drop_key.groovy
│ │ ├── drop_value
│ │ │ └── test_tsad_column_drop_value.groovy
│ │ ├── order_by
│ │ │ └── test_tsad_column_order_by.groovy
│ │ └── rename_column
│ │ │ └── test_tsad_column_rename_column.groovy
│ ├── dml
│ │ └── insert
│ │ │ └── test_tsad_dml_insert.groovy
│ ├── index
│ │ ├── add_bf
│ │ │ └── test_tsad_index_add_bf.groovy
│ │ ├── add_inverted
│ │ │ └── test_tsad_index_add_inverted.groovy
│ │ ├── add_ng_bf
│ │ │ └── test_tsad_index_add_ng_bf.groovy
│ │ ├── drop_bf
│ │ │ └── test_tsad_index_drop_bf.groovy
│ │ ├── drop_inverted
│ │ │ └── test_tsad_index_drop_inverted.groovy
│ │ └── drop_ng_bf
│ │ │ └── test_tsad_index_drop_ng_bf.groovy
│ ├── partition
│ │ ├── drop
│ │ │ └── test_tsa_dep_part_drop.groovy
│ │ ├── rename
│ │ │ └── test_tsa_dep_part_rename.groovy
│ │ └── replace
│ │ │ └── test_tsa_dep_part_replace.groovy
│ ├── rollup
│ │ ├── add
│ │ │ └── test_tsad_rollup_add.groovy
│ │ ├── add_column
│ │ │ └── test_tsad_rollup_add_column.groovy
│ │ ├── drop
│ │ │ └── test_tsad_rollup_drop.groovy
│ │ ├── drop_column
│ │ │ └── test_tsad_rollup_drop_column.groovy
│ │ ├── modify_column
│ │ │ └── test_tsad_rollup_modify_column.groovy
│ │ └── rename
│ │ │ └── test_tsad_rollup_rename.groovy
│ └── table
│ │ ├── replace_true
│ │ └── test_tsa_dep_table_replace_true.groovy
│ │ └── truncate
│ │ └── test_tsa_dep_table_truncate.groovy
│ ├── table_sync_dep
│ ├── column
│ │ ├── add_key
│ │ │ └── test_tsd_column_add_key.groovy
│ │ ├── add_value
│ │ │ └── test_tsd_column_add_value.groovy
│ │ ├── drop_key
│ │ │ └── test_tsd_column_drop_key.groovy
│ │ ├── drop_value
│ │ │ └── test_tsd_column_drop_value.groovy
│ │ ├── order_by
│ │ │ └── test_tsd_column_order_by.groovy
│ │ └── rename_column
│ │ │ └── test_tsd_column_rename_column.groovy
│ ├── dml
│ │ └── insert
│ │ │ └── test_tsd_dml_insert.groovy
│ ├── index
│ │ ├── add_bf
│ │ │ └── test_tsd_index_add_bf.groovy
│ │ ├── add_inverted
│ │ │ └── test_tsd_index_add_inverted.groovy
│ │ ├── add_ng_bf
│ │ │ └── test_tsd_index_add_ng_bf.groovy
│ │ ├── drop_bf
│ │ │ └── test_tsd_index_drop_bf.groovy
│ │ ├── drop_inverted
│ │ │ └── test_tsd_index_drop_inverted.groovy
│ │ └── drop_ng_bf
│ │ │ └── test_tsd_index_drop_ng_bf.groovy
│ ├── partition
│ │ ├── drop
│ │ │ └── test_ts_dep_part_drop.groovy
│ │ ├── rename
│ │ │ └── test_ts_dep_part_rename.groovy
│ │ └── replace
│ │ │ └── test_ts_dep_part_replace.groovy
│ ├── rollup
│ │ ├── add
│ │ │ └── test_tsd_rollup_add.groovy
│ │ ├── add_column
│ │ │ └── test_tsd_rollup_add_column.groovy
│ │ ├── drop
│ │ │ └── test_tsd_rollup_drop.groovy
│ │ ├── drop_column
│ │ │ └── test_tsd_rollup_drop_column.groovy
│ │ ├── modify_column
│ │ │ └── test_tsd_rollup_modify_column.groovy
│ │ └── rename
│ │ │ └── test_tsd_rollup_rename.groovy
│ └── table
│ │ ├── replace_true
│ │ └── test_ts_dep_table_replace_true.groovy
│ │ └── truncate
│ │ └── test_ts_dep_table_truncate.groovy
│ ├── tbl_sync_absorb
│ ├── col
│ │ ├── add_key
│ │ │ └── test_ts_absorb_col_add_key.groovy
│ │ ├── add_key_drop
│ │ │ ├── key
│ │ │ │ └── test_ts_absorb_col_add_key_drop_key.groovy
│ │ │ └── val
│ │ │ │ └── test_ts_absorb_col_add_key_drop_val.groovy
│ │ ├── add_key_mod
│ │ │ └── test_ts_absorb_col_add_key_mod.groovy
│ │ ├── add_val
│ │ │ └── test_ts_absorb_col_add_val.groovy
│ │ ├── add_val_drop
│ │ │ ├── key
│ │ │ │ └── test_ts_absorb_col_add_val_drop_key.groovy
│ │ │ └── val
│ │ │ │ └── test_ts_absorb_col_add_val_drop_val.groovy
│ │ ├── add_val_mod
│ │ │ └── test_ts_absorb_col_add_val_mod.groovy
│ │ ├── drop_key
│ │ │ └── test_ts_absorb_col_drop_key.groovy
│ │ ├── drop_key_add
│ │ │ └── test_ts_absorb_col_drop_key_add.groovy
│ │ ├── drop_val_add
│ │ │ └── test_ts_absorb_col_drop_val_add.groovy
│ │ ├── drop_value
│ │ │ └── test_ts_absorb_col_drop_value.groovy
│ │ ├── modify_comm
│ │ │ └── test_ts_absorb_col_modify_comment.groovy
│ │ ├── order_by
│ │ │ └── test_ts_absorb_col_order_by.groovy
│ │ └── rename
│ │ │ └── test_ts_absorb_col_rename.groovy
│ ├── delete
│ │ ├── mor
│ │ │ └── test_ts_absorb_delete_mor.groovy
│ │ └── mow
│ │ │ └── test_ts_absorb_delete_mow.groovy
│ ├── index
│ │ ├── add_bf
│ │ │ └── test_ts_absorb_idx_add_bf.groovy
│ │ ├── add_inverted
│ │ │ └── test_ts_absorb_idx_add_inverted.groovy
│ │ ├── add_ng_bf
│ │ │ └── test_ts_absorb_idx_add_ng_bf.groovy
│ │ ├── drop_bf
│ │ │ └── test_ts_absorb_idx_drop_bf.groovy
│ │ ├── drop_inverted
│ │ │ └── test_ts_absorb_idx_drop_inverted.groovy
│ │ └── drop_ng_bf
│ │ │ └── test_ts_absorb_idx_drop_ng_bf.groovy
│ ├── insert
│ │ └── test_ts_absorb_dml_insert.groovy
│ ├── mv
│ │ ├── create
│ │ │ └── test_ts_absorb_mv_create.groovy
│ │ └── drop
│ │ │ └── test_ts_absorb_mv_drop.groovy
│ ├── partition
│ │ ├── add
│ │ │ └── test_ts_absorb_part_add.groovy
│ │ ├── add_drop
│ │ │ └── test_ts_absorb_part_add_drop.groovy
│ │ ├── add_rename
│ │ │ └── test_ts_absorb_part_add_rename.groovy
│ │ ├── drop
│ │ │ └── test_ts_absorb_part_drop.groovy
│ │ ├── drop_add
│ │ │ └── test_ts_absorb_part_drop_add.groovy
│ │ ├── rename
│ │ │ └── test_ts_absorb_part_rename.groovy
│ │ └── replace
│ │ │ └── test_ts_absorb_part_replace.groovy
│ ├── rollup
│ │ ├── add
│ │ │ └── test_ts_absorb_rollup_add.groovy
│ │ ├── add_col
│ │ │ └── test_ts_absorb_rollup_add_col.groovy
│ │ ├── drop
│ │ │ └── test_ts_absorb_rollup_drop.groovy
│ │ ├── drop_col
│ │ │ └── test_ts_absorb_rollup_drop_col.groovy
│ │ ├── modify_col
│ │ │ └── test_ts_absorb_rollup_modify_col.groovy
│ │ └── rename_col
│ │ │ └── test_ts_absorb_rollup_rename_col.groovy
│ └── tbl
│ │ ├── alt_comment
│ │ └── test_ts_absorb_tbl_alt_comment.groovy
│ │ ├── alt_prop
│ │ ├── bfilter
│ │ │ └── test_ts_absorb_tbl_alt_prop_bloom_filter.groovy
│ │ ├── bucket
│ │ │ └── test_ts_absorb_tbl_alt_prop_bucket.groovy
│ │ ├── colocate
│ │ │ └── test_ts_absorb_tbl_alt_prop_colocate.groovy
│ │ ├── comm
│ │ │ └── test_ts_absorb_tbl_alt_prop_comment.groovy
│ │ ├── compact
│ │ │ └── test_ts_absorb_tbl_alt_prop_compaction.groovy
│ │ ├── dist_num
│ │ │ └── test_ts_absorb_tbl_alt_prop_distr_num.groovy
│ │ ├── dist_tp
│ │ │ └── test_ts_absorb_tbl_alt_prop_distr_type.groovy
│ │ ├── dy_part
│ │ │ └── test_ts_absorb_tbl_alt_prop_dy_part.groovy
│ │ ├── dy_pt_sp
│ │ │ └── test_ts_absorb_tbl_alt_prop_dynamic_partition_sp.groovy
│ │ ├── row_store
│ │ │ └── test_ts_absorb_tbl_alt_prop_row_store.groovy
│ │ ├── sequence
│ │ │ └── test_ts_absorb_alt_prop_seq.groovy
│ │ ├── sk_bitm_col
│ │ │ └── test_ts_absorb_tbl_alt_prop_skip_bitmap_column.groovy
│ │ ├── stor_poli
│ │ │ └── test_ts_absorb_tbl_alt_prop_storage_policy.groovy
│ │ └── synced
│ │ │ └── test_ts_absorb_tbl_alt_prop_synced.groovy
│ │ ├── replace
│ │ ├── false
│ │ │ └── test_ts_absorb_tbl_replace_false.groovy
│ │ └── true
│ │ │ └── test_ts_absorb_tbl_replace_true.groovy
│ │ └── truncate
│ │ └── test_ts_absorb_tbl_truncate.groovy
│ └── tbl_sync_alias_abs
│ ├── col
│ ├── add_key
│ │ └── test_tsa_absorb_col_add_key.groovy
│ ├── add_key_drop
│ │ ├── key
│ │ │ └── test_tsa_absorb_col_add_key_drop_key.groovy
│ │ └── val
│ │ │ └── test_tsa_absorb_col_add_key_drop_val.groovy
│ ├── add_key_mod
│ │ └── test_tsa_absorb_col_add_key_mod.groovy
│ ├── add_val
│ │ └── test_tsa_absorb_col_add_val.groovy
│ ├── add_val_drop
│ │ ├── key
│ │ │ └── test_tsa_absorb_col_add_val_drop_key.groovy
│ │ └── val
│ │ │ └── test_tsa_absorb_col_add_val_drop_val.groovy
│ ├── add_val_mod
│ │ └── test_tsa_absorb_col_add_val_mod.groovy
│ ├── drop_key
│ │ └── test_tsa_absorb_col_drop_key.groovy
│ ├── drop_key_add
│ │ └── test_tsa_absorb_col_drop_key_add.groovy
│ ├── drop_val_add
│ │ └── test_tsa_absorb_col_drop_val_add.groovy
│ ├── drop_value
│ │ └── test_tsa_absorb_col_drop_value.groovy
│ ├── modify_comm
│ │ └── test_tsa_absorb_col_modify_comment.groovy
│ ├── order_by
│ │ └── test_tsa_absorb_col_order_by.groovy
│ └── rename
│ │ └── test_tsa_absorb_col_rename.groovy
│ ├── delete
│ ├── mor
│ │ └── test_tsa_absorb_delete_mor.groovy
│ └── mow
│ │ └── test_tsa_absorb_delete_mow.groovy
│ ├── index
│ ├── add_bf
│ │ └── test_tsa_absorb_idx_add_bf.groovy
│ ├── add_inverted
│ │ └── test_tsa_absorb_idx_add_inverted.groovy
│ ├── add_ng_bf
│ │ └── test_tsa_absorb_idx_add_ng_bf.groovy
│ ├── drop_bf
│ │ └── test_tsa_absorb_idx_drop_bf.groovy
│ ├── drop_inverted
│ │ └── test_tsa_absorb_idx_drop_inverted.groovy
│ └── drop_ng_bf
│ │ └── test_tsa_absorb_idx_drop_ng_bf.groovy
│ ├── insert
│ └── test_tsa_absorb_dml_insert.groovy
│ ├── mv
│ ├── create
│ │ └── test_tsa_absorb_mv_create.groovy
│ └── drop
│ │ └── test_tsa_absorb_mv_drop.groovy
│ ├── partition
│ ├── add
│ │ └── test_tsa_absorb_part_add.groovy
│ ├── add_drop
│ │ └── test_tsa_absorb_part_add_drop.groovy
│ ├── add_rename
│ │ └── test_tsa_absorb_part_add_rename.groovy
│ ├── drop
│ │ └── test_tsa_absorb_part_drop.groovy
│ ├── drop_add
│ │ └── test_tsa_absorb_part_drop_add.groovy
│ ├── rename
│ │ └── test_tsa_absorb_part_rename.groovy
│ └── replace
│ │ └── test_tsa_absorb_part_replace.groovy
│ ├── rollup
│ ├── add
│ │ └── test_tsa_absorb_rollup_add.groovy
│ ├── add_col
│ │ └── test_tsa_absorb_rollup_add_col.groovy
│ ├── drop
│ │ └── test_tsa_absorb_rollup_drop.groovy
│ ├── drop_col
│ │ └── test_tsa_absorb_rollup_drop_col.groovy
│ ├── modify_col
│ │ └── test_tsa_absorb_rollup_modify_col.groovy
│ └── rename_col
│ │ └── test_tsa_absorb_rollup_rename_col.groovy
│ └── tbl
│ ├── alt_comment
│ └── test_tsa_absorb_tbl_alt_comment.groovy
│ ├── alt_prop
│ ├── bfilter
│ │ └── test_tsa_absorb_tbl_alt_prop_bloom_filter.groovy
│ ├── bucket
│ │ └── test_tsa_absorb_tbl_alt_prop_bucket.groovy
│ ├── colocate
│ │ └── test_tsa_absorb_tbl_alt_prop_colocate.groovy
│ ├── comm
│ │ └── test_tsa_absorb_tbl_alt_prop_comment.groovy
│ ├── compact
│ │ └── test_tsa_absorb_tbl_alt_prop_compaction.groovy
│ ├── dist_num
│ │ └── test_tsa_absorb_tbl_alt_prop_distr_num.groovy
│ ├── dist_tp
│ │ └── test_tsa_absorb_tbl_alt_prop_distr_type.groovy
│ ├── dy_part
│ │ └── test_tsa_absorb_tbl_alt_prop_dy_part.groovy
│ ├── dy_pt_sp
│ │ └── test_tsa_absorb_tbl_alt_prop_dynamic_partition_sp.groovy
│ ├── row_store
│ │ └── test_tsa_absorb_tbl_alt_prop_row_store.groovy
│ ├── sequence
│ │ └── test_tsa_absorb_alt_prop_seq.groovy
│ ├── sk_bitm_col
│ │ └── test_tsa_absorb_tbl_alt_prop_skip_bitmap_column.groovy
│ ├── stor_poli
│ │ └── test_tsa_absorb_tbl_alt_prop_storage_policy.groovy
│ └── synced
│ │ └── test_tsa_absorb_tbl_alt_prop_synced.groovy
│ ├── replace
│ ├── false
│ │ └── test_tsa_absorb_tbl_replace_false.groovy
│ └── true
│ │ └── test_tsa_absorb_tbl_replace_true.groovy
│ └── truncate
│ └── test_tsa_absorb_tbl_truncate.groovy
└── shell
├── db.conf
├── desync.sh
├── enable_db_binlog.sh
├── start_syncer.sh
└── stop_syncer.sh
/.github/workflows/go.yml:
--------------------------------------------------------------------------------
1 | # This workflow will build a golang project
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
3 |
4 | name: Go
5 |
6 | on:
7 | push:
8 | branches: [ "dev", "branch-2.0", "branch-2.1", "branch-3.0" ]
9 | pull_request:
10 |
11 | jobs:
12 | build:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v3
16 |
17 | - name: Set up Go
18 | uses: actions/setup-go@v4
19 | with:
20 | go-version: '1.20'
21 |
22 | - name: Format
23 | run: if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then exit 1; fi
24 |
25 | - name: Build
26 | run: make
27 |
28 | - name: Test
29 | run: make test
30 |
--------------------------------------------------------------------------------
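The same checks can be reproduced locally before pushing; a minimal sketch, assuming GNU make and Go 1.20 are on PATH:

    gofmt -s -l .    # must print nothing, otherwise the Format step fails
    make             # same as the Build step
    make test        # same as the Test step
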
/.github/workflows/golangci-lint.yml:
--------------------------------------------------------------------------------
1 | name: golangci-lint
2 | on:
3 | push:
4 | branches:
5 | - main
6 | - dev
7 | - branch-3.0
8 | - branch-2.1
9 | - branch-2.0
10 | pull_request:
11 |
12 | permissions:
13 | contents: read
14 | # Optional: allow read access to pull request. Use with `only-new-issues` option.
15 | # pull-requests: read
16 |
17 | jobs:
18 | golangci:
19 | name: lint
20 | runs-on: ubuntu-latest
21 | steps:
22 | - uses: actions/checkout@v4
23 | - uses: actions/setup-go@v5
24 | with:
25 | go-version: '1.20'
26 | - name: golangci-lint
27 | uses: golangci/golangci-lint-action@v6
28 | with:
29 | version: v1.60
30 |
--------------------------------------------------------------------------------
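To get the same feedback locally, the pinned linter can be run directly; a sketch, assuming golangci-lint v1.60 is installed:

    golangci-lint run ./...
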
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | .vscode
3 | .envrc
4 | bin
5 | output
6 | ccr.db
7 | tarball
8 |
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
1 | ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
2 | export SYNCER_HOME="${ROOT}"
3 |
4 | OPTS="$(getopt \
5 | -n "$0" \
6 | -o 'j:' \
7 | -l 'clean' \
8 | -l 'output:' \
9 | -- "$@")"
10 |
11 | eval set -- "${OPTS}"
12 |
13 | SYNCER_OUTPUT="${SYNCER_HOME}/output"
14 | PARALLEL="$(($(nproc) / 4 + 1))"
15 | while true; do
16 | case "$1" in
17 | -j)
18 | PARALLEL="$2"
19 | shift 2
20 | ;;
21 | --clean)
22 | CLEAN=1
23 | shift
24 | ;;
25 | --output)
26 | SYNCER_OUTPUT="$2"
27 | shift 2
28 | ;;
29 | --)
30 | shift
31 | break
32 | ;;
33 | *)
34 | echo "Internal error, opt: $OPTS"
35 | exit 1
36 | ;;
37 | esac
38 | done
39 |
40 | export GOMAXPROCS=${PARALLEL}
41 |
42 | mkdir -p ${SYNCER_OUTPUT}/bin
43 | mkdir -p ${SYNCER_OUTPUT}/log
44 | mkdir -p ${SYNCER_OUTPUT}/db
45 |
46 | if [[ "${CLEAN}" -eq 1 ]]; then
47 | rm -rf ${SYNCER_HOME}/bin
48 | exit 0
49 | fi
50 |
51 | make ccr_syncer
52 |
53 | cp ${SYNCER_HOME}/bin/ccr_syncer ${SYNCER_OUTPUT}/bin/
54 | cp ${SYNCER_HOME}/shell/* ${SYNCER_OUTPUT}/bin/
55 | cp -r ${SYNCER_HOME}/doc ${SYNCER_OUTPUT}/
56 | cp ${SYNCER_HOME}/CHANGELOG.md ${SYNCER_OUTPUT}/
57 | cp ${SYNCER_HOME}/README.md ${SYNCER_OUTPUT}/
58 |
--------------------------------------------------------------------------------
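A usage sketch for build.sh, based only on the options parsed above; the job count and output path are illustrative:

    bash build.sh -j 8 --output /tmp/ccr-syncer-output   # build with 8 jobs into a custom output directory
    bash build.sh --clean                                 # remove previously built binaries and exit
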
/cmd/ccr_syncer/signal_mux.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package main
18 |
19 | import (
20 | "os"
21 | "os/signal"
22 | "syscall"
23 |
24 | log "github.com/sirupsen/logrus"
25 | )
26 |
27 | type SignalMux struct {
28 | sigChan chan os.Signal
29 | handler func(os.Signal) bool
30 | }
31 |
32 | func NewSignalMux(handler func(os.Signal) bool) *SignalMux {
33 | sigChan := make(chan os.Signal, 1)
34 | signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGHUP)
35 |
36 | if handler == nil {
37 | log.Panic("signal handler is nil")
38 | }
39 |
40 | return &SignalMux{
41 | sigChan: sigChan,
42 | handler: handler,
43 | }
44 | }
45 |
46 | func (s *SignalMux) Serve() {
47 | for {
48 | signal := <-s.sigChan
49 | log.Infof("receive signal: %s", signal.String())
50 |
51 | if s.handler(signal) {
52 | return
53 | }
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
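A minimal sketch (not part of the repository) of how SignalMux could be wired up from the same package main; the reload/shutdown split in the handler is an assumption:

    // example_signal_mux.go (hypothetical file next to signal_mux.go)
    package main

    import (
    	"os"
    	"syscall"

    	log "github.com/sirupsen/logrus"
    )

    func serveSignals() {
    	mux := NewSignalMux(func(sig os.Signal) bool {
    		// Returning false keeps Serve() looping; returning true lets it return.
    		if sig == syscall.SIGHUP {
    			log.Info("SIGHUP received, treating it as a no-op reload")
    			return false
    		}
    		log.Infof("signal %s received, shutting down", sig)
    		return true
    	})
    	mux.Serve() // blocks until the handler returns true
    }
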
/cmd/get_lag/get_lag.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package main
18 |
19 | import (
20 | "flag"
21 |
22 | "github.com/selectdb/ccr_syncer/pkg/ccr/base"
23 | "github.com/selectdb/ccr_syncer/pkg/rpc"
24 | u "github.com/selectdb/ccr_syncer/pkg/utils"
25 | log "github.com/sirupsen/logrus"
26 | )
27 |
28 | // commit_seq flag default 0
29 | var (
30 | commitSeq int64
31 | )
32 |
33 | func init_flags() {
34 | flag.Int64Var(&commitSeq, "commit_seq", 0, "commit_seq")
35 | flag.Parse()
36 | }
37 |
38 | func init() {
39 | init_flags()
40 | u.InitLog()
41 | }
42 |
43 | func test_get_lag(spec *base.Spec) {
44 | rpcFactory := rpc.NewRpcFactory()
45 | 	feRpc, err := rpcFactory.NewFeRpc(spec)
46 | 	if err != nil {
47 | 		panic(err)
48 | 	}
49 | 	resp, err := feRpc.GetBinlogLag(spec, commitSeq)
50 | 	// resp, err := feRpc.GetBinlog(spec, commitSeq)
51 | if err != nil {
52 | panic(err)
53 | }
54 | log.Infof("resp: %v", resp)
55 | log.Infof("lag: %d", resp.GetLag())
56 | }
57 |
58 | func main() {
59 | src := &base.Spec{
60 | Frontend: base.Frontend{
61 | Host: "localhost",
62 | Port: "9030",
63 | ThriftPort: "9020",
64 | },
65 | User: "root",
66 | Password: "",
67 | Database: "ccr",
68 | Table: "",
69 | }
70 |
71 | test_get_lag(src)
72 | }
73 |
--------------------------------------------------------------------------------
/cmd/get_master_token/get_master_token.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package main
18 |
19 | import (
20 | "fmt"
21 |
22 | "github.com/selectdb/ccr_syncer/pkg/ccr/base"
23 | "github.com/selectdb/ccr_syncer/pkg/rpc"
24 | )
25 |
26 | func test_get_master_token(spec *base.Spec) {
27 | rpcFactory := rpc.NewRpcFactory()
28 | 	feRpc, err := rpcFactory.NewFeRpc(spec)
29 | 	if err != nil {
30 | 		panic(err)
31 | 	}
32 | 	token, err := feRpc.GetMasterToken(spec)
33 | if err != nil {
34 | panic(err)
35 | }
36 | fmt.Printf("token: %v\n", token)
37 | }
38 |
39 | func main() {
40 | // init_log()
41 |
42 | src := &base.Spec{
43 | Frontend: base.Frontend{
44 | Host: "localhost",
45 | Port: "9030",
46 | ThriftPort: "9020",
47 | },
48 | User: "root",
49 | Password: "",
50 | Cluster: "",
51 | Database: "ccr",
52 | Table: "src_1",
53 | }
54 |
55 | test_get_master_token(src)
56 | }
57 |
--------------------------------------------------------------------------------
/cmd/json_t/json_t.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package main
18 |
19 | import (
20 | "encoding/json"
21 | "fmt"
22 | )
23 |
24 | type JsonT struct {
25 | Id int `json:"id"`
26 | IdP *int `json:"id_p"`
27 | Value interface{} `json:"value"`
28 | }
29 |
30 | type inMemory struct {
31 | Uid int `json:"uid"`
32 | Name string `json:"name"`
33 | }
34 |
35 | type Map struct {
36 | Ids map[int64]int64 `json:"ids"`
37 | }
38 |
39 | func main() {
40 | // ids := make(map[int64]int64)
41 | idsMap := Map{}
42 | idsData, err := json.Marshal(&idsMap)
43 | if err != nil {
44 | panic(err)
45 | }
46 | fmt.Printf("%s\n", string(idsData))
47 |
48 | inMemoryV := inMemory{
49 | Uid: 1,
50 | Name: "test",
51 | }
52 |
53 | idP := 10
54 | jsonT := JsonT{
55 | Id: 1,
56 | IdP: &idP,
57 | Value: &inMemoryV,
58 | }
59 |
60 | data, err := json.Marshal(&jsonT)
61 | if err != nil {
62 | panic(err)
63 | }
64 | fmt.Printf("%s\n", string(data))
65 |
66 | // j2 := JsonT{}
67 | // if err := json.Unmarshal(data, &j2); err != nil {
68 | // panic(err)
69 | // }
70 | // inMemory2 := j2.Value.(*inMemory)
71 | // fmt.Printf("%+v\n", inMemory2)
72 | }
73 |
--------------------------------------------------------------------------------
/cmd/metrics/metrics_demo.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package main
18 |
19 | import (
20 | "log"
21 | "net/http"
22 | "time"
23 |
24 | "github.com/hashicorp/go-metrics"
25 | prometheussink "github.com/hashicorp/go-metrics/prometheus"
26 | "github.com/prometheus/client_golang/prometheus/promhttp"
27 | )
28 |
29 | func promHttp() {
30 | http.Handle("/metrics", promhttp.Handler())
31 | log.Fatal(http.ListenAndServe(":8080", nil))
32 | }
33 |
34 | func main() {
35 | go promHttp()
36 | sink, _ := prometheussink.NewPrometheusSink()
37 | metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
38 | metrics.SetGauge([]string{"foo"}, 42)
39 | metrics.EmitKey([]string{"bar"}, 30)
40 | metrics.IncrCounter([]string{"baz"}, 42)
41 | metrics.IncrCounter([]string{"baz"}, 1)
42 | metrics.IncrCounter([]string{"baz"}, 80)
43 | metrics.AddSample([]string{"method", "wow"}, 42)
44 | metrics.AddSample([]string{"method", "wow"}, 100)
45 | metrics.AddSample([]string{"method", "wow"}, 22)
46 | time.Sleep(10000000 * time.Second)
47 | }
48 |
--------------------------------------------------------------------------------
/cmd/rows_parse/rows_parse.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package main
18 |
19 | import (
20 | "github.com/selectdb/ccr_syncer/pkg/ccr/base"
21 | "github.com/selectdb/ccr_syncer/pkg/utils"
22 | log "github.com/sirupsen/logrus"
23 | )
24 |
25 | func init() {
26 | utils.InitLog()
27 | }
28 |
29 | func main() {
30 | src := &base.Spec{
31 | Frontend: base.Frontend{
32 | Host: "localhost",
33 | Port: "56131",
34 | ThriftPort: "54130",
35 | },
36 | User: "root",
37 | Password: "",
38 | Database: "ccr",
39 | Table: "",
40 | }
41 |
42 | 	db, err := src.Connect()
43 | 	if err != nil {
44 | 		log.Fatalf("connect to doris failed: %v", err)
45 | 	}
46 | 
47 | 	query := "ADMIN SHOW FRONTEND CONFIG LIKE \"%%enable_feature_binlog%%\""
48 | 	rows, err := db.Query(query)
49 | 	if err != nil {
50 | 		log.Fatalf("query %s failed: %v", query, err)
51 | 	}
52 | 	defer rows.Close()
53 | 
54 | 	for rows.Next() {
55 | 		rowParser := utils.NewRowParser()
56 | 		if err := rowParser.Parse(rows); err != nil {
57 | 			log.Fatalf("rows parse failed: %v", err)
58 | 		}
59 | 		enable, err := rowParser.GetBool("Value")
60 | 		if err != nil {
61 | 			log.Fatalf("get bool value failed: %v", err)
62 | 		}
63 | 		log.Infof("row: %v", enable)
64 | 	}
65 | }
66 |
--------------------------------------------------------------------------------
/devtools/ccr_db.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # use curl to post json rpc to 127.0.0.1:9190/create_ccr
4 | # json map to below golang struct
5 | # type Spec struct {
6 | # Host string `json:"host"`
7 | # Port string `json:"port"`
8 | # ThriftPort string `json:"thrift_port"`
9 | # User string `json:"user"`
10 | # Password string `json:"password"`
11 | # Cluster string `json:"cluster"`
12 | # Database string `json:"database"`
13 | # Table string `json:"table"`
14 | # }
15 | # type CreateCcrRequest struct {
16 | # Src ccr.Spec `json:"src"`
17 | # Dest ccr.Spec `json:"dest"`
18 | # }
19 | # src := ccr.Spec{
20 | # Host: "localhost",
21 | # Port: "9030",
22 | # User: "root",
23 | # Password: "",
24 | # Database: "demo",
25 | # Table: "example_tbl",
26 | # }
27 | # dest := ccr.Spec{
28 | # Host: "localhost",
29 | # Port: "9030",
30 | # User: "root",
31 | # Password: "",
32 | # Database: "ccrt",
33 | # Table: "copy",
34 | # }
35 |
36 | curl -X POST -H "Content-Type: application/json" -d '{
37 | "name": "ccr_test",
38 | "src": {
39 | "host": "localhost",
40 | "port": "9030",
41 | "thrift_port": "9020",
42 | "user": "root",
43 | "password": "",
44 | "database": "ccr",
45 | "table": ""
46 | },
47 | "dest": {
48 | "host": "localhost",
49 | "port": "29030",
50 | "thrift_port": "29020",
51 | "user": "root",
52 | "password": "",
53 | "database": "ccr",
54 | "table": ""
55 | }
56 | }' http://127.0.0.1:9190/create_ccr
57 |
--------------------------------------------------------------------------------
/devtools/ccr_lightningschemachange.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "ccr_partition",
5 | "src": {
6 | "host": "localhost",
7 | "port": "9030",
8 | "thrift_port": "9020",
9 | "user": "root",
10 | "password": "",
11 | "database": "ccr",
12 | "table": "test_ddl"
13 | },
14 | "dest": {
15 | "host": "localhost",
16 | "port": "29030",
17 | "thrift_port": "29020",
18 | "user": "root",
19 | "password": "",
20 | "database": "ccr",
21 | "table": "test_ddl"
22 | }
23 | }' http://127.0.0.1:9190/create_ccr
24 |
--------------------------------------------------------------------------------
/devtools/ccr_partition.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "ccr_partition",
5 | "src": {
6 | "host": "localhost",
7 | "port": "9030",
8 | "thrift_port": "9020",
9 | "user": "root",
10 | "password": "",
11 | "database": "ccr",
12 | "table": "editlog_partition"
13 | },
14 | "dest": {
15 | "host": "localhost",
16 | "port": "29030",
17 | "thrift_port": "29020",
18 | "user": "root",
19 | "password": "",
20 | "database": "ccr",
21 | "table": "editlog_partition"
22 | }
23 | }' http://127.0.0.1:9190/create_ccr
24 |
--------------------------------------------------------------------------------
/devtools/ccr_t1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # use curl to post json rpc to 127.0.0.1:9190/create_ccr
4 | # json map to below golang struct
5 | # type Spec struct {
6 | # Host string `json:"host"`
7 | # Port string `json:"port"`
8 | # ThriftPort string `json:"thrift_port"`
9 | # User string `json:"user"`
10 | # Password string `json:"password"`
11 | # Cluster string `json:"cluster"`
12 | # Database string `json:"database"`
13 | # Table string `json:"table"`
14 | # }
15 | # type CreateCcrRequest struct {
16 | # Src ccr.Spec `json:"src"`
17 | # Dest ccr.Spec `json:"dest"`
18 | # }
19 | # src := ccr.Spec{
20 | # Host: "localhost",
21 | # Port: "9030",
22 | # User: "root",
23 | # Password: "",
24 | # Database: "demo",
25 | # Table: "example_tbl",
26 | # }
27 | # dest := ccr.Spec{
28 | # Host: "localhost",
29 | # Port: "9030",
30 | # User: "root",
31 | # Password: "",
32 | # Database: "ccrt",
33 | # Table: "copy",
34 | # }
35 |
36 | curl -X POST -H "Content-Type: application/json" -d '{
37 | "name": "ccr_test",
38 | "src": {
39 | "host": "localhost",
40 | "port": "9030",
41 | "thrift_port": "9020",
42 | "user": "root",
43 | "password": "",
44 | "database": "c1",
45 | "table": "t1"
46 | },
47 | "dest": {
48 | "host": "localhost",
49 | "port": "29030",
50 | "thrift_port": "29020",
51 | "user": "root",
52 | "password": "",
53 | "database": "c1",
54 | "table": "t1"
55 | }
56 | }' http://127.0.0.1:9190/create_ccr
57 |
--------------------------------------------------------------------------------
/devtools/clean.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | rm -rf ccr.db
4 | echo "drop table ccr.src_1;" | mysql -h 127.0.0.1 -P 29030 -uroot
5 |
--------------------------------------------------------------------------------
/devtools/get_lag.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "ccr_test"
5 | }' http://127.0.0.1:9190/get_lag
6 |
--------------------------------------------------------------------------------
/devtools/issue_test/priv.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "priv_test",
5 | "src": {
6 | "host": "localhost",
7 | "port": "9030",
8 | "thrift_port": "9020",
9 | "user": "etl",
10 | "password": "etl%2023",
11 | "database": "tmp",
12 | "table": "ccr_test_src"
13 | },
14 | "dest": {
15 | "host": "localhost",
16 | "port": "9030",
17 | "thrift_port": "9020",
18 | "user": "etl",
19 | "password": "etl%2023",
20 | "database": "tmp",
21 | "table": "ccr_test_dst"
22 | }
23 | }' http://127.0.0.1:9190/create_ccr
24 |
--------------------------------------------------------------------------------
/devtools/pause_job.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "ccr_test"
5 | }' http://127.0.0.1:9190/pause
6 |
--------------------------------------------------------------------------------
/devtools/resume_job.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "ccr_test"
5 | }' http://127.0.0.1:9190/resume
6 |
--------------------------------------------------------------------------------
/devtools/status.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "ccr_test"
5 | }' http://127.0.0.1:9190/job_status
6 |
--------------------------------------------------------------------------------
/devtools/test_alter_job.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "ccr_partition",
5 | "src": {
6 | "host": "localhost",
7 | "port": "9030",
8 | "thrift_port": "9020",
9 | "user": "root",
10 | "password": "",
11 | "database": "ccr",
12 | "table": "test_ddl"
13 | },
14 | "dest": {
15 | "host": "localhost",
16 | "port": "29030",
17 | "thrift_port": "29020",
18 | "user": "root",
19 | "password": "",
20 | "database": "ccr",
21 | "table": "test_ddl"
22 | }
23 | }' http://127.0.0.1:9190/create_ccr
24 |
--------------------------------------------------------------------------------
/devtools/test_ccr_db_table_alias.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "ccr_db_table_alias",
5 | "src": {
6 | "host": "localhost",
7 | "port": "9030",
8 | "thrift_port": "9020",
9 | "user": "root",
10 | "password": "",
11 | "database": "ccr",
12 | "table": "src_1"
13 | },
14 | "dest": {
15 | "host": "localhost",
16 | "port": "9030",
17 | "thrift_port": "9020",
18 | "user": "root",
19 | "password": "",
20 | "database": "dccr",
21 | "table": "src_1_alias"
22 | }
23 | }' http://127.0.0.1:9190/create_ccr
24 |
--------------------------------------------------------------------------------
/devtools/test_ccr_many_rows.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "ccr_table_many_rows",
5 | "src": {
6 | "host": "localhost",
7 | "port": "9030",
8 | "thrift_port": "9020",
9 | "user": "root",
10 | "password": "",
11 | "database": "ccr",
12 | "table": "many"
13 | },
14 | "dest": {
15 | "host": "localhost",
16 | "port": "9030",
17 | "thrift_port": "9020",
18 | "user": "root",
19 | "password": "",
20 | "database": "ccr",
21 | "table": "many_alias"
22 | }
23 | }' http://127.0.0.1:9190/create_ccr
24 |
--------------------------------------------------------------------------------
/devtools/test_ccr_table.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "ccr_test",
5 | "src": {
6 | "host": "localhost",
7 | "port": "9030",
8 | "thrift_port": "9020",
9 | "user": "root",
10 | "password": "",
11 | "database": "ccr",
12 | "table": "src_1"
13 | },
14 | "dest": {
15 | "host": "localhost",
16 | "port": "29030",
17 | "thrift_port": "29020",
18 | "user": "root",
19 | "password": "",
20 | "database": "ccr",
21 | "table": "src_1"
22 | }
23 | }' http://127.0.0.1:9190/create_ccr
24 |
--------------------------------------------------------------------------------
/devtools/test_ccr_table_alias.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "ccr_table_alias",
5 | "src": {
6 | "host": "localhost",
7 | "port": "9030",
8 | "thrift_port": "9020",
9 | "user": "root",
10 | "password": "",
11 | "database": "ccr",
12 | "table": "src_1"
13 | },
14 | "dest": {
15 | "host": "localhost",
16 | "port": "9030",
17 | "thrift_port": "9020",
18 | "user": "root",
19 | "password": "",
20 | "database": "ccr",
21 | "table": "src_1_alias"
22 | },
23 | "skip_error": false
24 | }' http://127.0.0.1:9190/create_ccr
25 |
--------------------------------------------------------------------------------
/devtools/test_ccr_truncate_table.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "ccr_truncate_table",
5 | "src": {
6 | "host": "localhost",
7 | "port": "9030",
8 | "thrift_port": "9020",
9 | "user": "root",
10 | "password": "",
11 | "database": "ccr",
12 | "table": "truncate"
13 | },
14 | "dest": {
15 | "host": "localhost",
16 | "port": "9030",
17 | "thrift_port": "9020",
18 | "user": "root",
19 | "password": "",
20 | "database": "ccr",
21 | "table": "truncate_alias"
22 | }
23 | }' http://127.0.0.1:9190/create_ccr
24 |
--------------------------------------------------------------------------------
/devtools/test_limit_speed.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "test_speed_limit",
5 | "src": {
6 | "host": "localhost",
7 | "port": "9030",
8 | "thrift_port": "9020",
9 | "user": "root",
10 | "password": "",
11 | "database": "ccr",
12 | "table": "github_test_1"
13 | },
14 | "dest": {
15 | "host": "localhost",
16 | "port": "9030",
17 | "thrift_port": "9020",
18 | "user": "root",
19 | "password": "",
20 | "database": "dccr",
21 | "table": "github_test_1_sync"
22 | }
23 | }' http://127.0.0.1:9190/create_ccr
24 |
--------------------------------------------------------------------------------
/devtools/test_rollup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "ccr_partition",
5 | "src": {
6 | "host": "localhost",
7 | "port": "9030",
8 | "thrift_port": "9020",
9 | "user": "root",
10 | "password": "",
11 | "database": "ccr",
12 | "table": "test_rollup"
13 | },
14 | "dest": {
15 | "host": "localhost",
16 | "port": "29030",
17 | "thrift_port": "29020",
18 | "user": "root",
19 | "password": "",
20 | "database": "ccr",
21 | "table": "test_rollup"
22 | }
23 | }' http://127.0.0.1:9190/create_ccr
24 |
--------------------------------------------------------------------------------
/devtools/update_job.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | curl -X POST -H "Content-Type: application/json" -d '{
4 | "name": "ccr_test",
5 | "skip": true
6 | }' http://127.0.0.1:9190/update_job
7 |
--------------------------------------------------------------------------------
/doc/dashboard.md:
--------------------------------------------------------------------------------
1 | # Dashboard Tutorial
2 | The ccr `dashboard` mainly relies on two tools: `Grafana` and `Prometheus`.
3 | ## Grafana
4 | Pick a suitable Grafana build from `https://grafana.com/grafana/download?pg=graf&plcmt=deploy-box-1`. The version below was the latest at the time of writing:
5 | ```bash
6 | wget https://dl.grafana.com/enterprise/release/grafana-enterprise-11.6.0.linux-amd64.tar.gz
7 | tar zxvf grafana-enterprise-11.6.0.linux-amd64.tar.gz
8 | ```
9 | After extracting, enter the directory and start the server:
10 | ```bash
11 | cd grafana-enterprise-11.6.0.linux-amd64
12 | ./bin/grafana server
13 | ```
14 | Grafana is now running on your machine. Open it in a browser (the default port is 3000) and you will see the login page; the default username and password are both admin, and the password can be changed after logging in.
15 | 
16 | Create a new dashboard here, then use Import.
17 | 
18 | Upload the JSON file provided under `ccr-syncer/dashboard` here.
19 | 
20 | Once the dashboard is created, choose Data sources on the left, select Prometheus, and only one setting needs to be changed.
21 | 
22 | Fill in the URL as `http://address:port`; the default port is 9090, adjust as needed. This step can wait a bit: after Prometheus is configured you can click `Save & Test` to verify, though saving it in advance is also fine.
23 | ## Prometheus
24 | You can download the build referenced by Grafana or pick another version from `https://prometheus.io/download/`. The version below was the latest at the time of writing:
25 | ```bash
26 | wget https://github.com/prometheus/prometheus/releases/download/v3.3.0-rc.1/prometheus-3.3.0-rc.1.linux-amd64.tar.gz
27 | tar zxvf prometheus-3.3.0-rc.1.linux-amd64.tar.gz
28 | ```
29 | One configuration change is needed: open `prometheus.yml` and append the following scrape job:
30 | ```yaml
31 |   - job_name: 'ccr-syncer'
32 |     static_configs:
33 |       - targets: ['localhost:9190']
34 | ```
35 | Here 9190 is the ccr syncer's default port; adjust as needed. Then start Prometheus with:
36 | ```bash
37 | ./prometheus --config.file=./prometheus.yml
38 | ```
39 | Open Prometheus in a browser (the default port is 9090) and run the `up` query; if `up{instance="localhost:9190", job="ccr-syncer"}` shows a value of 1, everything is working.
40 |
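41 | If the target stays down, a quick sanity check is to confirm that the syncer actually serves metrics on the scraped port (9190 here, per the config above); this sketch assumes Prometheus's default `/metrics` path:
42 | ```bash
43 | # should print Prometheus-format metric lines when the syncer is reachable
44 | curl -s http://localhost:9190/metrics | head
45 | ```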
46 | Now go back to Grafana and you can view the ccr dashboard.
--------------------------------------------------------------------------------
/doc/db_enable_binlog.md:
--------------------------------------------------------------------------------
1 | # Enable binlog for all tables in a database
2 | ### File layout under the output directory
3 | After the build completes, the output directory is laid out roughly as follows:
4 | ```
5 | output_dir
6 |     bin
7 |         ccr_syncer
8 |         enable_db_binlog.sh
9 |         start_syncer.sh
10 |         stop_syncer.sh
11 |     db
12 |         [ccr.db] # generated after running with the default configuration
13 |     log
14 |         [ccr_syncer.log] # generated after running with the default configuration
15 | ```
16 | **In the rest of this document, enable_db_binlog.sh refers to the enable_db_binlog.sh under this path!**
17 | ### Usage
18 | ```bash
19 | bash bin/enable_db_binlog.sh -h host -p port -u user -P password -d db
20 | ```
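21 | For example, with the FE on its default query port and a database named ccr, an invocation might look like the following (host, port, user, password, and database are placeholders; substitute your own values):
22 | ```bash
23 | bash bin/enable_db_binlog.sh -h 127.0.0.1 -p 9030 -u root -P root_password -d ccr
24 | ```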
--------------------------------------------------------------------------------
/doc/pic/dashboard-1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/selectdb/ccr-syncer/406597489cb17d33b4b37cc7abdf5faa92e33004/doc/pic/dashboard-1.jpeg
--------------------------------------------------------------------------------
/doc/pic/dashboard-2.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/selectdb/ccr-syncer/406597489cb17d33b4b37cc7abdf5faa92e33004/doc/pic/dashboard-2.jpeg
--------------------------------------------------------------------------------
/doc/pic/dashboard-3.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/selectdb/ccr-syncer/406597489cb17d33b4b37cc7abdf5faa92e33004/doc/pic/dashboard-3.jpeg
--------------------------------------------------------------------------------
/doc/pic/dashboard-4.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/selectdb/ccr-syncer/406597489cb17d33b4b37cc7abdf5faa92e33004/doc/pic/dashboard-4.jpeg
--------------------------------------------------------------------------------
/doc/pic/framework.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/selectdb/ccr-syncer/406597489cb17d33b4b37cc7abdf5faa92e33004/doc/pic/framework.png
--------------------------------------------------------------------------------
/doc/pprof.md:
--------------------------------------------------------------------------------
1 | # Using pprof
2 | 
3 | ## About pprof
4 | pprof is the profiling tool in the Go language. It offers four kinds of profiling:
5 | 1. CPU Profiling: CPU usage analysis
6 | 2. Memory Profiling: the program's memory usage
7 | 3. Block Profiling: time goroutines spend waiting on shared resources
8 | 4. Mutex Profiling: only records waits or delays caused by lock contention
9 | CCR has already integrated pprof, so it can be used to analyze CCR's performance.
10 | 
11 | ## Steps to use pprof in CCR
12 | 1. When starting the CCR process, pprof can be enabled by running sh shell/start_syncer.sh --pprof true --pprof_port 8080 --host x.x.x.x --daemon
13 | 2. Open http://x.x.x.x:8080/debug/pprof/ in a browser to see the profiling index
14 | 3. Alternatively, use the sampling tool for a more graphical analysis. After port 8080 is up, run the following on the ccr machine:
15 | ``` go tool pprof -http=:9999 http://x.x.x.x:8080/debug/pprof/heap ```
16 | Then open http://x.x.x.x:9999 in a browser to see the sampled data in graphical form.
17 | Note that if you cannot open the port, you can use the following commands to save the samples to a file, pull the file to your local machine, and open it in a browser:
18 | ``` curl http://localhost:8080/debug/pprof/heap?seconds=30 > heap.out ```
19 | ``` go tool pprof heap.out ```
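20 | A CPU profile can be collected the same way through the standard pprof endpoints; the sketch below reuses the example values above (port 8080, a 30-second sampling window) and is not specific to CCR:
21 | ``` curl "http://x.x.x.x:8080/debug/pprof/profile?seconds=30" > cpu.out ```
22 | ``` go tool pprof -http=:9999 cpu.out ```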
--------------------------------------------------------------------------------
/doc/run-regression-test-zh.md:
--------------------------------------------------------------------------------
1 | # Regression Test Notes
2 | ## Steps to run the tests
3 | ### 1. Copy the tests and the ccr helper library
4 | CCR's regression tests use the doris/regression-test framework, so before running them the tests and the ccr helpers need to be moved into the doris/regression-test directory.
5 | Create a ccr-syncer-test folder under doris/regression-test/suites and copy the test files into it; then copy the files under ccr-syncer/regression-test/common into the doris/regression-test/common directory. At this point the framework is ready for testing.
6 | ### 2. Configure regression-conf.groovy
7 | Add the following to the configuration file and adjust the jdbc, fe, and ccr settings to your environment:
8 | ```groovy
9 | // JDBC settings
10 | jdbcUrl = "jdbc:mysql://127.0.0.1:9030/?"
11 | targetJdbcUrl = "jdbc:mysql://127.0.0.1:9190/?"
12 | jdbcUser = "root"
13 | jdbcPassword = ""
14 | 
15 | feSourceThriftAddress = "127.0.0.1:9020"
16 | feTargetThriftAddress = "127.0.0.1:9020"
17 | syncerAddress = "127.0.0.1:9190"
18 | feSyncerUser = "root"
19 | feSyncerPassword = ""
20 | feHttpAddress = "127.0.0.1:8030"
21 | 
22 | // ccr settings
23 | ccrDownstreamUrl = "jdbc:mysql://172.19.0.2:9131/?"
24 | 
25 | ccrDownstreamUser = "root"
26 | 
27 | ccrDownstreamPassword = ""
28 | 
29 | ccrDownstreamFeThriftAddress = "127.0.0.1:9020"
30 | ```
31 | ### 3. Run the tests
32 | Before running the tests, make sure doris has at least one BE and one FE deployed, and that ccr-syncer is deployed.
33 | ```bash
34 | # Run the tests with the doris script
35 | # --run the case whose suiteName is sql_action; currently the suiteName equals the file name prefix, so this example corresponds to sql_action.groovy
36 | ./run-regression-test.sh --run sql_action
37 | ```
38 | This completes the steps for running the tests.
39 | ## Steps to write a test case
40 | ### 1. Create the test file
41 | Go to the ccr-syncer/regression-test/suites directory and organize folders by sync level. Taking db-level sync as an example, enter the db_sync folder, then create folders by the object being synced (for example column), then by the action on that object (for example rename). Create the test inside that folder; the file name is the test prefix followed by the directory names in order, e.g. test_ds_col_rename is the sync test for renaming a column under db-level sync.
42 | **Make sure each leaf folder contains only one test file.**
43 | ### 2. Write the test
44 | Description of the ccr helper interface:
45 | ```
46 | // enable binlog
47 | helper.enableDbBinlog()
48 | 
49 | // The functions for creating, deleting, pausing, and resuming jobs accept one optional argument.
50 | // Taking job creation as an example, the argument is tableName; when it is empty, a db-level sync job is created by default and the target database is context.dbName
51 | helper.ccrJobCreate()
52 | 
53 | // When tableName is not empty, a table-level sync job is created; the target database is context.dbName and the target table is tableName
54 | helper.ccrJobCreate(tableName)
55 | 
56 | // Check whether the result of sql satisfies res_func; sql_type is "sql" (source cluster) or "target_sql" (target cluster), and time_out is the timeout
57 | helper.checkShowTimesOf(sql, res_func, time_out, sql_type)
58 | ```
59 | **Notes**
60 | ```
61 | 1. The tests bring up two clusters: sql is sent to the upstream cluster and target_sql to the downstream cluster; anything that involves the target cluster must use target_sql
62 | 
63 | 2. Make sure the source database is not empty when creating a job, otherwise job creation will fail
64 | 
65 | 3. Check both upstream and downstream before and after modifying an object to make sure the results are correct
66 | 
67 | 4. Make sure the dbName automatically created by the test is no longer than 64 characters
68 | ```
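69 | As a concrete example, running only the db-level column rename case named above would look like this (assuming its suite file was copied into doris/regression-test/suites/ccr-syncer-test as described in step 1):
70 | ```bash
71 | ./run-regression-test.sh --run test_ds_col_rename
72 | ```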
--------------------------------------------------------------------------------
/doc/stop_syncer.md:
--------------------------------------------------------------------------------
1 | # Stop Instructions
2 | Stops the corresponding Syncer based on the process id in the pid file under the default or specified path; pid files are named `host_port.pid`.
3 | ## File layout under the output directory
4 | After the build completes, the output directory is laid out roughly as follows:
5 | ```
6 | output_dir
7 |     bin
8 |         ccr_syncer
9 |         enable_db_binlog.sh
10 |         start_syncer.sh
11 |         stop_syncer.sh
12 |     db
13 |         [ccr.db] # generated after running with the default configuration
14 |     log
15 |         [ccr_syncer.log] # generated after running with the default configuration
16 | ```
17 | **In the rest of this document, stop_syncer.sh refers to the stop_syncer.sh under this path!**
18 | ## Stop options
19 | There are three ways to stop Syncers:
20 | 1. Stop a single Syncer in the directory
21 | Specify the host && port of the Syncer to stop; note that the host must match the one given to start_syncer
22 | 2. Stop a specified batch of Syncers in the directory
23 | Specify the pid file names to stop, separated by spaces and wrapped in `" "`
24 | 3. Stop all Syncers in the directory
25 | This is the default behavior
26 | 
27 | ### --pid_dir
28 | Specifies the directory containing the pid files; all three stop methods above operate relative to this directory
29 | ```bash
30 | bash bin/stop_syncer.sh --pid_dir /path/to/pids
31 | ```
32 | The example stops the Syncers for all pid files under `/path/to/pids` (**method 3**); `--pid_dir` can be combined with any of the three stop methods above.
33 | 
34 | The default value is `SYNCER_OUTPUT_DIR/bin`
35 | ### --host && --port
36 | Stops the Syncer corresponding to host:port under the pid_dir path
37 | ```bash
38 | bash bin/stop_syncer.sh --host 127.0.0.1 --port 9190
39 | ```
40 | The default host is 127.0.0.1 and the default port is empty.
41 | That is, specifying only the host does not enable **method 1**; it falls back to **method 3**.
42 | **Method 1** only takes effect when both host and port are non-empty.
43 | ### --files
44 | Stops the Syncers corresponding to the specified pid file names under the pid_dir path
45 | ```bash
46 | bash bin/stop_syncer.sh --files "127.0.0.1_9190.pid 127.0.0.1_9191.pid"
47 | ```
48 | File names are separated by spaces and the whole list must be wrapped in `" "`
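49 | `--files` can also be combined with `--pid_dir`, for example (a sketch reusing the example paths above):
50 | ```bash
51 | bash bin/stop_syncer.sh --pid_dir /path/to/pids --files "127.0.0.1_9190.pid"
52 | ```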
--------------------------------------------------------------------------------
/pkg/ccr/base/backend.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package base
18 |
19 | import "fmt"
20 |
21 | type Backend struct {
22 | Id int64
23 | Host string
24 | BePort uint16
25 | HttpPort uint16
26 | BrpcPort uint16
27 | }
28 |
29 | // Backend Stringer
30 | func (b *Backend) String() string {
31 | return fmt.Sprintf("Backend: {Id: %d, Host: %s, BePort: %d, HttpPort: %d, BrpcPort: %d}", b.Id, b.Host, b.BePort, b.HttpPort, b.BrpcPort)
32 | }
33 |
34 | func (b *Backend) GetHttpPortStr() string {
35 | return fmt.Sprintf("%d", b.HttpPort)
36 | }
37 |
--------------------------------------------------------------------------------
/pkg/ccr/base/extra_info.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package base
18 |
19 | type NetworkAddr struct {
20 | Ip string `json:"ip"`
21 | Port uint16 `json:"port"`
22 | }
23 | type ExtraInfo struct {
24 | BeNetworkMap map[int64]NetworkAddr `json:"be_network_map"`
25 | Token string `json:"token"`
26 | }
27 |
--------------------------------------------------------------------------------
/pkg/ccr/base/specer_factory.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package base
18 |
19 | type SpecerFactory interface {
20 | NewSpecer(tableSpec *Spec) Specer
21 | }
22 |
23 | type SpecFactory struct{}
24 |
25 | func NewSpecerFactory() SpecerFactory {
26 | return &SpecFactory{}
27 | }
28 |
29 | func (sf *SpecFactory) NewSpecer(spec *Spec) Specer {
30 | return spec
31 | }
32 |
--------------------------------------------------------------------------------
/pkg/ccr/be_mock.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | // Code generated by MockGen. DO NOT EDIT.
18 | // Source: rpc/be.go
19 |
20 | // Package ccr is a generated GoMock package.
21 | package ccr
22 |
23 | import (
24 | reflect "reflect"
25 |
26 | backendservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/backendservice"
27 | gomock "go.uber.org/mock/gomock"
28 | )
29 |
30 | // MockIBeRpc is a mock of IBeRpc interface.
31 | type MockIBeRpc struct {
32 | ctrl *gomock.Controller
33 | recorder *MockIBeRpcMockRecorder
34 | }
35 |
36 | // MockIBeRpcMockRecorder is the mock recorder for MockIBeRpc.
37 | type MockIBeRpcMockRecorder struct {
38 | mock *MockIBeRpc
39 | }
40 |
41 | // NewMockIBeRpc creates a new mock instance.
42 | func NewMockIBeRpc(ctrl *gomock.Controller) *MockIBeRpc {
43 | mock := &MockIBeRpc{ctrl: ctrl}
44 | mock.recorder = &MockIBeRpcMockRecorder{mock}
45 | return mock
46 | }
47 |
48 | // EXPECT returns an object that allows the caller to indicate expected use.
49 | func (m *MockIBeRpc) EXPECT() *MockIBeRpcMockRecorder {
50 | return m.recorder
51 | }
52 |
53 | // IngestBinlog mocks base method.
54 | func (m *MockIBeRpc) IngestBinlog(arg0 *backendservice.TIngestBinlogRequest) (*backendservice.TIngestBinlogResult_, error) {
55 | m.ctrl.T.Helper()
56 | ret := m.ctrl.Call(m, "IngestBinlog", arg0)
57 | ret0, _ := ret[0].(*backendservice.TIngestBinlogResult_)
58 | ret1, _ := ret[1].(error)
59 | return ret0, ret1
60 | }
61 |
62 | // IngestBinlog indicates an expected call of IngestBinlog.
63 | func (mr *MockIBeRpcMockRecorder) IngestBinlog(arg0 interface{}) *gomock.Call {
64 | mr.mock.ctrl.T.Helper()
65 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IngestBinlog", reflect.TypeOf((*MockIBeRpc)(nil).IngestBinlog), arg0)
66 | }
67 |
--------------------------------------------------------------------------------
/pkg/ccr/errors.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package ccr
18 |
19 | import "github.com/selectdb/ccr_syncer/pkg/xerror"
20 |
21 | var errBackendNotFound = xerror.NewWithoutStack(xerror.Meta, "backend not found")
22 |
--------------------------------------------------------------------------------
/pkg/ccr/factory.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package ccr
18 |
19 | import (
20 | "github.com/selectdb/ccr_syncer/pkg/ccr/base"
21 | "github.com/selectdb/ccr_syncer/pkg/rpc"
22 | )
23 |
24 | type Factory struct {
25 | rpc.IRpcFactory
26 | MetaerFactory
27 | base.SpecerFactory
28 | ThriftMetaFactory
29 | }
30 |
31 | func NewFactory(rpcFactory rpc.IRpcFactory, metaFactory MetaerFactory, ISpecFactory base.SpecerFactory, thriftMetaFactory ThriftMetaFactory) *Factory {
32 | return &Factory{
33 | IRpcFactory: rpcFactory,
34 | MetaerFactory: metaFactory,
35 | SpecerFactory: ISpecFactory,
36 | ThriftMetaFactory: thriftMetaFactory,
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/pkg/ccr/handle/add_partition.go:
--------------------------------------------------------------------------------
1 | package handle
2 |
3 | import (
4 | "github.com/selectdb/ccr_syncer/pkg/ccr"
5 | "github.com/selectdb/ccr_syncer/pkg/ccr/record"
6 | festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
7 | log "github.com/sirupsen/logrus"
8 | )
9 |
10 | func init() {
11 | ccr.RegisterJobHandle[*record.AddPartition](festruct.TBinlogType_ADD_PARTITION, &AddPartitionHandle{})
12 | }
13 |
14 | type AddPartitionHandle struct {
15 | // The adding partition binlog is idempotent
16 | IdempotentJobHandle[*record.AddPartition]
17 | }
18 |
19 | func (h *AddPartitionHandle) Handle(j *ccr.Job, commitSeq int64, addPartition *record.AddPartition) error {
20 | if isAsyncMv, err := j.IsMaterializedViewTable(addPartition.TableId); err != nil {
21 | return err
22 | } else if isAsyncMv {
23 | log.Warnf("skip add partition for materialized view table %d", addPartition.TableId)
24 | return nil
25 | }
26 |
27 | if addPartition.IsTemp {
28 | log.Infof("skip add temporary partition because backup/restore table with temporary partitions is not supported yet")
29 | return nil
30 | }
31 |
32 | destTableName, err := j.GetDestNameBySrcId(addPartition.TableId)
33 | if err != nil {
34 | return err
35 | }
36 |
37 | return j.IDest.AddPartition(destTableName, addPartition)
38 | }
39 |
--------------------------------------------------------------------------------
/pkg/ccr/handle/alter_view.go:
--------------------------------------------------------------------------------
1 | package handle
2 |
3 | import (
4 | "strings"
5 |
6 | "github.com/selectdb/ccr_syncer/pkg/ccr"
7 | "github.com/selectdb/ccr_syncer/pkg/ccr/record"
8 | festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
9 | log "github.com/sirupsen/logrus"
10 | )
11 |
12 | func init() {
13 | ccr.RegisterJobHandle[*record.AlterView](festruct.TBinlogType_MODIFY_VIEW_DEF, &AlterViewDefHandle{})
14 | }
15 |
16 | type AlterViewDefHandle struct {
17 | // The alter view binlog is idempotent
18 | IdempotentJobHandle[*record.AlterView]
19 | }
20 |
21 | func (h *AlterViewDefHandle) Handle(j *ccr.Job, commitSeq int64, alterView *record.AlterView) error {
22 | viewName, err := j.GetDestNameBySrcId(alterView.TableId)
23 | if err != nil {
24 | return err
25 | }
26 |
27 | 	if err := j.IDest.AlterViewDef(j.Src.Database, viewName, alterView); err != nil {
28 | 		if strings.Contains(err.Error(), "Unknown column") {
29 | 			log.Warnf("alter view but the column is not found, trigger partial snapshot, commit seq: %d, msg: %s",
30 | 				commitSeq, err.Error())
31 | 			replace := false
32 | 			isView := true
33 | 			return j.NewPartialSnapshot(alterView.TableId, viewName, nil, replace, isView)
34 | 		}
35 | 		// Propagate other alter view errors instead of silently dropping them.
36 | 		return err
37 | 	}
38 | 	return nil
39 | }
40 | 
--------------------------------------------------------------------------------
/pkg/ccr/handle/drop_partition.go:
--------------------------------------------------------------------------------
1 | package handle
2 |
3 | import (
4 | "github.com/selectdb/ccr_syncer/pkg/ccr"
5 | "github.com/selectdb/ccr_syncer/pkg/ccr/record"
6 | festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
7 | log "github.com/sirupsen/logrus"
8 | )
9 |
10 | func init() {
11 | ccr.RegisterJobHandle[*record.DropPartition](festruct.TBinlogType_DROP_PARTITION, &DropPartitionHandle{})
12 | }
13 |
14 | type DropPartitionHandle struct {
15 | // The drop partition binlog is idempotent
16 | IdempotentJobHandle[*record.DropPartition]
17 | }
18 |
19 | func (h *DropPartitionHandle) Handle(j *ccr.Job, commitSeq int64, dropPartition *record.DropPartition) error {
20 | if dropPartition.IsTemp {
21 | log.Infof("Since the temporary partition is not synchronized to the downstream, this binlog is skipped.")
22 | return nil
23 | }
24 |
25 | if isAsyncMv, err := j.IsMaterializedViewTable(dropPartition.TableId); err != nil {
26 | return err
27 | } else if isAsyncMv {
28 | log.Warnf("skip drop partition for materialized view table %d", dropPartition.TableId)
29 | return nil
30 | }
31 |
32 | destTableName, err := j.GetDestNameBySrcId(dropPartition.TableId)
33 | if err != nil {
34 | return err
35 | }
36 | return j.IDest.DropPartition(destTableName, dropPartition)
37 | }
38 |
--------------------------------------------------------------------------------
/pkg/ccr/handle/drop_rollup.go:
--------------------------------------------------------------------------------
1 | package handle
2 |
3 | import (
4 | "github.com/selectdb/ccr_syncer/pkg/ccr"
5 | "github.com/selectdb/ccr_syncer/pkg/ccr/record"
6 | festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
7 | )
8 |
9 | func init() {
10 | ccr.RegisterJobHandle[*record.DropRollup](festruct.TBinlogType_DROP_ROLLUP, &DropRollupHandle{})
11 | }
12 |
13 | type DropRollupHandle struct {
14 | }
15 |
16 | func (h *DropRollupHandle) IsIdempotent() bool {
17 | return false
18 | }
19 |
20 | func (h *DropRollupHandle) IsBinlogCommitted(j *ccr.Job, record *record.DropRollup) (bool, error) {
21 | destTableName, err := j.GetDestNameBySrcId(record.TableId)
22 | if err != nil {
23 | return false, err
24 | }
25 |
26 | descResult, err := j.GetDestMeta().DescribeTableAll(destTableName)
27 | if err != nil {
28 | return false, err
29 | }
30 |
31 | if _, ok := descResult[record.IndexName]; ok {
32 | return false, nil
33 | }
34 |
35 | return true, nil
36 | }
37 |
38 | func (h *DropRollupHandle) Handle(j *ccr.Job, commitSeq int64, dropRollup *record.DropRollup) error {
39 | var destTableName string
40 | if j.SyncType == ccr.TableSync {
41 | destTableName = j.Dest.Table
42 | } else {
43 | destTableName = dropRollup.TableName
44 | }
45 |
46 | return j.IDest.DropRollup(destTableName, dropRollup.IndexName)
47 | }
48 |
--------------------------------------------------------------------------------
/pkg/ccr/handle/dummy.go:
--------------------------------------------------------------------------------
1 | package handle
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/selectdb/ccr_syncer/pkg/ccr"
7 | "github.com/selectdb/ccr_syncer/pkg/ccr/record"
8 | festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
9 | log "github.com/sirupsen/logrus"
10 | )
11 |
12 | func init() {
13 | ccr.RegisterJobHandle[*record.Dummy](festruct.TBinlogType_DUMMY, &DummyHandle{})
14 | }
15 |
16 | type DummyHandle struct {
17 | 	IdempotentJobHandle[*record.Dummy]
18 | }
19 |
20 | func (h *DummyHandle) IsBinlogCommitted(job *ccr.Job, record *record.Dummy) (bool, error) {
21 | return true, nil
22 | }
23 |
24 | func (h *DummyHandle) Handle(j *ccr.Job, commitSeq int64, dummy *record.Dummy) error {
25 | info := fmt.Sprintf("handle dummy binlog, need full sync. SyncType: %v, seq: %v", j.SyncType, commitSeq)
26 | log.Infof("%s", info)
27 |
28 | return j.NewSnapshot(commitSeq, info)
29 | }
30 |
--------------------------------------------------------------------------------
/pkg/ccr/handle/idempotent.go:
--------------------------------------------------------------------------------
1 | package handle
2 |
3 | import (
4 | "github.com/selectdb/ccr_syncer/pkg/ccr"
5 | "github.com/selectdb/ccr_syncer/pkg/ccr/record"
6 | )
7 |
8 | type IdempotentJobHandle[T record.Record] struct{}
9 |
10 | func (h *IdempotentJobHandle[T]) IsIdempotent() bool {
11 | return true
12 | }
13 |
14 | func (h *IdempotentJobHandle[T]) IsBinlogCommitted(job *ccr.Job, record T) (bool, error) {
15 | return false, nil
16 | }
17 |
--------------------------------------------------------------------------------
/pkg/ccr/handle/index_change_job.go:
--------------------------------------------------------------------------------
1 | package handle
2 |
3 | import (
4 | "github.com/selectdb/ccr_syncer/pkg/ccr"
5 | "github.com/selectdb/ccr_syncer/pkg/ccr/record"
6 | festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
7 | log "github.com/sirupsen/logrus"
8 | )
9 |
10 | func init() {
11 | ccr.RegisterJobHandle[*record.IndexChangeJob](festruct.TBinlogType_INDEX_CHANGE_JOB, &IndexChangeJobHandle{})
12 | }
13 |
14 | type IndexChangeJobHandle struct {
15 | // The index change job binlog is idempotent
16 | IdempotentJobHandle[*record.IndexChangeJob]
17 | }
18 |
19 | func (h *IndexChangeJobHandle) Handle(j *ccr.Job, commitSeq int64, indexChangeJob *record.IndexChangeJob) error {
20 | if indexChangeJob.JobState != record.INDEX_CHANGE_JOB_STATE_FINISHED ||
21 | indexChangeJob.IsDropOp {
22 | log.Debugf("skip index change job binlog, job state: %s, is drop op: %t",
23 | indexChangeJob.JobState, indexChangeJob.IsDropOp)
24 | return nil
25 | }
26 |
27 | var destTableName string
28 | if j.SyncType == ccr.TableSync {
29 | destTableName = j.Dest.Table
30 | } else {
31 | destTableName = indexChangeJob.TableName
32 | }
33 |
34 | return j.IDest.BuildIndex(destTableName, indexChangeJob)
35 | }
36 |
--------------------------------------------------------------------------------
/pkg/ccr/handle/modify_distribution_bucket_num.go:
--------------------------------------------------------------------------------
1 | package handle
2 |
3 | import (
4 | "github.com/selectdb/ccr_syncer/pkg/ccr"
5 | "github.com/selectdb/ccr_syncer/pkg/ccr/record"
6 | festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
7 | )
8 |
9 | func init() {
10 | ccr.RegisterJobHandle[*record.ModifyDistributionBucketNum](festruct.TBinlogType_MODIFY_DISTRIBUTION_BUCKET_NUM, &ModifyDistributionBucketNumHandle{})
11 | }
12 |
13 | type ModifyDistributionBucketNumHandle struct {
14 | // The modify distribution bucket num binlog is idempotent
15 | IdempotentJobHandle[*record.ModifyDistributionBucketNum]
16 | }
17 |
18 | func (h *ModifyDistributionBucketNumHandle) Handle(j *ccr.Job, commitSeq int64, modifyDistributionBucketNum *record.ModifyDistributionBucketNum) error {
19 | destTableName, err := j.GetDestNameBySrcId(modifyDistributionBucketNum.TableId)
20 | if err != nil {
21 | return err
22 | }
23 | bucketType := modifyDistributionBucketNum.Type
24 | autoBucket := modifyDistributionBucketNum.AutoBucket
25 | bucketNum := modifyDistributionBucketNum.BucketNum
26 | columnsName := modifyDistributionBucketNum.ColumnsName
27 | return j.IDest.ModifyDistributionBucketNum(destTableName, bucketType, autoBucket, bucketNum, columnsName)
28 | }
29 |
--------------------------------------------------------------------------------
/pkg/ccr/handle/modify_distribution_type.go:
--------------------------------------------------------------------------------
1 | package handle
2 |
3 | import (
4 | "github.com/selectdb/ccr_syncer/pkg/ccr"
5 | "github.com/selectdb/ccr_syncer/pkg/ccr/record"
6 | festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
7 | )
8 |
9 | func init() {
10 | ccr.RegisterJobHandle[*record.ModifyDistributionType](festruct.TBinlogType_MODIFY_DISTRIBUTION_TYPE, &ModifyDistributionTypeHandle{})
11 | }
12 |
13 | type ModifyDistributionTypeHandle struct {
14 | }
15 |
16 | func (h *ModifyDistributionTypeHandle) IsBinlogCommitted(j *ccr.Job, r *record.ModifyDistributionType) (bool, error) {
17 | j.GetDestMeta().GetTable(r.GetTableId())
18 | destTableName, err := j.GetDestNameBySrcId(r.GetTableId())
19 | if err != nil {
20 | return false, err
21 | }
22 | return j.CheckCreateTable(destTableName, "DISTRIBUTED BY RANDOM")
23 | }
24 |
25 | func (h *ModifyDistributionTypeHandle) IsIdempotent() bool {
26 | return false
27 | }
28 |
29 | func (h *ModifyDistributionTypeHandle) Handle(j *ccr.Job, commitSeq int64, record *record.ModifyDistributionType) error {
30 | destTableName, err := j.GetDestNameBySrcId(record.TableId)
31 | if err != nil {
32 | return err
33 | }
34 |
35 | return j.IDest.ModifyDistributionType(destTableName)
36 | }
37 |
--------------------------------------------------------------------------------
/pkg/ccr/handle/modify_property.go:
--------------------------------------------------------------------------------
1 | package handle
2 |
3 | import (
4 | "github.com/selectdb/ccr_syncer/pkg/ccr"
5 | "github.com/selectdb/ccr_syncer/pkg/ccr/record"
6 | festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
7 | )
8 |
9 | func init() {
10 | ccr.RegisterJobHandle[*record.ModifyTableProperty](festruct.TBinlogType_MODIFY_TABLE_PROPERTY, &ModifyTablePropertyHandle{})
11 | }
12 |
13 | type ModifyTablePropertyHandle struct {
14 | // The modify table property binlog is idempotent
15 | IdempotentJobHandle[*record.ModifyTableProperty]
16 | }
17 |
18 | func (h *ModifyTablePropertyHandle) Handle(j *ccr.Job, commitSeq int64, modifyProperty *record.ModifyTableProperty) error {
19 | destTableName, err := j.GetDestNameBySrcId(modifyProperty.TableId)
20 | if err != nil {
21 | return err
22 | }
23 | return j.Dest.ModifyTableProperty(destTableName, modifyProperty)
24 | }
25 |
--------------------------------------------------------------------------------
/pkg/ccr/handle/recover_info.go:
--------------------------------------------------------------------------------
1 | package handle
2 |
3 | import (
4 | "github.com/selectdb/ccr_syncer/pkg/ccr"
5 | "github.com/selectdb/ccr_syncer/pkg/ccr/record"
6 | festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
7 | log "github.com/sirupsen/logrus"
8 | )
9 |
10 | func init() {
11 | ccr.RegisterJobHandle[*record.RecoverInfo](festruct.TBinlogType_RECOVER_INFO, &RecoverInfoHandle{})
12 | }
13 |
14 | type RecoverInfoHandle struct {
15 | // The recover info binlog is idempotent
16 | IdempotentJobHandle[*record.RecoverInfo]
17 | }
18 |
19 | func (h *RecoverInfoHandle) Handle(j *ccr.Job, commitSeq int64, recoverInfo *record.RecoverInfo) error {
20 | if recoverInfo.IsRecoverTable() {
21 | var tableName string
22 | if recoverInfo.NewTableName != "" {
23 | tableName = recoverInfo.NewTableName
24 | } else {
25 | tableName = recoverInfo.TableName
26 | }
27 | 		log.Infof("recover info for table %s, will trigger partial sync", tableName)
28 | isView := false
29 | return j.NewPartialSnapshot(recoverInfo.TableId, tableName, nil, true, isView)
30 | }
31 |
32 | var partitions []string
33 | if recoverInfo.NewPartitionName != "" {
34 | partitions = append(partitions, recoverInfo.NewPartitionName)
35 | } else {
36 | partitions = append(partitions, recoverInfo.PartitionName)
37 | }
38 | 	log.Infof("recover info for partition(%s) of table %s, will trigger partial sync",
39 | partitions, recoverInfo.TableName)
40 | // if source does multiple recover of partition, then there is a race
41 | // condition and some recover might miss due to commitseq change after snapshot.
42 | isView := false
43 | return j.NewPartialSnapshot(recoverInfo.TableId, recoverInfo.TableName, nil, true, isView)
44 | }
45 |
--------------------------------------------------------------------------------
/pkg/ccr/handle/rename_partition.go:
--------------------------------------------------------------------------------
1 | package handle
2 |
3 | import (
4 | "github.com/selectdb/ccr_syncer/pkg/ccr"
5 | "github.com/selectdb/ccr_syncer/pkg/ccr/record"
6 | festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
7 | log "github.com/sirupsen/logrus"
8 | )
9 |
10 | func init() {
11 | ccr.RegisterJobHandle[*record.RenamePartition](festruct.TBinlogType_RENAME_PARTITION, &RenamePartitionHandle{})
12 | }
13 |
14 | type RenamePartitionHandle struct {
15 | }
16 |
17 | func (h *RenamePartitionHandle) IsBinlogCommitted(j *ccr.Job, record *record.RenamePartition) (bool, error) {
18 | destTableId, err := j.GetDestTableIdBySrc(record.TableId)
19 | if err != nil {
20 | return false, err
21 | }
22 |
23 | if err := j.GetDestMeta().UpdatePartitions(destTableId); err != nil {
24 | return false, err
25 | }
26 |
27 | partitions, err := j.GetDestMeta().GetPartitionIdMap(destTableId)
28 | if err != nil {
29 | return false, err
30 | }
31 |
32 | for _, partition := range partitions {
33 | if partition.Name == record.NewPartitionName {
34 | log.Infof("partition %s is not renamed to %s in dest table %d, this binlog is committed",
35 | record.OldPartitionName, record.NewPartitionName, destTableId)
36 | return true, nil
37 | }
38 | }
39 |
40 | log.Infof("partition %s is renamed to %s in dest table %d, this binlog is not committed",
41 | record.OldPartitionName, record.NewPartitionName, destTableId)
42 | return false, nil
43 | }
44 |
45 | func (h *RenamePartitionHandle) IsIdempotent() bool {
46 | return false
47 | }
48 |
49 | func (h *RenamePartitionHandle) Handle(j *ccr.Job, commitSeq int64, renamePartition *record.RenamePartition) error {
50 | destTableName, err := j.GetDestNameBySrcId(renamePartition.TableId)
51 | if err != nil {
52 | return err
53 | }
54 |
55 | newPartition := renamePartition.NewPartitionName
56 | oldPartition := renamePartition.OldPartitionName
57 | if oldPartition == "" {
58 | log.Warnf("old partition name is empty, sync partition via partial snapshot, "+
59 | "new partition: %s, partition id: %d, table id: %d, commit seq: %d",
60 | newPartition, renamePartition.PartitionId, renamePartition.TableId, commitSeq)
61 | replace := true
62 | tableName := destTableName
63 | if j.IsTableSyncWithAlias() {
64 | tableName = j.Src.Table
65 | }
66 | isView := false
67 | return j.NewPartialSnapshot(renamePartition.TableId, tableName, nil, replace, isView)
68 | }
69 | return j.IDest.RenamePartition(destTableName, oldPartition, newPartition)
70 | }
71 |
--------------------------------------------------------------------------------
/pkg/ccr/handle/rename_rollup.go:
--------------------------------------------------------------------------------
1 | package handle
2 |
3 | import (
4 | "github.com/selectdb/ccr_syncer/pkg/ccr"
5 | "github.com/selectdb/ccr_syncer/pkg/ccr/record"
6 | festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
7 | log "github.com/sirupsen/logrus"
8 | )
9 |
10 | func init() {
11 | ccr.RegisterJobHandle[*record.RenameRollup](festruct.TBinlogType_RENAME_ROLLUP, &RenameRollupHandle{})
12 | }
13 |
14 | type RenameRollupHandle struct {
15 | }
16 |
17 | func (h *RenameRollupHandle) IsBinlogCommitted(j *ccr.Job, record *record.RenameRollup) (bool, error) {
18 | destTableName, err := j.GetDestNameBySrcId(record.TableId)
19 | if err != nil {
20 | log.Errorf("get dest table name by src id %d failed, err: %v", record.TableId, err)
21 | return false, err
22 | }
23 |
24 | descResult, err := j.GetDestMeta().DescribeTableAll(destTableName)
25 | if err != nil {
26 | return false, err
27 | }
28 |
29 | if _, ok := descResult[record.NewRollupName]; !ok {
30 | log.Infof("rollup %s is not renamed to %s in dest table %s, this binlog is not committed",
31 | record.OldRollupName, record.NewRollupName, destTableName)
32 | return false, nil
33 | }
34 |
35 | log.Infof("rollup %s is renamed to %s in dest table %s, this binlog is committed",
36 | record.OldRollupName, record.NewRollupName, destTableName)
37 | return true, nil
38 | }
39 |
40 | func (h *RenameRollupHandle) IsIdempotent() bool {
41 | return false
42 | }
43 |
44 | func (h *RenameRollupHandle) Handle(j *ccr.Job, commitSeq int64, renameRollup *record.RenameRollup) error {
45 | destTableName, err := j.GetDestNameBySrcId(renameRollup.TableId)
46 | if err != nil {
47 | return err
48 | }
49 |
50 | newRollup := renameRollup.NewRollupName
51 | oldRollup := renameRollup.OldRollupName
52 | if oldRollup == "" {
53 | log.Warnf("old rollup name is empty, sync rollup via partial snapshot, "+
54 | "new rollup: %s, index id: %d, table id: %d, commit seq: %d",
55 | newRollup, renameRollup.IndexId, renameRollup.TableId, commitSeq)
56 | replace := true
57 | tableName := destTableName
58 | if j.IsTableSyncWithAlias() {
59 | tableName = j.Src.Table
60 | }
61 | isView := false
62 | return j.NewPartialSnapshot(renameRollup.TableId, tableName, nil, replace, isView)
63 | }
64 |
65 | return j.IDest.RenameRollup(destTableName, oldRollup, newRollup)
66 | }
67 |
--------------------------------------------------------------------------------
/pkg/ccr/job_factory.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package ccr
18 |
19 | import "context"
20 |
21 | type JobFactory struct{}
22 |
23 | // create job factory
24 | func NewJobFactory() *JobFactory {
25 | return &JobFactory{}
26 | }
27 |
28 | func (jf *JobFactory) CreateJob(ctx context.Context, job *Job, jobType string) (Jober, error) {
29 | switch jobType {
30 | case "IngestBinlog":
31 | return NewIngestBinlogJob(ctx, job)
32 | case "Snapshot":
33 | return nil, nil
34 | // return NewSnapshotJob(ctx, job)
35 | default:
36 | return nil, nil
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/pkg/ccr/jober.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package ccr
18 |
19 | type Jober interface {
20 | Run()
21 | Error() error
22 | }
23 |
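
A minimal sketch of how JobFactory and the Jober interface are meant to compose; obtaining a constructed *ccr.Job named job is assumed and not shown here:

package main

import (
	"context"

	"github.com/selectdb/ccr_syncer/pkg/ccr"
)

func runIngest(ctx context.Context, job *ccr.Job) error {
	// "IngestBinlog" is the only job type currently wired up in CreateJob.
	jober, err := ccr.NewJobFactory().CreateJob(ctx, job, "IngestBinlog")
	if err != nil {
		return err
	}
	jober.Run()          // execute the job
	return jober.Error() // report the job's final error, nil on success
}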
--------------------------------------------------------------------------------
/pkg/ccr/label.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package ccr
18 |
19 | import (
20 | "fmt"
21 | "regexp"
22 | "time"
23 | )
24 |
25 | var LabelRegex = `^[-_A-Za-z0-9:]{1,128}$`
26 |
27 | // snapshot name format "ccrs_${ccr_name}_${sync_id}"
28 | func NewSnapshotLabelPrefix(ccrName string, syncId int64) string {
29 | return fmt.Sprintf("ccrs_%s_%d", ccrName, syncId)
30 | }
31 |
32 | // snapshot name format "ccrp_${ccr_name}_${sync_id}"
33 | func NewPartialSnapshotLabelPrefix(ccrName string, syncId int64) string {
34 | return fmt.Sprintf("ccrp_%s_%d", ccrName, syncId)
35 | }
36 |
37 | func NewLabelWithTs(prefix string) string {
38 | return fmt.Sprintf("%s_%d", prefix, time.Now().Unix())
39 | }
40 |
41 | func NewRestoreLabel(snapshotName string) string {
42 | if snapshotName == "" {
43 | return ""
44 | }
45 |
46 | // use current seconds
47 | return fmt.Sprintf("%s_r_%d", snapshotName, time.Now().Unix())
48 | }
49 |
50 | func TableAlias(tableName string) string {
51 | return fmt.Sprintf("__ccr_%s_%d", tableName, time.Now().Unix())
52 | }
53 |
54 | // the same label check as Doris
55 | func CheckLabelRegex(label string) bool {
56 | if label == "" {
57 | return false
58 | }
59 |
60 | re := regexp.MustCompile(LabelRegex)
61 | return re.MatchString(label)
62 | }
63 |
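
A small sketch of how the label helpers above compose; the ccr name and sync id are illustrative values:

package main

import (
	"fmt"

	"github.com/selectdb/ccr_syncer/pkg/ccr"
)

func main() {
	// e.g. "ccrs_demo_job_42_1700000000"
	label := ccr.NewLabelWithTs(ccr.NewSnapshotLabelPrefix("demo_job", 42))
	fmt.Println(label, ccr.CheckLabelRegex(label)) // true: only [-_A-Za-z0-9:] characters, length <= 128
}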
--------------------------------------------------------------------------------
/pkg/ccr/metaer_factory.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package ccr
18 |
19 | import (
20 | "github.com/selectdb/ccr_syncer/pkg/ccr/base"
21 | )
22 |
23 | type MetaerFactory interface {
24 | NewMeta(tableSpec *base.Spec) Metaer
25 | }
26 |
27 | type MetaFactory struct{}
28 |
29 | func NewMetaFactory() MetaerFactory {
30 | return &MetaFactory{}
31 | }
32 |
33 | func (mf *MetaFactory) NewMeta(spec *base.Spec) Metaer {
34 | return NewMeta(spec)
35 | }
36 |
--------------------------------------------------------------------------------
/pkg/ccr/record/alter_view.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | import (
20 | "encoding/json"
21 | "fmt"
22 | )
23 |
24 | type AlterView struct {
25 | DbId int64 `json:"dbId"`
26 | TableId int64 `json:"tableId"`
27 | InlineViewDef string `json:"inlineViewDef"`
28 | SqlMode int64 `json:"sqlMode"`
29 | Comment string `json:"comment"`
30 | }
31 |
32 | func (alterView *AlterView) Deserialize(data string) error {
33 | err := json.Unmarshal([]byte(data), &alterView)
34 | if err != nil {
35 | return fmt.Errorf("unmarshal alter view error: %v", err)
36 | }
37 |
38 | if alterView.TableId == 0 {
39 | return fmt.Errorf("table id not found")
40 | }
41 | return nil
42 | }
43 |
44 | func (alterView *AlterView) GetTableId() int64 {
45 | return alterView.TableId
46 | }
47 |
48 | func NewAlterViewFromJson(data string) (*AlterView, error) {
49 | var alterView AlterView
50 | if err := alterView.Deserialize(data); err != nil {
51 | return nil, err
52 | }
53 | return &alterView, nil
54 | }
55 |
56 | func (a *AlterView) String() string {
57 | return fmt.Sprintf("AlterView: DbId: %d, TableId: %d, InlineViewDef: %s, SqlMode: %d, Comment: %s", a.DbId, a.TableId, a.InlineViewDef, a.SqlMode, a.Comment)
58 | }
59 |
--------------------------------------------------------------------------------
/pkg/ccr/record/barrier_log.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | import (
20 | "encoding/json"
21 |
22 | "github.com/selectdb/ccr_syncer/pkg/xerror"
23 | )
24 |
25 | type BarrierLog struct {
26 | DbId int64 `json:"dbId"`
27 | TableId int64 `json:"tableId"`
28 | BinlogType int64 `json:"binlogType"`
29 | Binlog string `json:"binlog"`
30 | }
31 |
32 | func NewBarrierLogFromJson(data string) (*BarrierLog, error) {
33 | var log BarrierLog
34 | err := json.Unmarshal([]byte(data), &log)
35 | if err != nil {
36 | return nil, xerror.Wrap(err, xerror.Normal, "unmarshal barrier log error")
37 | }
38 | return &log, nil
39 | }
40 |
--------------------------------------------------------------------------------
/pkg/ccr/record/drop_partition.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | import (
20 | "encoding/json"
21 |
22 | "github.com/selectdb/ccr_syncer/pkg/xerror"
23 | )
24 |
25 | type DropPartition struct {
26 | TableId int64 `json:"tableId"`
27 | Sql string `json:"sql"`
28 | IsTemp bool `json:"isTempPartition"`
29 | PartitionName string `json:"partitionName"`
30 | ForceDrop bool `json:"forceDrop"`
31 | }
32 |
33 | func (dropPartition *DropPartition) Deserialize(data string) error {
34 | err := json.Unmarshal([]byte(data), &dropPartition)
35 | if err != nil {
36 | return xerror.Wrap(err, xerror.Normal, "unmarshal drop partition error")
37 | }
38 |
39 | if dropPartition.Sql == "" {
40 | return xerror.Errorf(xerror.Normal, "drop partition sql is empty")
41 | }
42 |
43 | if dropPartition.TableId == 0 {
44 | return xerror.Errorf(xerror.Normal, "table id not found")
45 | }
46 |
47 | return nil
48 | }
49 |
50 | func (dropPartition *DropPartition) GetTableId() int64 {
51 | return dropPartition.TableId
52 | }
53 |
54 | func NewDropPartitionFromJson(data string) (*DropPartition, error) {
55 | var dropPartition DropPartition
56 | if err := dropPartition.Deserialize(data); err != nil {
57 | return nil, err
58 | }
59 | return &dropPartition, nil
60 | }
61 |
--------------------------------------------------------------------------------
/pkg/ccr/record/drop_rollup.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | import (
20 | "encoding/json"
21 | "fmt"
22 |
23 | "github.com/selectdb/ccr_syncer/pkg/xerror"
24 | )
25 |
26 | type DropRollup struct {
27 | DbId int64 `json:"dbId"`
28 | TableId int64 `json:"tableId"`
29 | TableName string `json:"tableName"`
30 | IndexId int64 `json:"indexId"`
31 | IndexName string `json:"indexName"`
32 | }
33 |
34 | func NewDropRollupFromJson(data string) (*DropRollup, error) {
35 | var dropRollup DropRollup
36 | if err := dropRollup.Deserialize(data); err != nil {
37 | return nil, err
38 | }
39 | return &dropRollup, nil
40 | }
41 |
42 | func (dropRollup *DropRollup) Deserialize(data string) error {
43 | err := json.Unmarshal([]byte(data), &dropRollup)
44 | if err != nil {
45 | return xerror.Wrap(err, xerror.Normal, "unmarshal drop rollup error")
46 | }
47 |
48 | if dropRollup.TableId == 0 {
49 | return xerror.Errorf(xerror.Normal, "invalid drop rollup, table id not found")
50 | }
51 |
52 | if dropRollup.TableName == "" {
53 | return xerror.Errorf(xerror.Normal, "invalid drop rollup, tableName is empty")
54 | }
55 |
56 | if dropRollup.IndexName == "" {
57 | return xerror.Errorf(xerror.Normal, "invalid drop rollup, indexName is empty")
58 | }
59 |
60 | return nil
61 | }
62 |
63 | func (dropRollup *DropRollup) GetTableId() int64 {
64 | return dropRollup.TableId
65 | }
66 |
67 | func (d *DropRollup) String() string {
68 | return fmt.Sprintf("DropRollup{DbId: %d, TableId: %d, TableName: %s, IndexId: %d, IndexName: %s}",
69 | d.DbId, d.TableId, d.TableName, d.IndexId, d.IndexName)
70 | }
71 |
--------------------------------------------------------------------------------
/pkg/ccr/record/drop_table.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | import (
20 | "encoding/json"
21 | "fmt"
22 |
23 | "github.com/selectdb/ccr_syncer/pkg/xerror"
24 | )
25 |
26 | type DropTable struct {
27 | DbId int64 `json:"dbId"`
28 | TableId int64 `json:"tableId"`
29 | TableName string `json:"tableName"`
30 | IsView bool `json:"isView"`
31 | RawSql string `json:"rawSql"`
32 | }
33 |
34 | func NewDropTableFromJson(data string) (*DropTable, error) {
35 | var dropTable DropTable
36 | if err := dropTable.Deserialize(data); err != nil {
37 | return nil, err
38 | }
39 | return &dropTable, nil
40 | }
41 |
42 | func (dropTable *DropTable) Deserialize(data string) error {
43 | err := json.Unmarshal([]byte(data), &dropTable)
44 | if err != nil {
45 | return xerror.Wrap(err, xerror.Normal, "unmarshal drop table error")
46 | }
47 |
48 | if dropTable.TableId == 0 {
49 | return xerror.Errorf(xerror.Normal, "table id not found")
50 | }
51 |
52 | return nil
53 | }
54 |
55 | func (dropTable *DropTable) GetTableId() int64 {
56 | return dropTable.TableId
57 | }
58 |
59 | // Stringer, all fields
60 | func (c *DropTable) String() string {
61 | return fmt.Sprintf("DropTable: DbId: %d, TableId: %d, TableName: %s, IsView: %t, RawSql: %s", c.DbId, c.TableId, c.TableName, c.IsView, c.RawSql)
62 | }
63 |
--------------------------------------------------------------------------------
/pkg/ccr/record/dummy.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | type Dummy struct {
20 | }
21 |
22 | func (dummy *Dummy) Deserialize(data string) error {
23 | return nil
24 | }
25 |
26 | func (dummy *Dummy) GetTableId() int64 {
27 | return -1
28 | }
29 |
--------------------------------------------------------------------------------
/pkg/ccr/record/modify_comment.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | import (
20 | "encoding/json"
21 | "fmt"
22 |
23 | "github.com/selectdb/ccr_syncer/pkg/xerror"
24 | )
25 |
26 | type ModifyComment struct {
27 | Type string `json:"type"`
28 | DbId int64 `json:"dbId"`
29 | TableId int64 `json:"tblId"`
30 | ColToComment map[string]string `json:"colToComment"`
31 | TableComment string `json:"tblComment"`
32 | }
33 |
34 | func NewModifyCommentFromJson(data string) (*ModifyComment, error) {
35 | var modifyComment ModifyComment
36 | err := json.Unmarshal([]byte(data), &modifyComment)
37 | if err != nil {
38 | return nil, xerror.Wrap(err, xerror.Normal, "unmarshal modify comment error")
39 | }
40 |
41 | if modifyComment.TableId == 0 {
42 | return nil, xerror.Errorf(xerror.Normal, "table id not found")
43 | }
44 |
45 | return &modifyComment, nil
46 | }
47 |
48 | // Stringer
49 | func (r *ModifyComment) String() string {
50 | return fmt.Sprintf("ModifyComment: Type: %s, DbId: %d, TableId: %d, ColToComment: %v, TableComment: %s", r.Type, r.DbId, r.TableId, r.ColToComment, r.TableComment)
51 | }
52 |
--------------------------------------------------------------------------------
/pkg/ccr/record/modify_distribution_type.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | import (
20 | "encoding/json"
21 | "fmt"
22 |
23 | "github.com/selectdb/ccr_syncer/pkg/xerror"
24 | )
25 |
26 | type ModifyDistributionType struct {
27 | DbId int64 `json:"db"`
28 | TableId int64 `json:"tb"`
29 | }
30 |
31 | func (r *ModifyDistributionType) Deserialize(data string) error {
32 | err := json.Unmarshal([]byte(data), &r)
33 | if err != nil {
34 | return xerror.Wrap(err, xerror.Normal, "unmarshal rename table error")
35 | }
36 |
37 | if r.TableId == 0 {
38 | return xerror.Errorf(xerror.Normal, "table id not found")
39 | }
40 |
41 | return nil
42 | }
43 |
44 | func NewModifyDistributionTypeFromJson(data string) (*ModifyDistributionType, error) {
45 | var modifyDistributionType ModifyDistributionType
46 | if err := modifyDistributionType.Deserialize(data); err != nil {
47 | return nil, err
48 | }
49 | return &modifyDistributionType, nil
50 | }
51 |
52 | // Stringer
53 | func (r *ModifyDistributionType) String() string {
54 | return fmt.Sprintf("RenameTable: DbId: %d, TableId: %d", r.DbId, r.TableId)
55 | }
56 |
57 | func (record *ModifyDistributionType) GetTableId() int64 {
58 | return record.TableId
59 | }
60 |
--------------------------------------------------------------------------------
/pkg/ccr/record/modify_property.go:
--------------------------------------------------------------------------------
1 | package record
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 |
7 | "github.com/selectdb/ccr_syncer/pkg/xerror"
8 | )
9 |
10 | type ModifyTableProperty struct {
11 | DbId int64 `json:"dbId"`
12 | TableId int64 `json:"tableId"`
13 | TableName string `json:"tableName"`
14 | Properties map[string]string `json:"properties"`
15 | Sql string `json:"sql"`
16 | }
17 |
18 | func (modifyProperty *ModifyTableProperty) Deserialize(data string) error {
19 | err := json.Unmarshal([]byte(data), &modifyProperty)
20 | if err != nil {
21 | return xerror.Wrap(err, xerror.Normal, "unmarshal modify table property error")
22 | }
23 |
24 | if modifyProperty.TableId == 0 {
25 | return xerror.Errorf(xerror.Normal, "table id not found")
26 | }
27 | return nil
28 | }
29 |
30 | func NewModifyTablePropertyFromJson(data string) (*ModifyTableProperty, error) {
31 | var modifyProperty ModifyTableProperty
32 | if err := modifyProperty.Deserialize(data); err != nil {
33 | return nil, err
34 | }
35 | return &modifyProperty, nil
36 | }
37 |
38 | func (modifyProperty *ModifyTableProperty) GetTableId() int64 {
39 | return modifyProperty.TableId
40 | }
41 |
42 | func (m *ModifyTableProperty) String() string {
43 | return fmt.Sprintf("ModifyTableProperty: DbId: %d, TableId: %d, TableName: %s, Properties: %v, Sql: %s",
44 | m.DbId, m.TableId, m.TableName, m.Properties, m.Sql)
45 | }
46 |
--------------------------------------------------------------------------------
/pkg/ccr/record/modify_table_add_or_drop_inverted_indices.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | import (
20 | "encoding/json"
21 | "strings"
22 |
23 | "github.com/selectdb/ccr_syncer/pkg/xerror"
24 | )
25 |
26 | type ModifyTableAddOrDropInvertedIndices struct {
27 | DbId int64 `json:"dbId"`
28 | TableId int64 `json:"tableId"`
29 | IsDropInvertedIndex bool `json:"isDropInvertedIndex"`
30 | RawSql string `json:"rawSql"`
31 | Indexes []Index `json:"indexes"`
32 | AlternativeIndexes []Index `json:"alterInvertedIndexes"`
33 | }
34 |
35 | func NewModifyTableAddOrDropInvertedIndicesFromJson(data string) (*ModifyTableAddOrDropInvertedIndices, error) {
36 | m := &ModifyTableAddOrDropInvertedIndices{}
37 | if err := json.Unmarshal([]byte(data), m); err != nil {
38 | return nil, xerror.Wrap(err, xerror.Normal, "unmarshal modify table add or drop inverted indices error")
39 | }
40 |
41 | if m.RawSql == "" {
42 | // TODO: fallback to create sql from other fields
43 | return nil, xerror.Errorf(xerror.Normal, "modify table add or drop inverted indices sql is empty")
44 | }
45 |
46 | if m.TableId == 0 {
47 | return nil, xerror.Errorf(xerror.Normal, "modify table add or drop inverted indices table id not found")
48 | }
49 |
50 | return m, nil
51 | }
52 |
53 | func (m *ModifyTableAddOrDropInvertedIndices) GetRawSql() string {
54 | if strings.Contains(m.RawSql, "ALTER TABLE") && strings.Contains(m.RawSql, "INDEX") &&
55 | !strings.Contains(m.RawSql, "DROP INDEX") && !strings.Contains(m.RawSql, "ADD INDEX") {
56 | // fix the syntax error
57 | // See apache/doris#44392 for details
58 | return strings.ReplaceAll(m.RawSql, "INDEX", "ADD INDEX")
59 | }
60 | return m.RawSql
61 | }
62 |
--------------------------------------------------------------------------------
/pkg/ccr/record/record.go:
--------------------------------------------------------------------------------
1 | package record
2 |
3 | // A basic interface for all binlog records
4 | type Record interface {
5 | // Deserialize the binlog data to the record
6 | Deserialize(data string) error
7 |
8 | // Get the table id of this record
9 | GetTableId() int64
10 | }
11 |
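
A short sketch of the Record interface in use, taking DropPartition (shown earlier) as the concrete type; the JSON literal is illustrative:

package main

import (
	"fmt"

	"github.com/selectdb/ccr_syncer/pkg/ccr/record"
)

func main() {
	var r record.Record = &record.DropPartition{}
	data := `{"tableId": 77395, "sql": "DROP PARTITION p1", "partitionName": "p1"}`
	if err := r.Deserialize(data); err != nil {
		panic(err)
	}
	fmt.Println(r.GetTableId()) // 77395
}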
--------------------------------------------------------------------------------
/pkg/ccr/record/rename_column.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | import (
20 | "encoding/json"
21 | "fmt"
22 |
23 | "github.com/selectdb/ccr_syncer/pkg/xerror"
24 | )
25 |
26 | type RenameColumn struct {
27 | DbId int64 `json:"dbId"`
28 | TableId int64 `json:"tableId"`
29 | ColName string `json:"colName"`
30 | NewColName string `json:"newColName"`
31 | IndexIdToSchemaVersion map[int64]int32 `json:"indexIdToSchemaVersion"`
32 | }
33 |
34 | func NewRenameColumnFromJson(data string) (*RenameColumn, error) {
35 | var renameColumn RenameColumn
36 | err := json.Unmarshal([]byte(data), &renameColumn)
37 | if err != nil {
38 | return nil, xerror.Wrap(err, xerror.Normal, "unmarshal rename column error")
39 | }
40 |
41 | if renameColumn.TableId == 0 {
42 | return nil, xerror.Errorf(xerror.Normal, "table id not found")
43 | }
44 |
45 | return &renameColumn, nil
46 | }
47 |
48 | // Stringer
49 | func (r *RenameColumn) String() string {
50 | return fmt.Sprintf("RenameColumn: DbId: %d, TableId: %d, ColName: %s, NewColName: %s, IndexIdToSchemaVersion: %v", r.DbId, r.TableId, r.ColName, r.NewColName, r.IndexIdToSchemaVersion)
51 | }
52 |
--------------------------------------------------------------------------------
/pkg/ccr/record/rename_partition.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | import (
20 | "encoding/json"
21 | "fmt"
22 |
23 | "github.com/selectdb/ccr_syncer/pkg/xerror"
24 | )
25 |
26 | type RenamePartition struct {
27 | DbId int64 `json:"db"`
28 | TableId int64 `json:"tb"`
29 | PartitionId int64 `json:"p"`
30 | NewPartitionName string `json:"nP"`
31 | OldPartitionName string `json:"oP"`
32 | }
33 |
34 | func (renamePartition *RenamePartition) Deserialize(data string) error {
35 | err := json.Unmarshal([]byte(data), &renamePartition)
36 | if err != nil {
37 | return xerror.Wrap(err, xerror.Normal, "unmarshal rename partition record error")
38 | }
39 |
40 | if renamePartition.TableId == 0 {
41 | return xerror.Errorf(xerror.Normal, "rename partition record table id not found")
42 | }
43 |
44 | if renamePartition.PartitionId == 0 {
45 | return xerror.Errorf(xerror.Normal, "rename partition record partition id not found")
46 | }
47 |
48 | if renamePartition.NewPartitionName == "" {
49 | return xerror.Errorf(xerror.Normal, "rename partition record new partition name not found")
50 | }
51 | return nil
52 | }
53 |
54 | func NewRenamePartitionFromJson(data string) (*RenamePartition, error) {
55 | var renamePartition RenamePartition
56 | if err := renamePartition.Deserialize(data); err != nil {
57 | return nil, err
58 | }
59 | return &renamePartition, nil
60 | }
61 |
62 | func (renamePartition *RenamePartition) GetTableId() int64 {
63 | return renamePartition.TableId
64 | }
65 |
66 | // Stringer
67 | func (r *RenamePartition) String() string {
68 | return fmt.Sprintf("RenamePartition: DbId: %d, TableId: %d, PartitionId: %d, NewPartitionName: %s, OldPartitionName: %s",
69 | r.DbId, r.TableId, r.PartitionId, r.NewPartitionName, r.OldPartitionName)
70 | }
71 |
--------------------------------------------------------------------------------
/pkg/ccr/record/rename_rollup.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | import (
20 | "encoding/json"
21 | "fmt"
22 |
23 | "github.com/selectdb/ccr_syncer/pkg/xerror"
24 | )
25 |
26 | type RenameRollup struct {
27 | DbId int64 `json:"db"`
28 | TableId int64 `json:"tb"`
29 | IndexId int64 `json:"ind"`
30 | NewRollupName string `json:"nR"`
31 | OldRollupName string `json:"oR"`
32 | }
33 |
34 | func (renameRollup *RenameRollup) Deserialize(data string) error {
35 | err := json.Unmarshal([]byte(data), &renameRollup)
36 | if err != nil {
37 | return xerror.Wrap(err, xerror.Normal, "unmarshal rename rollup record error")
38 | }
39 |
40 | if renameRollup.TableId == 0 {
41 | return xerror.Errorf(xerror.Normal, "rename rollup record table id not found")
42 | }
43 |
44 | if renameRollup.NewRollupName == "" {
45 | return xerror.Errorf(xerror.Normal, "rename rollup record old rollup name not found")
46 | }
47 | return nil
48 | }
49 |
50 | func NewRenameRollupFromJson(data string) (*RenameRollup, error) {
51 | var renameRollup RenameRollup
52 | if err := renameRollup.Deserialize(data); err != nil {
53 | return nil, err
54 | }
55 | return &renameRollup, nil
56 | }
57 |
58 | // Stringer
59 | func (r *RenameRollup) String() string {
60 | return fmt.Sprintf("RenameRollup: DbId: %d, TableId: %d, IndexId: %d, NewRollupName: %s, OldRollupName: %s",
61 | r.DbId, r.TableId, r.IndexId, r.NewRollupName, r.OldRollupName)
62 | }
63 |
64 | func (renameRollup *RenameRollup) GetTableId() int64 {
65 | return renameRollup.TableId
66 | }
67 |
--------------------------------------------------------------------------------
/pkg/ccr/record/rename_table.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | import (
20 | "encoding/json"
21 | "fmt"
22 |
23 | "github.com/selectdb/ccr_syncer/pkg/xerror"
24 | )
25 |
26 | type RenameTable struct {
27 | DbId int64 `json:"db"`
28 | TableId int64 `json:"tb"`
29 | IndexId int64 `json:"ind"`
30 | PartitionId int64 `json:"p"`
31 | NewTableName string `json:"nT"`
32 | OldTableName string `json:"oT"`
33 | NewRollupName string `json:"nR"`
34 | OldRollupName string `json:"oR"`
35 | NewPartitionName string `json:"nP"`
36 | OldPartitionName string `json:"oP"`
37 | }
38 |
39 | func NewRenameTableFromJson(data string) (*RenameTable, error) {
40 | var renameTable RenameTable
41 | err := json.Unmarshal([]byte(data), &renameTable)
42 | if err != nil {
43 | return nil, xerror.Wrap(err, xerror.Normal, "unmarshal rename table error")
44 | }
45 |
46 | if renameTable.TableId == 0 {
47 | return nil, xerror.Errorf(xerror.Normal, "table id not found")
48 | }
49 |
50 | return &renameTable, nil
51 | }
52 |
53 | // Stringer
54 | func (r *RenameTable) String() string {
55 | return fmt.Sprintf("RenameTable: DbId: %d, TableId: %d, PartitionId: %d, IndexId: %d, NewTableName: %s, OldTableName: %s, NewRollupName: %s, OldRollupName: %s, NewPartitionName: %s, OldPartitionName: %s", r.DbId, r.TableId, r.PartitionId, r.IndexId, r.NewTableName, r.OldTableName, r.NewRollupName, r.OldRollupName, r.NewPartitionName, r.OldPartitionName)
56 | }
57 |
--------------------------------------------------------------------------------
/pkg/ccr/record/replace_partition.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | import (
20 | "encoding/json"
21 |
22 | "github.com/selectdb/ccr_syncer/pkg/xerror"
23 | )
24 |
25 | type ReplacePartitionRecord struct {
26 | DbId int64 `json:"dbId"`
27 | DbName string `json:"dbName"`
28 | TableId int64 `json:"tblId"`
29 | TableName string `json:"tblName"`
30 | Partitions []string `json:"partitions"`
31 | TempPartitions []string `json:"tempPartitions"`
32 | StrictRange bool `json:"strictRange"`
33 | UseTempName bool `json:"useTempPartitionName"`
34 | }
35 |
36 | func NewReplacePartitionFromJson(data string) (*ReplacePartitionRecord, error) {
37 | var replacePartition ReplacePartitionRecord
38 | err := json.Unmarshal([]byte(data), &replacePartition)
39 | if err != nil {
40 | return nil, xerror.Wrap(err, xerror.Normal, "unmarshal replace partition error")
41 | }
42 |
43 | if len(replacePartition.TempPartitions) == 0 {
44 | return nil, xerror.Errorf(xerror.Normal, "the temp partitions of the replace partition record is empty")
45 | }
46 |
47 | if replacePartition.TableId == 0 {
48 | return nil, xerror.Errorf(xerror.Normal, "table id not found")
49 | }
50 |
51 | if replacePartition.TableName == "" {
52 | return nil, xerror.Errorf(xerror.Normal, "table name is empty")
53 | }
54 |
55 | return &replacePartition, nil
56 | }
57 |
--------------------------------------------------------------------------------
/pkg/ccr/record/table_type.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | const (
20 | TableTypeOlap = "OLAP"
21 | TableTypeView = "VIEW"
22 | TableTypeMaterializedView = "MATERIALIZED_VIEW"
23 | TableTypeElasticSearch = "ELASTICSEARCH"
24 | )
25 |
--------------------------------------------------------------------------------
/pkg/ccr/record/truncate_table.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package record
18 |
19 | import (
20 | "encoding/json"
21 | "fmt"
22 |
23 | "github.com/selectdb/ccr_syncer/pkg/xerror"
24 | )
25 |
26 | // {
27 | // "dbId": 10079,
28 | // "db": "default_cluster:ccr", # "default_cluster:" prefix will be removed in Doris v2.1
29 | // "tblId": 77395,
30 | // "table": "src_1_alias",
31 | // "isEntireTable": false,
32 | // "rawSql": "PARTITIONS (src_1_alias)"
33 | // }
34 |
35 | type TruncateTable struct {
36 | DbId int64 `json:"dbId"`
37 | DbName string `json:"db"`
38 | TableId int64 `json:"tblId"`
39 | TableName string `json:"table"`
40 | IsEntireTable bool `json:"isEntireTable"`
41 | RawSql string `json:"rawSql"`
42 | }
43 |
44 | func NewTruncateTableFromJson(data string) (*TruncateTable, error) {
45 | var truncateTable TruncateTable
46 | err := json.Unmarshal([]byte(data), &truncateTable)
47 | if err != nil {
48 | return nil, xerror.Wrap(err, xerror.Normal, "unmarshal truncate table error")
49 | }
50 |
51 | if truncateTable.TableId == 0 {
52 | return nil, xerror.Errorf(xerror.Normal, "table id not found")
53 | }
54 |
55 | return &truncateTable, nil
56 | }
57 |
58 | // Stringer
59 | func (t *TruncateTable) String() string {
60 | return fmt.Sprintf("TruncateTable: DbId: %d, Db: %s, TableId: %d, Table: %s, IsEntireTable: %v, RawSql: %s", t.DbId, t.DbName, t.TableId, t.TableName, t.IsEntireTable, t.RawSql)
61 | }
62 |
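
A sketch of parsing the JSON documented in the comment above with the constructor from this file:

package main

import (
	"fmt"

	"github.com/selectdb/ccr_syncer/pkg/ccr/record"
)

func main() {
	data := `{"dbId": 10079, "db": "default_cluster:ccr", "tblId": 77395,
		"table": "src_1_alias", "isEntireTable": false, "rawSql": "PARTITIONS (src_1_alias)"}`
	t, err := record.NewTruncateTableFromJson(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(t) // printed via the Stringer defined above
}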
--------------------------------------------------------------------------------
/pkg/ccr/snapshot_job.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package ccr
18 |
--------------------------------------------------------------------------------
/pkg/rpc/Makefile:
--------------------------------------------------------------------------------
1 | gen_thrift:
2 | kitex -module github.com/selectdb/ccr_syncer thrift/FrontendService.thrift
3 | kitex -module github.com/selectdb/ccr_syncer thrift/BackendService.thrift
4 |
--------------------------------------------------------------------------------
/pkg/rpc/be.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package rpc
18 |
19 | import (
20 | "context"
21 | "time"
22 |
23 | "github.com/cloudwego/kitex/client/callopt"
24 | "github.com/selectdb/ccr_syncer/pkg/ccr/base"
25 | "github.com/selectdb/ccr_syncer/pkg/xerror"
26 | "github.com/selectdb/ccr_syncer/pkg/xmetrics"
27 |
28 | bestruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/backendservice"
29 | beservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/backendservice/backendservice"
30 |
31 | log "github.com/sirupsen/logrus"
32 | )
33 |
34 | type BeRpcOption func() callopt.Option
35 |
36 | type IBeRpc interface {
37 | IngestBinlog(context.Context, *bestruct.TIngestBinlogRequest, ...BeRpcOption) (*bestruct.TIngestBinlogResult_, error)
38 | }
39 |
40 | type BeRpc struct {
41 | backend *base.Backend
42 | client beservice.Client
43 | }
44 |
45 | func (beRpc *BeRpc) IngestBinlog(ctx context.Context, req *bestruct.TIngestBinlogRequest, beOptions ...BeRpcOption) (*bestruct.TIngestBinlogResult_, error) {
46 | log.Tracef("IngestBinlog req: %+v, txnId: %d, be: %v", req, req.GetTxnId(), beRpc.backend)
47 |
48 | defer xmetrics.RecordBeRpc("IngestBinlog", beRpc.backend.Host, beRpc.backend.BePort)()
49 |
50 | var options []callopt.Option
51 | for _, opt := range beOptions {
52 | options = append(options, opt())
53 | }
54 | client := beRpc.client
55 | if result, err := client.IngestBinlog(ctx, req, options...); err != nil {
56 | return nil, xerror.Wrapf(err, xerror.Normal,
57 | "IngestBinlog error: %v, txnId: %d, be: %v", err, req.GetTxnId(), beRpc.backend)
58 | } else {
59 | return result, nil
60 | }
61 | }
62 |
63 | func WithBeRpcTimeout(timeout time.Duration) BeRpcOption {
64 | return func() callopt.Option {
65 | return callopt.WithRPCTimeout(timeout)
66 | }
67 | }
68 |
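
For reference, a sketch of issuing IngestBinlog with the per-call timeout option; how the IBeRpc instance is obtained (for example from the rpc factory elsewhere in this repository) is assumed and not shown:

package main

import (
	"context"
	"time"

	"github.com/selectdb/ccr_syncer/pkg/rpc"
	bestruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/backendservice"
)

// ingestOnce assumes the caller already holds an IBeRpc bound to the target backend.
func ingestOnce(ctx context.Context, beRpc rpc.IBeRpc, req *bestruct.TIngestBinlogRequest) (*bestruct.TIngestBinlogResult_, error) {
	// Cap this single RPC at 30s; WithBeRpcTimeout is turned into a kitex call option.
	return beRpc.IngestBinlog(ctx, req, rpc.WithBeRpcTimeout(30*time.Second))
}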
--------------------------------------------------------------------------------
/pkg/rpc/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | kitex -module github.com/selectdb/ccr_syncer thrift/FrontendService.thrift
4 | kitex -module github.com/selectdb/ccr_syncer thrift/BackendService.thrift
5 |
--------------------------------------------------------------------------------
/pkg/rpc/concurrency.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package rpc
18 |
19 | import (
20 | "flag"
21 | "sync"
22 | )
23 |
24 | var (
25 | FlagMaxIngestConcurrencyPerBackend int64
26 | )
27 |
28 | func init() {
29 | flag.Int64Var(&FlagMaxIngestConcurrencyPerBackend, "max_ingest_concurrency_per_backend", 48,
30 | "The max concurrency of the binlog ingesting per backend")
31 | }
32 |
33 | type ConcurrencyWindow struct {
34 | mu *sync.Mutex
35 | cond *sync.Cond
36 |
37 | id int64
38 | inflights int64
39 | }
40 |
41 | func newConcurrencyWindow(id int64) *ConcurrencyWindow {
42 | mu := &sync.Mutex{}
43 | return &ConcurrencyWindow{
44 | mu: mu,
45 | cond: sync.NewCond(mu),
46 | id: id,
47 | inflights: 0,
48 | }
49 | }
50 |
51 | func (cw *ConcurrencyWindow) Acquire() {
52 | cw.mu.Lock()
53 | defer cw.mu.Unlock()
54 |
55 | for cw.inflights+1 > FlagMaxIngestConcurrencyPerBackend {
56 | cw.cond.Wait()
57 | }
58 | cw.inflights += 1
59 | }
60 |
61 | func (cw *ConcurrencyWindow) Release() {
62 | cw.mu.Lock()
63 | defer cw.mu.Unlock()
64 |
65 | if cw.inflights == 0 {
66 | return
67 | }
68 |
69 | cw.inflights -= 1
70 | cw.cond.Signal()
71 | }
72 |
73 | type ConcurrencyManager struct {
74 | windows sync.Map
75 | }
76 |
77 | func NewConcurrencyManager() *ConcurrencyManager {
78 | return &ConcurrencyManager{}
79 | }
80 |
81 | func (cm *ConcurrencyManager) GetWindow(id int64) *ConcurrencyWindow {
82 | value, ok := cm.windows.Load(id)
83 | if !ok {
84 | window := newConcurrencyWindow(id)
85 | value, _ = cm.windows.LoadOrStore(id, window)
86 | }
87 | return value.(*ConcurrencyWindow)
88 | }
89 |
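
A sketch of the intended bracketing around per-backend work; doIngest is a hypothetical placeholder for the real ingest call:

package main

import "github.com/selectdb/ccr_syncer/pkg/rpc"

// doIngest stands in for the actual per-backend ingest work.
func doIngest(backendId int64) error { return nil }

func ingestWithLimit(cm *rpc.ConcurrencyManager, backendId int64) error {
	window := cm.GetWindow(backendId)
	window.Acquire() // blocks while this backend already has max_ingest_concurrency_per_backend requests in flight
	defer window.Release()
	return doIngest(backendId)
}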
--------------------------------------------------------------------------------
/pkg/rpc/error.go:
--------------------------------------------------------------------------------
1 | package rpc
2 |
3 | import (
4 | "errors"
5 |
6 | "github.com/cloudwego/kitex/pkg/kerrors"
7 | "github.com/cloudwego/kitex/pkg/remote"
8 | "github.com/selectdb/ccr_syncer/pkg/xerror"
9 | )
10 |
11 | func IsUnknownMethod(err error) bool {
12 | if err == nil {
13 | return false
14 | }
15 |
16 | var xerr *xerror.XError
17 | if errors.As(err, &xerr) {
18 | return IsUnknownMethod(xerr.Unwrap())
19 | }
20 |
21 | var de *kerrors.DetailedError
22 | if errors.As(err, &de) {
23 | return IsUnknownMethod(de.Unwrap())
24 | }
25 |
26 | var te *remote.TransError
27 | if errors.As(err, &te) && te.TypeID() == remote.UnknownMethod {
28 | return true
29 | }
30 |
31 | return false
32 | }
33 |
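Usage note: a hedged sketch of branching on IsUnknownMethod when talking to an older FE/BE that does not implement a newer thrift method. The method name and the kitex remote.NewTransErrorWithMsg constructor used to simulate the error are assumptions for illustration, not taken from this repository.

    package main

    import (
        "fmt"

        "github.com/cloudwego/kitex/pkg/remote"

        "github.com/selectdb/ccr_syncer/pkg/rpc"
    )

    func main() {
        // Simulate the transport error kitex reports when the remote side does not
        // know the requested method (e.g. an older Doris version).
        err := remote.NewTransErrorWithMsg(remote.UnknownMethod, "unknown method getBinlogLag")

        if rpc.IsUnknownMethod(err) {
            fmt.Println("remote side is too old for this RPC, fall back to the legacy path")
            return
        }
        fmt.Println("unexpected error:", err)
    }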
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/agentservice/k-consts.go:
--------------------------------------------------------------------------------
1 | package agentservice
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/backendservice/backendservice/invoker.go:
--------------------------------------------------------------------------------
1 | // Code generated by Kitex v0.8.0. DO NOT EDIT.
2 |
3 | package backendservice
4 |
5 | import (
6 | server "github.com/cloudwego/kitex/server"
7 | backendservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/backendservice"
8 | )
9 |
10 | // NewInvoker creates a server.Invoker with the given handler and options.
11 | func NewInvoker(handler backendservice.BackendService, opts ...server.Option) server.Invoker {
12 | var options []server.Option
13 |
14 | options = append(options, opts...)
15 |
16 | s := server.NewInvoker(options...)
17 | if err := s.RegisterService(serviceInfo(), handler); err != nil {
18 | panic(err)
19 | }
20 | if err := s.Init(); err != nil {
21 | panic(err)
22 | }
23 | return s
24 | }
25 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/backendservice/backendservice/server.go:
--------------------------------------------------------------------------------
1 | // Code generated by Kitex v0.8.0. DO NOT EDIT.
2 | package backendservice
3 |
4 | import (
5 | server "github.com/cloudwego/kitex/server"
6 | backendservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/backendservice"
7 | )
8 |
9 | // NewServer creates a server.Server with the given handler and options.
10 | func NewServer(handler backendservice.BackendService, opts ...server.Option) server.Server {
11 | var options []server.Option
12 |
13 | options = append(options, opts...)
14 |
15 | svr := server.NewServer(options...)
16 | if err := svr.RegisterService(serviceInfo(), handler); err != nil {
17 | panic(err)
18 | }
19 | return svr
20 | }
21 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/backendservice/k-consts.go:
--------------------------------------------------------------------------------
1 | package backendservice
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/data/k-consts.go:
--------------------------------------------------------------------------------
1 | package data
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/datasinks/k-consts.go:
--------------------------------------------------------------------------------
1 | package datasinks
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/descriptors/k-consts.go:
--------------------------------------------------------------------------------
1 | package descriptors
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/dorisexternalservice/k-consts.go:
--------------------------------------------------------------------------------
1 | package dorisexternalservice
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/invoker.go:
--------------------------------------------------------------------------------
1 | // Code generated by Kitex v0.8.0. DO NOT EDIT.
2 |
3 | package tdorisexternalservice
4 |
5 | import (
6 | server "github.com/cloudwego/kitex/server"
7 | dorisexternalservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/dorisexternalservice"
8 | )
9 |
10 | // NewInvoker creates a server.Invoker with the given handler and options.
11 | func NewInvoker(handler dorisexternalservice.TDorisExternalService, opts ...server.Option) server.Invoker {
12 | var options []server.Option
13 |
14 | options = append(options, opts...)
15 |
16 | s := server.NewInvoker(options...)
17 | if err := s.RegisterService(serviceInfo(), handler); err != nil {
18 | panic(err)
19 | }
20 | if err := s.Init(); err != nil {
21 | panic(err)
22 | }
23 | return s
24 | }
25 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/dorisexternalservice/tdorisexternalservice/server.go:
--------------------------------------------------------------------------------
1 | // Code generated by Kitex v0.8.0. DO NOT EDIT.
2 | package tdorisexternalservice
3 |
4 | import (
5 | server "github.com/cloudwego/kitex/server"
6 | dorisexternalservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/dorisexternalservice"
7 | )
8 |
9 | // NewServer creates a server.Server with the given handler and options.
10 | func NewServer(handler dorisexternalservice.TDorisExternalService, opts ...server.Option) server.Server {
11 | var options []server.Option
12 |
13 | options = append(options, opts...)
14 |
15 | svr := server.NewServer(options...)
16 | if err := svr.RegisterService(serviceInfo(), handler); err != nil {
17 | panic(err)
18 | }
19 | return svr
20 | }
21 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/exprs/k-consts.go:
--------------------------------------------------------------------------------
1 | package exprs
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/frontendservice/frontendservice/invoker.go:
--------------------------------------------------------------------------------
1 | // Code generated by Kitex v0.8.0. DO NOT EDIT.
2 |
3 | package frontendservice
4 |
5 | import (
6 | server "github.com/cloudwego/kitex/server"
7 | frontendservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
8 | )
9 |
10 | // NewInvoker creates a server.Invoker with the given handler and options.
11 | func NewInvoker(handler frontendservice.FrontendService, opts ...server.Option) server.Invoker {
12 | var options []server.Option
13 |
14 | options = append(options, opts...)
15 |
16 | s := server.NewInvoker(options...)
17 | if err := s.RegisterService(serviceInfo(), handler); err != nil {
18 | panic(err)
19 | }
20 | if err := s.Init(); err != nil {
21 | panic(err)
22 | }
23 | return s
24 | }
25 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/frontendservice/frontendservice/server.go:
--------------------------------------------------------------------------------
1 | // Code generated by Kitex v0.8.0. DO NOT EDIT.
2 | package frontendservice
3 |
4 | import (
5 | server "github.com/cloudwego/kitex/server"
6 | frontendservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
7 | )
8 |
9 | // NewServer creates a server.Server with the given handler and options.
10 | func NewServer(handler frontendservice.FrontendService, opts ...server.Option) server.Server {
11 | var options []server.Option
12 |
13 | options = append(options, opts...)
14 |
15 | svr := server.NewServer(options...)
16 | if err := svr.RegisterService(serviceInfo(), handler); err != nil {
17 | panic(err)
18 | }
19 | return svr
20 | }
21 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/frontendservice/k-consts.go:
--------------------------------------------------------------------------------
1 | package frontendservice
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/client.go:
--------------------------------------------------------------------------------
1 | // Code generated by Kitex v0.8.0. DO NOT EDIT.
2 |
3 | package heartbeatservice
4 |
5 | import (
6 | "context"
7 | client "github.com/cloudwego/kitex/client"
8 | callopt "github.com/cloudwego/kitex/client/callopt"
9 | heartbeatservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/heartbeatservice"
10 | )
11 |
12 | // Client is designed to provide IDL-compatible methods with call-option parameter for kitex framework.
13 | type Client interface {
14 | Heartbeat(ctx context.Context, masterInfo *heartbeatservice.TMasterInfo, callOptions ...callopt.Option) (r *heartbeatservice.THeartbeatResult_, err error)
15 | }
16 |
17 | // NewClient creates a client for the service defined in IDL.
18 | func NewClient(destService string, opts ...client.Option) (Client, error) {
19 | var options []client.Option
20 | options = append(options, client.WithDestService(destService))
21 |
22 | options = append(options, opts...)
23 |
24 | kc, err := client.NewClient(serviceInfo(), options...)
25 | if err != nil {
26 | return nil, err
27 | }
28 | return &kHeartbeatServiceClient{
29 | kClient: newServiceClient(kc),
30 | }, nil
31 | }
32 |
33 | // MustNewClient creates a client for the service defined in IDL. It panics if any error occurs.
34 | func MustNewClient(destService string, opts ...client.Option) Client {
35 | kc, err := NewClient(destService, opts...)
36 | if err != nil {
37 | panic(err)
38 | }
39 | return kc
40 | }
41 |
42 | type kHeartbeatServiceClient struct {
43 | *kClient
44 | }
45 |
46 | func (p *kHeartbeatServiceClient) Heartbeat(ctx context.Context, masterInfo *heartbeatservice.TMasterInfo, callOptions ...callopt.Option) (r *heartbeatservice.THeartbeatResult_, err error) {
47 | ctx = client.NewCtxWithCallOptions(ctx, callOptions)
48 | return p.kClient.Heartbeat(ctx, masterInfo)
49 | }
50 |
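Usage note: a minimal sketch of calling the generated heartbeat client. The destination service name, address, and timeout are illustrative; client.WithHostPorts and client.WithRPCTimeout are standard kitex client options.

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/cloudwego/kitex/client"

        "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/heartbeatservice"
        heartbeatclient "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice"
    )

    func main() {
        cli, err := heartbeatclient.NewClient("HeartbeatService",
            client.WithHostPorts("127.0.0.1:9020"), // illustrative FE heartbeat address
            client.WithRPCTimeout(3*time.Second))
        if err != nil {
            panic(err)
        }

        resp, err := cli.Heartbeat(context.Background(), &heartbeatservice.TMasterInfo{})
        if err != nil {
            panic(err)
        }
        fmt.Println(resp)
    }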
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/heartbeatservice.go:
--------------------------------------------------------------------------------
1 | // Code generated by Kitex v0.8.0. DO NOT EDIT.
2 |
3 | package heartbeatservice
4 |
5 | import (
6 | "context"
7 | client "github.com/cloudwego/kitex/client"
8 | kitex "github.com/cloudwego/kitex/pkg/serviceinfo"
9 | heartbeatservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/heartbeatservice"
10 | )
11 |
12 | func serviceInfo() *kitex.ServiceInfo {
13 | return heartbeatServiceServiceInfo
14 | }
15 |
16 | var heartbeatServiceServiceInfo = NewServiceInfo()
17 |
18 | func NewServiceInfo() *kitex.ServiceInfo {
19 | serviceName := "HeartbeatService"
20 | handlerType := (*heartbeatservice.HeartbeatService)(nil)
21 | methods := map[string]kitex.MethodInfo{
22 | "heartbeat": kitex.NewMethodInfo(heartbeatHandler, newHeartbeatServiceHeartbeatArgs, newHeartbeatServiceHeartbeatResult, false),
23 | }
24 | extra := map[string]interface{}{
25 | "PackageName": "heartbeatservice",
26 | "ServiceFilePath": `thrift/HeartbeatService.thrift`,
27 | }
28 | svcInfo := &kitex.ServiceInfo{
29 | ServiceName: serviceName,
30 | HandlerType: handlerType,
31 | Methods: methods,
32 | PayloadCodec: kitex.Thrift,
33 | KiteXGenVersion: "v0.8.0",
34 | Extra: extra,
35 | }
36 | return svcInfo
37 | }
38 |
39 | func heartbeatHandler(ctx context.Context, handler interface{}, arg, result interface{}) error {
40 | realArg := arg.(*heartbeatservice.HeartbeatServiceHeartbeatArgs)
41 | realResult := result.(*heartbeatservice.HeartbeatServiceHeartbeatResult)
42 | success, err := handler.(heartbeatservice.HeartbeatService).Heartbeat(ctx, realArg.MasterInfo)
43 | if err != nil {
44 | return err
45 | }
46 | realResult.Success = success
47 | return nil
48 | }
49 | func newHeartbeatServiceHeartbeatArgs() interface{} {
50 | return heartbeatservice.NewHeartbeatServiceHeartbeatArgs()
51 | }
52 |
53 | func newHeartbeatServiceHeartbeatResult() interface{} {
54 | return heartbeatservice.NewHeartbeatServiceHeartbeatResult()
55 | }
56 |
57 | type kClient struct {
58 | c client.Client
59 | }
60 |
61 | func newServiceClient(c client.Client) *kClient {
62 | return &kClient{
63 | c: c,
64 | }
65 | }
66 |
67 | func (p *kClient) Heartbeat(ctx context.Context, masterInfo *heartbeatservice.TMasterInfo) (r *heartbeatservice.THeartbeatResult_, err error) {
68 | var _args heartbeatservice.HeartbeatServiceHeartbeatArgs
69 | _args.MasterInfo = masterInfo
70 | var _result heartbeatservice.HeartbeatServiceHeartbeatResult
71 | if err = p.c.Call(ctx, "heartbeat", &_args, &_result); err != nil {
72 | return
73 | }
74 | return _result.GetSuccess(), nil
75 | }
76 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/invoker.go:
--------------------------------------------------------------------------------
1 | // Code generated by Kitex v0.8.0. DO NOT EDIT.
2 |
3 | package heartbeatservice
4 |
5 | import (
6 | server "github.com/cloudwego/kitex/server"
7 | heartbeatservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/heartbeatservice"
8 | )
9 |
10 | // NewInvoker creates a server.Invoker with the given handler and options.
11 | func NewInvoker(handler heartbeatservice.HeartbeatService, opts ...server.Option) server.Invoker {
12 | var options []server.Option
13 |
14 | options = append(options, opts...)
15 |
16 | s := server.NewInvoker(options...)
17 | if err := s.RegisterService(serviceInfo(), handler); err != nil {
18 | panic(err)
19 | }
20 | if err := s.Init(); err != nil {
21 | panic(err)
22 | }
23 | return s
24 | }
25 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/heartbeatservice/heartbeatservice/server.go:
--------------------------------------------------------------------------------
1 | // Code generated by Kitex v0.8.0. DO NOT EDIT.
2 | package heartbeatservice
3 |
4 | import (
5 | server "github.com/cloudwego/kitex/server"
6 | heartbeatservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/heartbeatservice"
7 | )
8 |
9 | // NewServer creates a server.Server with the given handler and options.
10 | func NewServer(handler heartbeatservice.HeartbeatService, opts ...server.Option) server.Server {
11 | var options []server.Option
12 |
13 | options = append(options, opts...)
14 |
15 | svr := server.NewServer(options...)
16 | if err := svr.RegisterService(serviceInfo(), handler); err != nil {
17 | panic(err)
18 | }
19 | return svr
20 | }
21 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/heartbeatservice/k-consts.go:
--------------------------------------------------------------------------------
1 | package heartbeatservice
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/masterservice/k-consts.go:
--------------------------------------------------------------------------------
1 | package masterservice
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/metrics/k-Metrics.go:
--------------------------------------------------------------------------------
1 | // Code generated by Kitex v0.8.0. DO NOT EDIT.
2 |
3 | package metrics
4 |
5 | import (
6 | "bytes"
7 | "fmt"
8 | "reflect"
9 | "strings"
10 |
11 | "github.com/apache/thrift/lib/go/thrift"
12 |
13 | "github.com/cloudwego/kitex/pkg/protocol/bthrift"
14 | )
15 |
16 | // unused protection
17 | var (
18 | _ = fmt.Formatter(nil)
19 | _ = (*bytes.Buffer)(nil)
20 | _ = (*strings.Builder)(nil)
21 | _ = reflect.Type(nil)
22 | _ = thrift.TProtocol(nil)
23 | _ = bthrift.BinaryWriter(nil)
24 | )
25 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/metrics/k-consts.go:
--------------------------------------------------------------------------------
1 | package metrics
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/opcodes/k-Opcodes.go:
--------------------------------------------------------------------------------
1 | // Code generated by Kitex v0.8.0. DO NOT EDIT.
2 |
3 | package opcodes
4 |
5 | import (
6 | "bytes"
7 | "fmt"
8 | "reflect"
9 | "strings"
10 |
11 | "github.com/apache/thrift/lib/go/thrift"
12 |
13 | "github.com/cloudwego/kitex/pkg/protocol/bthrift"
14 | )
15 |
16 | // unused protection
17 | var (
18 | _ = fmt.Formatter(nil)
19 | _ = (*bytes.Buffer)(nil)
20 | _ = (*strings.Builder)(nil)
21 | _ = reflect.Type(nil)
22 | _ = thrift.TProtocol(nil)
23 | _ = bthrift.BinaryWriter(nil)
24 | )
25 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/opcodes/k-consts.go:
--------------------------------------------------------------------------------
1 | package opcodes
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/palointernalservice/k-consts.go:
--------------------------------------------------------------------------------
1 | package palointernalservice
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/paloservice/k-PaloService.go:
--------------------------------------------------------------------------------
1 | // Code generated by Kitex v0.8.0. DO NOT EDIT.
2 |
3 | package paloservice
4 |
5 | import (
6 | "bytes"
7 | "fmt"
8 | "reflect"
9 | "strings"
10 |
11 | "github.com/apache/thrift/lib/go/thrift"
12 |
13 | "github.com/cloudwego/kitex/pkg/protocol/bthrift"
14 |
15 | "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status"
16 | )
17 |
18 | // unused protection
19 | var (
20 | _ = fmt.Formatter(nil)
21 | _ = (*bytes.Buffer)(nil)
22 | _ = (*strings.Builder)(nil)
23 | _ = reflect.Type(nil)
24 | _ = thrift.TProtocol(nil)
25 | _ = bthrift.BinaryWriter(nil)
26 | _ = status.KitexUnusedProtection
27 | )
28 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/paloservice/k-consts.go:
--------------------------------------------------------------------------------
1 | package paloservice
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/partitions/k-consts.go:
--------------------------------------------------------------------------------
1 | package partitions
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/planner/k-consts.go:
--------------------------------------------------------------------------------
1 | package planner
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/plannodes/k-consts.go:
--------------------------------------------------------------------------------
1 | package plannodes
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/querycache/k-consts.go:
--------------------------------------------------------------------------------
1 | package querycache
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/runtimeprofile/k-consts.go:
--------------------------------------------------------------------------------
1 | package runtimeprofile
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/status/k-consts.go:
--------------------------------------------------------------------------------
1 | package status
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/kitex_gen/types/k-consts.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | // KitexUnusedProtection is used to prevent 'imported and not used' error.
4 | var KitexUnusedProtection = struct{}{}
5 |
--------------------------------------------------------------------------------
/pkg/rpc/thrift/Data.thrift:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | namespace cpp doris
19 | namespace java org.apache.doris.thrift
20 |
21 | include "Types.thrift"
22 |
23 | // this is a union over all possible return types
24 | struct TCell {
25 | // TODO: use _val instead of camelcase
26 | 1: optional bool boolVal
27 | 2: optional i32 intVal
28 | 3: optional i64 longVal
29 | 4: optional double doubleVal
30 | 5: optional string stringVal
31 | 6: optional bool isNull
32 | // add type: date datetime
33 | }
34 |
35 | struct TResultRow {
36 | 1: list<TCell> colVals
37 | }
38 |
39 | struct TRow {
40 | 1: optional list<TCell> column_value
41 | }
42 |
43 | // Serialized, self-contained version of a RowBatch (in be/src/runtime/row-batch.h).
44 | struct TResultBatch {
45 | // mysql result row
46 | 1: required list<string> rows
47 |
48 | // Indicates whether tuple_data is snappy-compressed
49 | 2: required bool is_compressed
50 |
51 | // packet seq used to check whether any packet was lost
52 | 3: required i64 packet_seq
53 |
54 | 4: optional map<string, string> attached_infos
55 | }
56 |
--------------------------------------------------------------------------------
/pkg/rpc/thrift/Makefile:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing,
12 | # software distributed under the License is distributed on an
13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | # KIND, either express or implied. See the License for the
15 | # specific language governing permissions and limitations
16 | # under the License.
17 |
18 | # This file is used to compile all thrift files.
19 | BUILD_DIR = ${CURDIR}/../build/
20 |
21 | THRIFT = ${DORIS_THIRDPARTY}/installed/bin/thrift
22 |
23 | SOURCES = $(shell find ${CURDIR} -name "*.thrift")
24 | OBJECTS = $(patsubst ${CURDIR}/%.thrift, ${BUILD_DIR}/gen_cpp/%_types.cpp, ${SOURCES})
25 |
26 | GEN_SOURCES = $(shell find ${THRIFT} -name "*.thrift")
27 | GEN_OBJECTS = $(patsubst ${BUILD_DIR}/thrift/%.thrift, ${BUILD_DIR}/gen_cpp/%_types.cpp, ${GEN_SOURCES})
28 |
29 | all: ${GEN_OBJECTS} ${OBJECTS}
30 | .PHONY: all
31 |
32 | $(shell mkdir -p ${BUILD_DIR}/gen_java)
33 |
34 | THRIFT_CPP_ARGS = -I ${CURDIR} -I ${BUILD_DIR}/thrift/ --gen cpp:moveable_types -out ${BUILD_DIR}/gen_cpp --allow-64bit-consts -strict
35 | THRIFT_JAVA_ARGS = -I ${CURDIR} -I ${BUILD_DIR}/thrift/ --gen java:fullcamel -out ${BUILD_DIR}/gen_java --allow-64bit-consts -strict
36 |
37 | ${BUILD_DIR}/gen_cpp:
38 | mkdir -p $@
39 | # handwrite thrift
40 | ${BUILD_DIR}/gen_cpp/%_types.cpp: ${CURDIR}/%.thrift | ${BUILD_DIR}/gen_cpp
41 | ${THRIFT} ${THRIFT_CPP_ARGS} $<
42 | ${THRIFT} ${THRIFT_JAVA_ARGS} $<
43 |
44 | # generated thrift
45 | ${BUILD_DIR}/gen_cpp/%_types.cpp: ${BUILD_DIR}/thrift/%.thrift | ${BUILD_DIR}/gen_cpp
46 | ${THRIFT} ${THRIFT_CPP_ARGS} $<
47 | ${THRIFT} ${THRIFT_JAVA_ARGS} $<
48 |
--------------------------------------------------------------------------------
/pkg/rpc/thrift/Metrics.thrift:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | namespace cpp doris
19 | namespace java org.apache.doris.thrift
20 |
21 | // Metric and counter data types.
22 | enum TUnit {
23 | // A dimensionless numerical quantity
24 | UNIT,
25 | // Rate of a dimensionless numerical quantity
26 | UNIT_PER_SECOND,
27 | CPU_TICKS,
28 | BYTES,
29 | BYTES_PER_SECOND,
30 | TIME_NS,
31 | DOUBLE_VALUE,
32 | // No units at all, may not be a numerical quantity
33 | // It is used as a label now, so do not treat it as
34 | // a real counter.
35 | NONE,
36 | TIME_MS,
37 | TIME_S
38 | }
39 |
40 | // The kind of value that a metric represents.
41 | enum TMetricKind {
42 | // May go up or down over time
43 | GAUGE,
44 | // A strictly increasing value
45 | COUNTER,
46 | // Fixed; will never change
47 | PROPERTY,
48 | STATS,
49 | SET,
50 | HISTOGRAM
51 | }
52 |
--------------------------------------------------------------------------------
/pkg/rpc/thrift/NetworkTest.thrift:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | namespace cpp doristest
19 |
20 | struct ThriftDataParams {
21 | 1: required string data
22 | }
23 |
24 | struct ThriftDataResult {
25 | 1: required i64 bytes_received
26 | }
27 |
28 | service NetworkTestService {
29 | ThriftDataResult Send(1:ThriftDataParams params);
30 | }
31 |
--------------------------------------------------------------------------------
/pkg/rpc/thrift/Normalization.thrift:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | namespace java org.apache.doris.thrift
19 |
20 | include "Exprs.thrift"
21 | include "Types.thrift"
22 | include "Opcodes.thrift"
23 | include "Descriptors.thrift"
24 | include "Partitions.thrift"
25 | include "PlanNodes.thrift"
26 |
27 | struct TNormalizedOlapScanNode {
28 | 1: optional i64 table_id
29 | 2: optional i64 index_id
30 | 3: optional bool is_preaggregation
31 | 4: optional list key_column_names
32 | 5: optional list key_column_types
33 | 6: optional string rollup_name
34 | 7: optional string sort_column
35 | 8: optional list select_columns
36 | }
37 |
38 | struct TNormalizedAggregateNode {
39 | 1: optional list grouping_exprs
40 | 2: optional list aggregate_functions
41 | 3: optional Types.TTupleId intermediate_tuple_id
42 | 4: optional Types.TTupleId output_tuple_id
43 | 5: optional bool is_finalize
44 | 6: optional bool use_streaming_preaggregation
45 | 7: optional list projectToAggIntermediateTuple
46 | 8: optional list projectToAggOutputTuple
47 | }
48 |
49 | struct TNormalizedPlanNode {
50 | 1: optional Types.TPlanNodeId node_id
51 | 2: optional PlanNodes.TPlanNodeType node_type
52 | 3: optional i32 num_children
53 | 5: optional set tuple_ids
54 | 6: optional set nullable_tuples
55 | 7: optional list conjuncts
56 | 8: optional list projects
57 | 9: optional i64 limit
58 |
59 | 10: optional TNormalizedOlapScanNode olap_scan_node
60 | 11: optional TNormalizedAggregateNode aggregation_node
61 | }
--------------------------------------------------------------------------------
/pkg/rpc/thrift/QueryCache.thrift:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | namespace cpp doris
19 | namespace java org.apache.doris.thrift
20 |
21 | struct TQueryCacheParam {
22 | 1: optional i32 node_id
23 |
24 | 2: optional binary digest
25 |
26 | // The slot order of the current query can differ from the slot order of the cached
27 | // query, so we map each slot id in the plan node to a normalized slot id.
28 | // For example:
29 | // SQL1: select id, count(*) cnt, sum(value) s from tbl group by id
30 | // SQL2: select sum(value) s, count(*) cnt, id from tbl group by id
31 | // id always gets normalized slot id 0,
32 | // cnt always gets normalized slot id 1,
33 | // s always gets normalized slot id 2,
34 | // but in SQL1, id, cnt, s may have slot ids 5, 6, 7,
35 | // while in SQL2, s, cnt, id may have slot ids 10, 11, 12.
36 | // If the plan cache is generated by SQL1, its output_slot_mapping is {5: 0, 6: 1, 7: 2};
37 | // when SQL2 reads the plan cache, its output_slot_mapping is {10: 2, 11: 1, 12: 0}.
38 | // Even though the select order differs, the normalized slot ids are always the same:
39 | // id is always 0, cnt is always 1, s is always 2.
40 | // The backend can then map the current slots in the tuple to the cached query's slots.
41 | 3: optional map output_slot_mapping
42 |
43 | // Maps each tablet to its filter range; the BE uses it as part of the key
44 | // when searching the query cache.
45 | // Note that the BE does not care what the filter range contains; it is only part of the key.
46 | 4: optional map tablet_to_range
47 |
48 | 5: optional bool force_refresh_query_cache
49 |
50 | 6: optional i64 entry_max_bytes
51 |
52 | 7: optional i64 entry_max_rows
53 | }
--------------------------------------------------------------------------------
/pkg/rpc/thrift/QueryPlanExtra.thrift:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | namespace java org.apache.doris.thrift
19 | namespace cpp doris
20 |
21 | include "Types.thrift"
22 | include "Status.thrift"
23 | include "Planner.thrift"
24 | include "Descriptors.thrift"
25 |
26 | struct TTabletVersionInfo {
27 | 1: required i64 tablet_id
28 | 2: required i64 version
29 | 3: required i64 version_hash
30 | // i32 for historical reason
31 | 4: required i32 schema_hash
32 | }
33 |
34 | struct TQueryPlanInfo {
35 | 1: required Planner.TPlanFragment plan_fragment
36 | // tablet_id -> TTabletVersionInfo
37 | 2: required map<i64, TTabletVersionInfo> tablet_info
38 | 3: required Descriptors.TDescriptorTable desc_tbl
39 | // all tablet scan should share one query_id
40 | 4: required Types.TUniqueId query_id
41 | }
42 |
--------------------------------------------------------------------------------
/pkg/rpc/thrift/RuntimeProfile.thrift:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | namespace cpp doris
19 | namespace java org.apache.doris.thrift
20 |
21 | include "Metrics.thrift"
22 |
23 | // Counter data
24 | struct TCounter {
25 | 1: required string name
26 | 2: required Metrics.TUnit type
27 | 3: required i64 value
28 | 4: optional i64 level
29 | 5: optional string description
30 | }
31 |
32 | // A single runtime profile
33 | struct TRuntimeProfileNode {
34 | 1: required string name
35 | 2: required i32 num_children
36 | // Counters is a list of flattened counters for this node and all its children
37 | 3: required list<TCounter> counters
38 | // TODO: should we make metadata a serializable struct? We only use it to
39 | // store the node id right now so this is sufficient.
40 | 4: required i64 metadata
41 |
42 | // indicates whether the child will be printed with extra indentation;
43 | // corresponds to indent param of RuntimeProfile::AddChild()
44 | 5: required bool indent
45 |
46 | // map of key,value info strings that capture any kind of additional information
47 | // about the profiled object
48 | 6: required map<string, string> info_strings
49 |
50 | // Auxiliary structure to capture the info strings display order when printed
51 | 7: required list<string> info_strings_display_order
52 |
53 | // map from parent counter name to child counter name
54 | 8: required map<string, set<string>> child_counters_map
55 |
56 | 9: required i64 timestamp
57 |
58 | // Deprecated.
59 | 10: optional bool deprecated_is_sink
60 | }
61 |
62 | // A flattened tree of runtime profiles, obtained by an
63 | // in-order traversal
64 | struct TRuntimeProfileTree {
65 | 1: required list nodes
66 | }
67 |
--------------------------------------------------------------------------------
/pkg/utils/array.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package utils
18 |
19 | func FirstOr[T any](array []T, def T) T {
20 | if len(array) == 0 {
21 | return def
22 | }
23 | return array[0]
24 | }
25 |
--------------------------------------------------------------------------------
/pkg/utils/failpoint.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package utils
18 |
19 | import (
20 | "fmt"
21 | "sync"
22 | "sync/atomic"
23 | )
24 |
25 | var (
26 | failpointEnabled = atomic.Bool{}
27 | failpoints = sync.Map{}
28 | )
29 |
30 | type FailpointValue interface{}
31 |
32 | func IsFailpointEnabled() bool {
33 | return failpointEnabled.Load()
34 | }
35 |
36 | func EnableFailpoint() {
37 | failpointEnabled.Store(true)
38 | }
39 |
40 | func DisableFailpoint() {
41 | failpointEnabled.Store(false)
42 | }
43 |
44 | func InjectJobFailpoint(jobName, name string, value FailpointValue) {
45 | failpoint := getJobFailpointName(jobName, name)
46 | failpoints.Store(failpoint, value)
47 | }
48 |
49 | func RemoveJobFailpoint(jobName, name string) {
50 | failpoint := getJobFailpointName(jobName, name)
51 | failpoints.Delete(failpoint)
52 | }
53 |
54 | func HasJobFailpoint(jobName, name string) bool {
55 | if !IsFailpointEnabled() {
56 | return false
57 | }
58 |
59 | failpoint := getJobFailpointName(jobName, name)
60 | _, ok := failpoints.Load(failpoint)
61 | return ok
62 | }
63 |
64 | func IsJobFailpointExpected[T fmt.Stringer](jobName, name string, value T) bool {
65 | if !IsFailpointEnabled() {
66 | return false
67 | }
68 |
69 | failpoint := getJobFailpointName(jobName, name)
70 | v, ok := failpoints.Load(failpoint)
71 | if !ok {
72 | return false
73 | }
74 |
75 | return v == value.String()
76 | }
77 |
78 | func getJobFailpointName(jobName, name string) string {
79 | return fmt.Sprintf("/job/%s/%s", jobName, name)
80 | }
81 |
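Usage note: a short sketch of the failpoint helpers (the job and failpoint names are illustrative). Failpoints are only consulted when the global switch is enabled, so production runs with the switch off pay nothing beyond an atomic load:

    package main

    import (
        "fmt"

        "github.com/selectdb/ccr_syncer/pkg/utils"
    )

    func main() {
        utils.EnableFailpoint()

        // Register a failpoint for one job; the value is opaque to this package.
        utils.InjectJobFailpoint("ccr_test_job", "snapshot_failed", "once")

        if utils.HasJobFailpoint("ccr_test_job", "snapshot_failed") {
            fmt.Println("failpoint hit: simulate a snapshot failure here")
        }

        utils.RemoveJobFailpoint("ccr_test_job", "snapshot_failed")
        utils.DisableFailpoint()
    }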
--------------------------------------------------------------------------------
/pkg/utils/gzip.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package utils
18 |
19 | import (
20 | "bytes"
21 | "compress/gzip"
22 | "io"
23 | )
24 |
25 | func GZIPDecompress(data []byte) ([]byte, error) {
26 | buf := bytes.NewReader(data)
27 | reader, err := gzip.NewReader(buf)
28 | if err != nil {
29 | return nil, err
30 | }
31 | defer reader.Close()
32 |
33 | return io.ReadAll(reader)
34 | }
35 |
36 | func GZIPCompress(data []byte) ([]byte, error) {
37 | var buf bytes.Buffer
38 | writer := gzip.NewWriter(&buf)
39 | if _, err := writer.Write(data); err != nil {
40 | return nil, err
41 | }
42 | if err := writer.Close(); err != nil {
43 | return nil, err
44 | }
45 |
46 | return buf.Bytes(), nil
47 | }
48 |
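Usage note: a small round-trip example of the two helpers above (the payload is arbitrary):

    package main

    import (
        "fmt"

        "github.com/selectdb/ccr_syncer/pkg/utils"
    )

    func main() {
        raw := []byte(`{"commitSeq": 42}`) // arbitrary payload

        compressed, err := utils.GZIPCompress(raw)
        if err != nil {
            panic(err)
        }

        restored, err := utils.GZIPDecompress(compressed)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(restored)) // prints the original payload
    }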
--------------------------------------------------------------------------------
/pkg/utils/job_hook.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package utils
18 |
19 | import (
20 | "github.com/modern-go/gls"
21 | "github.com/sirupsen/logrus"
22 | )
23 |
24 | type Hook struct {
25 | Field string
26 | levels []logrus.Level
27 | }
28 |
29 | func (hook *Hook) Levels() []logrus.Level {
30 | return hook.levels
31 | }
32 |
33 | func (hook *Hook) Fire(entry *logrus.Entry) error {
34 | syncName := gls.Get(hook.Field)
35 | if syncName != nil {
36 | entry.Data[hook.Field] = syncName
37 | }
38 | return nil
39 | }
40 |
41 | func NewHook(levels ...logrus.Level) *Hook {
42 | hook := Hook{
43 | Field: "job",
44 | levels: levels,
45 | }
46 | if len(hook.levels) == 0 {
47 | hook.levels = logrus.AllLevels
48 | }
49 |
50 | return &hook
51 | }
52 |
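Usage note: a minimal sketch of attaching the hook to logrus. The "job" value itself is populated elsewhere through goroutine-local storage (modern-go/gls), which this sketch does not reproduce; entries from goroutines without that slot set simply omit the field.

    package main

    import (
        log "github.com/sirupsen/logrus"

        "github.com/selectdb/ccr_syncer/pkg/utils"
    )

    func main() {
        // Register the hook for all levels; any entry emitted from a goroutine whose
        // gls "job" slot is set will automatically carry a "job" field.
        log.AddHook(utils.NewHook())

        log.Info("no job field here: the gls slot is unset in this goroutine")
    }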
--------------------------------------------------------------------------------
/pkg/utils/map.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package utils
18 |
19 | // CopyMap returns a new map with the same key-value pairs as the input map.
20 | // The input map must have keys and values of comparable types.
21 | // Note that keys and values are copied by assignment, not deep-copied.
22 | func CopyMap[K, V comparable](m map[K]V) map[K]V {
23 | result := make(map[K]V)
24 | for k, v := range m {
25 | result[k] = v
26 | }
27 | return result
28 | }
29 |
30 | // MergeMap returns a new map with all key-value pairs from both input maps.
31 | func MergeMap[K comparable, V any](m1, m2 map[K]V) map[K]V {
32 | if m1 == nil {
33 | m1 = make(map[K]V, len(m2))
34 | }
35 | for k, v := range m2 {
36 | m1[k] = v
37 | }
38 | return m1
39 | }
40 |
41 | func Keys[V any](m map[string]V) []string {
42 | keys := make([]string, 0, len(m))
43 | for k := range m {
44 | keys = append(keys, k)
45 | }
46 | return keys
47 | }
48 |
--------------------------------------------------------------------------------
/pkg/utils/map_test.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package utils
18 |
19 | import (
20 | "testing"
21 |
22 | "github.com/stretchr/testify/assert"
23 | )
24 |
25 | func TestCopyMap(t *testing.T) {
26 | // Test with string keys and int values
27 | m1 := map[string]int{"a": 1, "b": 2, "c": 3}
28 | m2 := CopyMap(m1)
29 | assert.Equal(t, m1, m2)
30 | // update
31 | m1["c"] = 4
32 | assert.NotEqual(t, m1, m2)
33 |
34 | // Test with int keys and string values
35 | m3 := map[int]string{1: "a", 2: "b", 3: "c"}
36 | m4 := CopyMap(m3)
37 | assert.Equal(t, m3, m4)
38 | // update
39 | m3[3] = "d"
40 | assert.NotEqual(t, m3, m4)
41 |
42 | // Test with float keys and bool values
43 | m5 := map[float64]bool{1.1: true, 2.2: false, 3.3: true}
44 | m6 := CopyMap(m5)
45 | assert.Equal(t, m5, m6)
46 | // update
47 | m5[3.3] = false
48 | assert.NotEqual(t, m5, m6)
49 | }
50 |
--------------------------------------------------------------------------------
/pkg/utils/math.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package utils
18 |
19 | import "golang.org/x/exp/constraints"
20 |
21 | func Min[T constraints.Ordered](a, b T) T {
22 | if a < b {
23 | return a
24 | }
25 | return b
26 | }
27 |
--------------------------------------------------------------------------------
/pkg/utils/observer.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package utils
18 |
19 | type Observer[T any] interface {
20 | Update(T)
21 | }
22 |
23 | type Subject[T any] interface {
24 | Register(Observer[T])
25 | Unregister(Observer[T])
26 | Notify(T)
27 | }
28 |
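Usage note: this file only declares the generic contracts, so a small illustrative Subject implementation (the names below are not from the repo) shows how the two interfaces are meant to fit together:

    package main

    import (
        "fmt"

        "github.com/selectdb/ccr_syncer/pkg/utils"
    )

    // fanout is an illustrative Subject that forwards every event to each
    // registered observer in registration order.
    type fanout[T any] struct {
        observers []utils.Observer[T]
    }

    func (s *fanout[T]) Register(o utils.Observer[T]) {
        s.observers = append(s.observers, o)
    }

    func (s *fanout[T]) Unregister(o utils.Observer[T]) {
        for i, existing := range s.observers {
            if existing == o {
                s.observers = append(s.observers[:i], s.observers[i+1:]...)
                return
            }
        }
    }

    func (s *fanout[T]) Notify(event T) {
        for _, o := range s.observers {
            o.Update(event)
        }
    }

    // printer is an Observer[string] that just logs what it receives.
    type printer struct{}

    func (printer) Update(event string) { fmt.Println("observed:", event) }

    func main() {
        var subject utils.Subject[string] = &fanout[string]{}
        subject.Register(printer{})
        subject.Notify("job state changed")
    }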
--------------------------------------------------------------------------------
/pkg/utils/slice.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | // Index returns the index of the first occurrence of v in s,
4 | // or -1 if not present.
5 | func Index[S ~[]E, E comparable](s S, v E) int {
6 | for i := range s {
7 | if v == s[i] {
8 | return i
9 | }
10 | }
11 | return -1
12 | }
13 |
14 | // Contains reports whether v is present in s.
15 | func Contains[S ~[]E, E comparable](s S, v E) bool {
16 | return Index(s, v) >= 0
17 | }
18 |
--------------------------------------------------------------------------------
/pkg/utils/thrift_wrapper.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package utils
18 |
19 | import (
20 | "context"
21 |
22 | "github.com/apache/thrift/lib/go/thrift"
23 | )
24 |
25 | type WrapperType interface {
26 | ~int64 | ~string | ~bool
27 | }
28 |
29 | func ThriftValueWrapper[T WrapperType](value T) *T {
30 | return &value
31 | }
32 |
33 | func ThriftToJsonStr(obj thrift.TStruct) (string, error) {
34 | transport := thrift.NewTMemoryBuffer()
35 | protocol := thrift.NewTJSONProtocolFactory().GetProtocol(transport)
36 | ts := &thrift.TSerializer{Transport: transport, Protocol: protocol}
37 | if jsonBytes, err := ts.Write(context.Background(), obj); err != nil {
38 | return "", err
39 | } else {
40 | return string(jsonBytes), nil
41 | }
42 | }
43 |
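Usage note: generated thrift structs expose optional scalar fields as pointers, so callers would otherwise need a temporary variable just to take an address; ThriftValueWrapper removes that boilerplate. A minimal sketch with a stand-in struct (the struct and its fields are illustrative, not a generated type from this repo):

    package main

    import (
        "fmt"

        "github.com/selectdb/ccr_syncer/pkg/utils"
    )

    // exampleReq stands in for a generated thrift struct whose optional fields are pointers.
    type exampleReq struct {
        TableId *int64
        Label   *string
    }

    func main() {
        req := exampleReq{
            TableId: utils.ThriftValueWrapper(int64(42)),
            Label:   utils.ThriftValueWrapper("ingest-1"),
        }
        fmt.Println(*req.TableId, *req.Label)
    }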
--------------------------------------------------------------------------------
/pkg/version/version.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License
17 | package version
18 |
19 | // Git SHA Value will be set during build
20 | var GitTagSha = "Git tag sha: Not provided, use Makefile to build"
21 |
22 | func GetVersion() string {
23 | return GitTagSha
24 | }
25 |
--------------------------------------------------------------------------------
/pkg/xerror/withMessage.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 | // copy from github.com/pkg/errors/errors.go
18 |
19 | package xerror
20 |
21 | import (
22 | "fmt"
23 | "io"
24 | )
25 |
26 | type withMessage struct {
27 | cause error
28 | msg string
29 | }
30 |
31 | func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
32 | func (w *withMessage) Cause() error { return w.cause }
33 |
34 | // Unwrap provides compatibility for Go 1.13 error chains.
35 | func (w *withMessage) Unwrap() error { return w.cause }
36 |
37 | func (w *withMessage) Format(s fmt.State, verb rune) {
38 | switch verb {
39 | case 'v':
40 | if s.Flag('+') {
41 | fmt.Fprintf(s, "%+v\n", w.Cause())
42 | io.WriteString(s, w.msg)
43 | return
44 | }
45 | fallthrough
46 | case 's', 'q':
47 | io.WriteString(s, w.Error())
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/pkg/xerror/withstack.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 | // copy from github.com/pkg/errors/errors.go
18 |
19 | package xerror
20 |
21 | import (
22 | "fmt"
23 | "io"
24 | )
25 |
26 | type withStack struct {
27 | error
28 | *stack
29 | }
30 |
31 | func (w *withStack) Cause() error { return w.error }
32 |
33 | // Unwrap provides compatibility for Go 1.13 error chains.
34 | func (w *withStack) Unwrap() error { return w.error }
35 |
36 | func (w *withStack) Format(s fmt.State, verb rune) {
37 | switch verb {
38 | case 'v':
39 | if s.Flag('+') {
40 | fmt.Fprintf(s, "%+v", w.Cause())
41 | w.stack.Format(s, verb)
42 | return
43 | }
44 | fallthrough
45 | case 's':
46 | io.WriteString(s, w.Error())
47 | case 'q':
48 | fmt.Fprintf(s, "%q", w.Error())
49 | }
50 | }
51 |
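
To illustrate what withMessage and withStack together produce under the different verbs, here is an in-package sketch; it assumes the package's copy of stack.go also provides the callers() helper from pkg/errors, which is not shown in this dump:

    base := errors.New("connect to FE failed")
    err := &withStack{&withMessage{cause: base, msg: "begin restore"}, callers()}

    fmt.Printf("%s\n", err)
    // Output: begin restore: connect to FE failed

    fmt.Printf("%+v\n", err)
    // Output: connect to FE failed
    //         begin restore
    //         <stack frames captured by callers()>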
--------------------------------------------------------------------------------
/pkg/xmetrics/tags.go:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 | package xmetrics
18 |
19 | import (
20 | "github.com/prometheus/client_golang/prometheus"
21 | "github.com/selectdb/ccr_syncer/pkg/xerror"
22 | )
23 |
24 | func ErrorLabels(err *xerror.XError) prometheus.Labels {
25 | labels := make(prometheus.Labels)
26 | labels["error"] = err.Category().Name()
27 |
28 | var type_ string
29 | if err.IsRecoverable() {
30 | type_ = "recoverable"
31 | } else if err.IsPanic() {
32 | type_ = "panic"
33 | } else {
34 | type_ = "unknown"
35 | }
36 |
37 | labels["type"] = type_
38 | return labels
39 | }
40 |
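
A sketch of how these labels could drive a metric in this package; the counter name, help text, and the recordError helper are illustrative and not taken from the repository:

    var jobErrors = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "ccr_syncer_job_errors_total",
            Help: "Job errors grouped by xerror category and type.",
        },
        []string{"error", "type"}, // must match the keys produced by ErrorLabels
    )

    func init() {
        prometheus.MustRegister(jobErrors)
    }

    // recordError shows the intended call pattern for ErrorLabels.
    func recordError(err *xerror.XError) {
        jobErrors.With(ErrorLabels(err)).Inc()
    }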
--------------------------------------------------------------------------------
/regression-test/data/db_sync/partition/drop_1/test_ds_part_drop_1.out:
--------------------------------------------------------------------------------
1 | -- This file is automatically generated. You should know what you did if you want to edit this
2 | -- !sql --
3 | 2020-01-10 3 103 15 225.0
4 | 2020-01-20 4 104 30 450.0
5 |
6 | -- !target_sql --
7 | 2020-01-10 3 103 15 225.0
8 | 2020-01-20 4 104 30 450.0
9 |
10 | -- !sql --
11 |
12 | -- !target_sql --
13 | 2020-01-10 3 103 15 225.0
14 | 2020-01-20 4 104 30 450.0
15 |
16 | -- !sql --
17 |
18 | -- !target_sql --
19 | 2020-01-10 3 103 15 225.0
20 | 2020-01-20 4 104 30 450.0
21 |
22 | -- !sql --
23 | 2020-02-20 5 105 50 550.0
24 |
25 | -- !target_sql --
26 | 2020-01-10 3 103 15 225.0
27 | 2020-01-20 4 104 30 450.0
28 |
29 |
--------------------------------------------------------------------------------
/regression-test/data/db_sync/partition/recover/test_ds_part_recover.out:
--------------------------------------------------------------------------------
1 | -- This file is automatically generated. You should know what you did if you want to edit this
2 | -- !target_sql_content --
3 | 3 0
4 | 3 1
5 | 3 2
6 | 5 0
7 | 5 1
8 | 5 2
9 |
10 | -- !sql_source_content --
11 | 3 0
12 | 3 1
13 | 3 2
14 | 5 0
15 | 5 1
16 | 5 2
17 |
18 |
--------------------------------------------------------------------------------
/regression-test/data/db_sync/partition/recover1/test_ds_part_recover_new.out:
--------------------------------------------------------------------------------
1 | -- This file is automatically generated. You should know what you did if you want to edit this
2 | -- !target_sql_content --
3 | 3 0
4 | 3 1
5 | 3 2
6 | 5 0
7 | 5 1
8 | 5 2
9 |
10 | -- !sql_source_content --
11 | 3 0
12 | 3 1
13 | 3 2
14 | 5 0
15 | 5 1
16 | 5 2
17 |
18 |
--------------------------------------------------------------------------------
/regression-test/data/db_sync/table/recover/test_ds_tbl_drop_recover.out:
--------------------------------------------------------------------------------
1 | -- This file is automatically generated. You should know what you did if you want to edit this
2 | -- !target_sql_content --
3 | 0 0
4 | 0 1
5 | 0 2
6 | 2 0
7 | 2 1
8 | 2 2
9 |
10 | -- !sql_source_content --
11 | 0 0
12 | 0 1
13 | 0 2
14 | 2 0
15 | 2 1
16 | 2 2
17 |
18 | -- !target_sql_content_2 --
19 | 0 0
20 | 0 1
21 | 0 2
22 | 2 0
23 | 2 1
24 | 2 2
25 | 3 0
26 | 3 1
27 | 3 2
28 |
29 | -- !sql_source_content_2 --
30 | 0 0
31 | 0 1
32 | 0 2
33 | 2 0
34 | 2 1
35 | 2 2
36 | 3 0
37 | 3 1
38 | 3 2
39 |
40 |
--------------------------------------------------------------------------------
/regression-test/data/db_sync/table/recover1/test_ds_tbl_drop_recover_new.out:
--------------------------------------------------------------------------------
1 | -- This file is automatically generated. You should know what you did if you want to edit this
2 | -- !target_sql_content --
3 | 0 0
4 | 0 1
5 | 0 2
6 | 2 0
7 | 2 1
8 | 2 2
9 |
10 | -- !sql_source_content --
11 | 0 0
12 | 0 1
13 | 0 2
14 | 2 0
15 | 2 1
16 | 2 2
17 |
18 | -- !target_sql_content_2 --
19 | 0 0
20 | 0 1
21 | 0 2
22 | 2 0
23 | 2 1
24 | 2 2
25 | 3 0
26 | 3 1
27 | 3 2
28 |
29 | -- !sql_source_content_2 --
30 | 0 0
31 | 0 1
32 | 0 2
33 | 2 0
34 | 2 1
35 | 2 2
36 | 3 0
37 | 3 1
38 | 3 2
39 |
40 |
--------------------------------------------------------------------------------
/regression-test/data/db_sync/table/recover2/test_ds_tbl_drop_recover2.out:
--------------------------------------------------------------------------------
1 | -- This file is automatically generated. You should know what you did if you want to edit this
2 | -- !target_sql_content_2 --
3 | 0 0
4 | 0 1
5 | 0 2
6 | 10 0
7 | 10 1
8 | 10 2
9 | 11 0
10 | 11 1
11 | 11 2
12 | 12 0
13 | 12 1
14 | 12 2
15 | 13 0
16 | 13 1
17 | 13 2
18 | 14 0
19 | 14 1
20 | 14 2
21 | 15 0
22 | 15 1
23 | 15 2
24 | 16 0
25 | 16 1
26 | 16 2
27 | 17 0
28 | 17 1
29 | 17 2
30 | 18 0
31 | 18 1
32 | 18 2
33 | 19 0
34 | 19 1
35 | 19 2
36 | 20 0
37 | 20 1
38 | 20 2
39 |
40 | -- !sql_source_content_2 --
41 | 0 0
42 | 0 1
43 | 0 2
44 | 10 0
45 | 10 1
46 | 10 2
47 | 11 0
48 | 11 1
49 | 11 2
50 | 12 0
51 | 12 1
52 | 12 2
53 | 13 0
54 | 13 1
55 | 13 2
56 | 14 0
57 | 14 1
58 | 14 2
59 | 15 0
60 | 15 1
61 | 15 2
62 | 16 0
63 | 16 1
64 | 16 2
65 | 17 0
66 | 17 1
67 | 17 2
68 | 18 0
69 | 18 1
70 | 18 2
71 | 19 0
72 | 19 1
73 | 19 2
74 | 20 0
75 | 20 1
76 | 20 2
77 |
78 |
--------------------------------------------------------------------------------
/regression-test/data/db_sync/table/recover3/test_ds_tbl_drop_recover3.out:
--------------------------------------------------------------------------------
1 | -- This file is automatically generated. You should know what you did if you want to edit this
2 | -- !target_sql_content_2 --
3 | 0 0
4 | 0 1
5 | 0 2
6 | 10 0
7 | 10 1
8 | 10 2
9 | 11 0
10 | 11 1
11 | 11 2
12 | 12 0
13 | 12 1
14 | 12 2
15 | 13 0
16 | 13 1
17 | 13 2
18 | 14 0
19 | 14 1
20 | 14 2
21 | 15 0
22 | 15 1
23 | 15 2
24 | 16 0
25 | 16 1
26 | 16 2
27 | 17 0
28 | 17 1
29 | 17 2
30 | 18 0
31 | 18 1
32 | 18 2
33 | 19 0
34 | 19 1
35 | 19 2
36 | 20 0
37 | 20 1
38 | 20 2
39 |
40 | -- !sql_source_content_2 --
41 | 0 0
42 | 0 1
43 | 0 2
44 | 10 0
45 | 10 1
46 | 10 2
47 | 11 0
48 | 11 1
49 | 11 2
50 | 12 0
51 | 12 1
52 | 12 2
53 | 13 0
54 | 13 1
55 | 13 2
56 | 14 0
57 | 14 1
58 | 14 2
59 | 15 0
60 | 15 1
61 | 15 2
62 | 16 0
63 | 16 1
64 | 16 2
65 | 17 0
66 | 17 1
67 | 17 2
68 | 18 0
69 | 18 1
70 | 18 2
71 | 19 0
72 | 19 1
73 | 19 2
74 | 20 0
75 | 20 1
76 | 20 2
77 |
78 |
--------------------------------------------------------------------------------
/regression-test/data/table_sync/dml/insert_overwrite/test_ts_dml_insert_overwrite.out:
--------------------------------------------------------------------------------
1 | -- This file is automatically generated. You should know what you did if you want to edit this
2 | -- !sql --
3 | 1 0
4 | 1 1
5 | 1 2
6 | 1 3
7 | 1 4
8 |
9 | -- !target_sql --
10 | 1 0
11 | 1 1
12 | 1 2
13 | 1 3
14 | 1 4
15 |
16 | -- !sql --
17 | 2 0
18 | 2 1
19 | 2 2
20 | 2 3
21 | 2 4
22 |
23 | -- !target_sql --
24 | 2 0
25 | 2 1
26 | 2 2
27 | 2 3
28 | 2 4
29 |
30 | -- !sql --
31 | 3 0
32 | 3 1
33 | 3 2
34 | 3 3
35 | 3 4
36 |
37 | -- !target_sql --
38 | 3 0
39 | 3 1
40 | 3 2
41 | 3 3
42 | 3 4
43 |
44 |
--------------------------------------------------------------------------------
/regression-test/data/table_sync/partition/recover/test_tbl_part_recover.out:
--------------------------------------------------------------------------------
1 | -- This file is automatically generated. You should know what you did if you want to edit this
2 | -- !target_sql_content --
3 | 3 0
4 | 3 1
5 | 3 2
6 | 5 0
7 | 5 1
8 | 5 2
9 |
10 | -- !sql_source_content --
11 | 3 0
12 | 3 1
13 | 3 2
14 | 5 0
15 | 5 1
16 | 5 2
17 |
18 |
--------------------------------------------------------------------------------
/regression-test/data/table_sync/partition/recover1/test_tbl_part_recover_new.out:
--------------------------------------------------------------------------------
1 | -- This file is automatically generated. You should know what you did if you want to edit this
2 | -- !target_sql_content --
3 | 3 0
4 | 3 1
5 | 3 2
6 | 5 0
7 | 5 1
8 | 5 2
9 |
10 | -- !sql_source_content --
11 | 3 0
12 | 3 1
13 | 3 2
14 | 5 0
15 | 5 1
16 | 5 2
17 |
18 |
--------------------------------------------------------------------------------
/regression-test/suites/db_sync/delete/mor/test_ds_delete_mor.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite('test_ds_delete_mor') {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", 'helper.groovy'))
21 |
22 | def suffix = helper.randomSuffix()
23 | def tableName = 'tbl_' + suffix
24 | helper.enableDbBinlog()
25 | sql """
26 | CREATE TABLE if NOT EXISTS ${tableName}
27 | (
28 | `test` INT,
29 | `id` INT
30 | )
31 | ENGINE=OLAP
32 | UNIQUE KEY(`test`, `id`)
33 | PARTITION BY RANGE(id)
34 | (
35 | PARTITION `p1` VALUES LESS THAN ("100"),
36 | PARTITION `p2` VALUES LESS THAN ("200")
37 | )
38 | DISTRIBUTED BY HASH(id) BUCKETS 1
39 | PROPERTIES (
40 | "replication_allocation" = "tag.location.default: 1",
41 | "binlog.enable" = "true",
42 | "binlog.ttl_seconds" = "180"
43 | )
44 | """
45 | helper.ccrJobDelete()
46 | helper.ccrJobCreate()
47 |
48 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60))
49 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", { res -> res.size() == 1 }, 60, 'target'))
50 |
51 | for (int i = 0; i < 10; i++) {
52 | sql """ INSERT INTO ${tableName} VALUES (${i}, ${i}) """
53 | }
54 | assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", 10, 60))
55 |
56 | sql """ DELETE FROM ${tableName}
57 | PARTITION `p1`
58 | WHERE test < 5
59 | """
60 |
61 | assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", 5, 60))
62 | }
63 |
--------------------------------------------------------------------------------
/regression-test/suites/db_sync/delete/mow/test_ds_delete_mow.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite('test_ds_delete_mow') {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", 'helper.groovy'))
21 |
22 | def suffix = helper.randomSuffix()
23 | def tableName = 'tbl_' + suffix
24 | helper.enableDbBinlog()
25 | sql """
26 | CREATE TABLE if NOT EXISTS ${tableName}
27 | (
28 | `test` INT,
29 | `id` INT
30 | )
31 | ENGINE=OLAP
32 | UNIQUE KEY(`test`, `id`)
33 | PARTITION BY RANGE(id)
34 | (
35 | PARTITION `p1` VALUES LESS THAN ("100"),
36 | PARTITION `p2` VALUES LESS THAN ("200")
37 | )
38 | DISTRIBUTED BY HASH(id) BUCKETS 1
39 | PROPERTIES (
40 | "replication_allocation" = "tag.location.default: 1",
41 | "binlog.enable" = "true",
42 | "binlog.ttl_seconds" = "180",
43 | "enable_unique_key_merge_on_write" = "true"
44 | )
45 | """
46 | helper.ccrJobDelete()
47 | helper.ccrJobCreate()
48 |
49 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60))
50 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", { res -> res.size() == 1 }, 60, 'target'))
51 |
52 | for (int i = 0; i < 10; i++) {
53 | sql """ INSERT INTO ${tableName} VALUES (${i}, ${i}) """
54 | }
55 | assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", 10, 60))
56 |
57 | sql """ DELETE FROM ${tableName}
58 | PARTITION `p1`
59 | WHERE test < 5
60 | """
61 |
62 | assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", 5, 60))
63 | }
64 |
--------------------------------------------------------------------------------
/regression-test/suites/db_sync/prop/auto_bucket/test_ds_prop_auto_bucket.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ds_prop_auto_bucket") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | def dbName = context.dbName
23 | def tableName = "tbl_" + helper.randomSuffix()
24 | def test_num = 0
25 | def insert_num = 5
26 |
27 | def exist = { res -> Boolean
28 | return res.size() != 0
29 | }
30 |
31 | sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
32 | target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
33 |
34 | helper.enableDbBinlog()
35 |
36 | sql """
37 | CREATE TABLE if NOT EXISTS ${tableName}
38 | (
39 | `test` INT,
40 | `id` INT
41 | )
42 | ENGINE=OLAP
43 | AGGREGATE KEY(`test`, `id`)
44 | PARTITION BY RANGE(`id`)
45 | (
46 | )
47 | DISTRIBUTED BY HASH(`id`) BUCKETS AUTO
48 | PROPERTIES (
49 | "replication_allocation" = "tag.location.default: 1",
50 | "binlog.enable" = "true"
51 | )
52 | """
53 |
54 | helper.ccrJobDelete()
55 | helper.ccrJobCreate()
56 |
57 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
58 |
59 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql"))
60 |
61 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target"))
62 |
63 | def target_res = target_sql "SHOW CREATE TABLE ${tableName}"
64 |
65 | assertTrue(target_res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS AUTO"))
66 | }
--------------------------------------------------------------------------------
/regression-test/suites/db_sync/prop/auto_compaction/test_ds_prop_auto_compaction.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ds_prop_auto_compaction") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | def dbName = context.dbName
23 | def tableName = "tbl_" + helper.randomSuffix()
24 | def test_num = 0
25 | def insert_num = 5
26 |
27 | def exist = { res -> Boolean
28 | return res.size() != 0
29 | }
30 |
31 | sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
32 | target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
33 |
34 | helper.enableDbBinlog()
35 |
36 | sql """
37 | CREATE TABLE if NOT EXISTS ${tableName}
38 | (
39 | `test` INT,
40 | `id` INT
41 | )
42 | ENGINE=OLAP
43 | AGGREGATE KEY(`test`, `id`)
44 | PARTITION BY RANGE(`id`)
45 | (
46 | )
47 | DISTRIBUTED BY HASH(id) BUCKETS 1
48 | PROPERTIES (
49 | "replication_allocation" = "tag.location.default: 1",
50 | "binlog.enable" = "true",
51 | "disable_auto_compaction" = "false"
52 | )
53 | """
54 |
55 | helper.ccrJobDelete()
56 | helper.ccrJobCreate()
57 |
58 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
59 |
60 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql"))
61 |
62 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target"))
63 |
64 | def target_res = target_sql "SHOW CREATE TABLE ${tableName}"
65 |
66 | assertTrue(target_res[0][1].contains("\"disable_auto_compaction\" = \"false\""))
67 | }
--------------------------------------------------------------------------------
/regression-test/suites/db_sync/prop/compression/test_ds_prop_compression.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ds_prop_compression") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | def dbName = context.dbName
23 | def tableName = "tbl_" + helper.randomSuffix()
24 |
25 | def exist = { res -> Boolean
26 | return res.size() != 0
27 | }
28 |
29 | sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
30 | target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
31 |
32 | helper.enableDbBinlog()
33 |
34 | sql """
35 | CREATE TABLE if NOT EXISTS ${tableName}
36 | (
37 | `test` INT,
38 | `id` INT
39 | )
40 | ENGINE=OLAP
41 | AGGREGATE KEY(`test`, `id`)
42 | PARTITION BY RANGE(`id`)
43 | (
44 | )
45 | DISTRIBUTED BY HASH(id) BUCKETS 1
46 | PROPERTIES (
47 | "replication_allocation" = "tag.location.default: 1",
48 | "binlog.enable" = "true",
49 | "compression"="zstd"
50 | )
51 | """
52 |
53 | helper.ccrJobDelete()
54 | helper.ccrJobCreate()
55 |
56 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
57 |
58 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql"))
59 |
60 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target"))
61 |
62 | def target_res = target_sql "SHOW CREATE TABLE ${tableName}"
63 |
64 | assertTrue(target_res[0][1].contains("\"compression\" = \"ZSTD\""))
65 | }
--------------------------------------------------------------------------------
/regression-test/suites/db_sync/prop/index/test_ds_prop_index.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ds_prop_index") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | def dbName = context.dbName
23 | def tableName = "tbl_" + helper.randomSuffix()
24 | def test_num = 0
25 | def insert_num = 5
26 |
27 | def exist = { res -> Boolean
28 | return res.size() != 0
29 | }
30 |
31 | sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
32 | target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
33 |
34 | helper.enableDbBinlog()
35 |
36 | sql """
37 | CREATE TABLE if NOT EXISTS ${tableName}
38 | (
39 | `test` INT,
40 | `id` INT,
41 | INDEX id_idx (id) USING INVERTED COMMENT 'test_id_idx'
42 | )
43 | ENGINE=OLAP
44 | DUPLICATE KEY(`test`, `id`)
45 | PARTITION BY RANGE(`id`)
46 | (
47 | )
48 | DISTRIBUTED BY HASH(id) BUCKETS 1
49 | PROPERTIES (
50 | "replication_allocation" = "tag.location.default: 1",
51 | "binlog.enable" = "true"
52 | )
53 | """
54 |
55 | helper.ccrJobDelete()
56 | helper.ccrJobCreate()
57 |
58 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
59 |
60 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql"))
61 |
62 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target"))
63 |
64 | def target_res = target_sql "SHOW CREATE TABLE ${tableName}"
65 |
66 | assertTrue(target_res[0][1].contains("INDEX id_idx (`id`) USING INVERTED COMMENT 'test_id_idx'"))
67 | }
--------------------------------------------------------------------------------
/regression-test/suites/db_sync/prop/schema_change/test_ds_prop_schema_change.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ds_prop_schema_change") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | def dbName = context.dbName
23 | def tableName = "tbl_" + helper.randomSuffix()
24 |
25 | def exist = { res -> Boolean
26 | return res.size() != 0
27 | }
28 |
29 | sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
30 | target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
31 |
32 | helper.enableDbBinlog()
33 |
34 | sql """
35 | CREATE TABLE if NOT EXISTS ${tableName}
36 | (
37 | `test` INT,
38 | `id` INT
39 | )
40 | ENGINE=OLAP
41 | AGGREGATE KEY(`test`, `id`)
42 | PARTITION BY RANGE(`id`)
43 | (
44 | )
45 | DISTRIBUTED BY HASH(id) BUCKETS 1
46 | PROPERTIES (
47 | "replication_allocation" = "tag.location.default: 1",
48 | "binlog.enable" = "true",
49 | "light_schema_change" = "true"
50 | )
51 | """
52 |
53 | helper.ccrJobDelete()
54 | helper.ccrJobCreate()
55 |
56 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
57 |
58 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql"))
59 |
60 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target"))
61 |
62 | def target_res = target_sql "SHOW CREATE TABLE ${tableName}"
63 |
64 | assertTrue(target_res[0][1].contains("\"light_schema_change\" = \"true\""))
65 | }
--------------------------------------------------------------------------------
/regression-test/suites/db_sync/prop/single_compact/test_ds_prop_single_compact.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ds_prop_single_compact") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | def dbName = context.dbName
23 | def tableName = "tbl_" + helper.randomSuffix()
24 | def test_num = 0
25 | def insert_num = 5
26 |
27 | def exist = { res -> Boolean
28 | return res.size() != 0
29 | }
30 |
31 | sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
32 | target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
33 |
34 | helper.enableDbBinlog()
35 |
36 | sql """
37 | CREATE TABLE if NOT EXISTS ${tableName}
38 | (
39 | `test` INT,
40 | `id` INT
41 | )
42 | ENGINE=OLAP
43 | AGGREGATE KEY(`test`, `id`)
44 | PARTITION BY RANGE(`id`)
45 | (
46 | )
47 | DISTRIBUTED BY HASH(id) BUCKETS 1
48 | PROPERTIES (
49 | "replication_allocation" = "tag.location.default: 1",
50 | "binlog.enable" = "true",
51 | "enable_single_replica_compaction" = "true"
52 | )
53 | """
54 |
55 | helper.ccrJobDelete()
56 | helper.ccrJobCreate()
57 |
58 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
59 |
60 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql"))
61 |
62 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target"))
63 |
64 | def target_res = target_sql "SHOW CREATE TABLE ${tableName}"
65 |
66 | assertTrue(target_res[0][1].contains("\"enable_single_replica_compaction\" = \"true\""))
67 | }
--------------------------------------------------------------------------------
/regression-test/suites/db_sync/prop/storage_medium/test_ds_prop_storage_medium.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ds_prop_storage_medium") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | def dbName = context.dbName
23 | def tableName = "tbl_" + helper.randomSuffix()
24 | def test_num = 0
25 | def insert_num = 5
26 |
27 | def exist = { res -> Boolean
28 | return res.size() != 0
29 | }
30 |
31 | sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
32 | target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
33 |
34 | helper.enableDbBinlog()
35 |
36 | sql """
37 | CREATE TABLE if NOT EXISTS ${tableName}
38 | (
39 | `test` INT,
40 | `id` INT
41 | )
42 | ENGINE=OLAP
43 | AGGREGATE KEY(`test`, `id`)
44 | PARTITION BY RANGE(`id`)
45 | (
46 | )
47 | DISTRIBUTED BY HASH(id) BUCKETS 1
48 | PROPERTIES (
49 | "replication_allocation" = "tag.location.default: 1",
50 | "binlog.enable" = "true",
51 | "storage_medium" = "SSD"
52 | )
53 | """
54 |
55 | helper.ccrJobDelete()
56 | helper.ccrJobCreate()
57 |
58 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
59 |
60 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql"))
61 |
62 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target"))
63 |
64 | def target_res = target_sql "SHOW CREATE TABLE ${tableName}"
65 |
66 | assertTrue(target_res[0][1].contains("\"storage_medium\" = \"ssd\""))
67 | }
--------------------------------------------------------------------------------
/regression-test/suites/db_sync/table/drop_create/test_ds_tbl_drop_create.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ds_tbl_drop_create") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | // TBD
23 | }
24 |
--------------------------------------------------------------------------------
/regression-test/suites/db_sync/table/duplicate/test_ds_tbl_duplicate.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ds_tbl_duplicate") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | def dbName = context.dbName
23 | def tableName = "tbl_" + helper.randomSuffix()
24 | def test_num = 0
25 | def insert_num = 5
26 |
27 | def exist = { res -> Boolean
28 | return res.size() != 0
29 | }
30 |
31 | sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
32 | target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
33 |
34 | helper.enableDbBinlog()
35 |
36 | sql """
37 | CREATE TABLE if NOT EXISTS ${tableName}
38 | (
39 | `test` INT,
40 | `id` INT
41 | )
42 | ENGINE=OLAP
43 | DUPLICATE KEY(`test`, `id`)
44 | PARTITION BY RANGE(`id`)
45 | (
46 | )
47 | DISTRIBUTED BY HASH(id) BUCKETS 1
48 | PROPERTIES (
49 | "replication_allocation" = "tag.location.default: 1",
50 | "binlog.enable" = "true"
51 | )
52 | """
53 |
54 | helper.ccrJobDelete()
55 | helper.ccrJobCreate()
56 |
57 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
58 |
59 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql"))
60 |
61 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target"))
62 |
63 | def sql_res = sql "SHOW CREATE TABLE ${tableName}"
64 |
65 | def target_res = target_sql "SHOW CREATE TABLE ${tableName}"
66 |
67 | assertTrue(target_res[0][1].contains("DUPLICATE KEY(`test`, `id`)"))
68 | }
69 |
--------------------------------------------------------------------------------
/regression-test/suites/db_sync/view/modify_comment/test_ds_view_modify_comment.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ds_view_modify_comment") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | if (!helper.is_version_supported([30099, 20199, 20099])) {
23 | def version = helper.upstream_version()
24 | logger.info("skip this suite because version is not supported, upstream version ${version}")
25 | return
26 | }
27 |
28 | def viewName = "test_ds_view_modify_comment_view"
29 |
30 | def exist = { res -> Boolean
31 | return res.size() != 0
32 | }
33 | def notExist = { res -> Boolean
34 | return res.size() == 0
35 | }
36 |
37 | sql """DROP VIEW IF EXISTS ${viewName}"""
38 | target_sql """DROP VIEW IF EXISTS ${viewName}"""
39 |
40 | sql """
41 | create view ${viewName} as select 1,to_base64(AES_ENCRYPT('doris','doris'));
42 | """
43 |
44 | helper.enableDbBinlog()
45 |
46 | helper.ccrJobDelete()
47 | helper.ccrJobCreate()
48 |
49 | assertTrue(helper.checkRestoreFinishTimesOf("${viewName}", 30))
50 |
51 | assertTrue(helper.checkShowTimesOf("SHOW VIEWS LIKE \"${viewName}\"", exist, 30, "target_sql"))
52 |
53 | sql "ALTER VIEW ${viewName} MODIFY COMMENT \"doris_modify_view_comment\""
54 |
55 | assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${viewName}", { r -> r[0][1].contains("COMMENT 'doris_modify_view_comment'")}, 30, "target_sql"))
56 |
57 | sql "ALTER VIEW ${viewName} MODIFY COMMENT \"\""
58 |
59 | assertTrue(helper.checkShowTimesOf("SHOW CREATE TABLE ${viewName}", { r -> !r[0][1].contains("COMMENT")}, 30, "target_sql"))
60 | }
61 |
--------------------------------------------------------------------------------
/regression-test/suites/table_sync/alt_prop/light_schema_change/test_ts_alt_prop_light_schema_change.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ts_alt_prop_light_schema_change") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | def dbName = context.dbName
23 | def tableName = "tbl_" + helper.randomSuffix()
24 |
25 | def exist = { res -> Boolean
26 | return res.size() != 0
27 | }
28 |
29 | sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
30 | target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
31 |
32 | helper.enableDbBinlog()
33 |
34 | sql """
35 | CREATE TABLE if NOT EXISTS ${tableName}
36 | (
37 | `test` INT,
38 | `id` INT
39 | )
40 | ENGINE=OLAP
41 | AGGREGATE KEY(`test`, `id`)
42 | PARTITION BY RANGE(`id`)
43 | (
44 | )
45 | DISTRIBUTED BY HASH(id) BUCKETS 1
46 | PROPERTIES (
47 | "replication_allocation" = "tag.location.default: 1",
48 | "binlog.enable" = "true",
49 | "light_schema_change" = "false"
50 | )
51 | """
52 | helper.ccrJobDelete(tableName)
53 |
54 | try {
55 | // table property only support ` "light_schema_change" = "true" `
56 | helper.ccrJobCreate(tableName)
57 | } catch (Exception e) {
58 | println "create job error: ${e.message}"
59 | }
60 | }
--------------------------------------------------------------------------------
/regression-test/suites/table_sync/delete/mor/test_ts_delete_mor.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite('test_ts_delete_mor') {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", 'helper.groovy'))
21 |
22 | def suffix = helper.randomSuffix()
23 | def tableName = 'tbl_' + suffix
24 | helper.enableDbBinlog()
25 | sql """
26 | CREATE TABLE if NOT EXISTS ${tableName}
27 | (
28 | `test` INT,
29 | `id` INT
30 | )
31 | ENGINE=OLAP
32 | UNIQUE KEY(`test`, `id`)
33 | PARTITION BY RANGE(id)
34 | (
35 | PARTITION `p1` VALUES LESS THAN ("100"),
36 | PARTITION `p2` VALUES LESS THAN ("200")
37 | )
38 | DISTRIBUTED BY HASH(id) BUCKETS 1
39 | PROPERTIES (
40 | "replication_allocation" = "tag.location.default: 1",
41 | "binlog.enable" = "true",
42 | "binlog.ttl_seconds" = "180"
43 | )
44 | """
45 | helper.ccrJobDelete(tableName)
46 | helper.ccrJobCreate(tableName)
47 |
48 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 60))
49 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", { res -> res.size() == 1 }, 60, 'target'))
50 |
51 | for (int i = 0; i < 10; i++) {
52 | sql """ INSERT INTO ${tableName} VALUES (${i}, ${i}) """
53 | }
54 | assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", 10, 60))
55 |
56 | sql """ DELETE FROM ${tableName}
57 | PARTITION `p1`
58 | WHERE test < 5
59 | """
60 |
61 | assertTrue(helper.checkSelectTimesOf("SELECT * FROM ${tableName}", 5, 60))
62 | }
63 |
--------------------------------------------------------------------------------
/regression-test/suites/table_sync/prop/auto_bucket/test_ts_prop_auto_bucket.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ts_prop_res_auto_bucket") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | def dbName = context.dbName
23 | def tableName = "tbl_" + helper.randomSuffix()
24 | def test_num = 0
25 | def insert_num = 5
26 |
27 | def exist = { res -> Boolean
28 | return res.size() != 0
29 | }
30 |
31 | sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
32 | target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
33 |
34 | helper.enableDbBinlog()
35 |
36 | sql """
37 | CREATE TABLE if NOT EXISTS ${tableName}
38 | (
39 | `test` INT,
40 | `id` INT
41 | )
42 | ENGINE=OLAP
43 | AGGREGATE KEY(`test`, `id`)
44 | PARTITION BY RANGE(`id`)
45 | (
46 | )
47 | DISTRIBUTED BY HASH(`id`) BUCKETS AUTO
48 | PROPERTIES (
49 | "replication_allocation" = "tag.location.default: 1",
50 | "binlog.enable" = "true"
51 | )
52 | """
53 |
54 | helper.ccrJobDelete(tableName)
55 | helper.ccrJobCreate(tableName)
56 |
57 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
58 |
59 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql"))
60 |
61 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target"))
62 |
63 | def target_res = target_sql "SHOW CREATE TABLE ${tableName}"
64 |
65 | assertTrue(target_res[0][1].contains("DISTRIBUTED BY HASH(`id`) BUCKETS AUTO"))
66 | }
--------------------------------------------------------------------------------
/regression-test/suites/table_sync/prop/compression/test_ts_prop_compression.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ts_prop_compression") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | def dbName = context.dbName
23 | def tableName = "tbl_" + helper.randomSuffix()
24 |
25 | def exist = { res -> Boolean
26 | return res.size() != 0
27 | }
28 |
29 | sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
30 | target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
31 |
32 | helper.enableDbBinlog()
33 |
34 | sql """
35 | CREATE TABLE if NOT EXISTS ${tableName}
36 | (
37 | `test` INT,
38 | `id` INT
39 | )
40 | ENGINE=OLAP
41 | AGGREGATE KEY(`test`, `id`)
42 | PARTITION BY RANGE(`id`)
43 | (
44 | )
45 | DISTRIBUTED BY HASH(id) BUCKETS 1
46 | PROPERTIES (
47 | "replication_allocation" = "tag.location.default: 1",
48 | "binlog.enable" = "true",
49 | "compression"="zstd"
50 | )
51 | """
52 |
53 | helper.ccrJobDelete(tableName)
54 | helper.ccrJobCreate(tableName)
55 |
56 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
57 |
58 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql"))
59 |
60 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target"))
61 |
62 | def target_res = target_sql "SHOW CREATE TABLE ${tableName}"
63 |
64 | assertTrue(target_res[0][1].contains("\"compression\" = \"ZSTD\""))
65 | }
--------------------------------------------------------------------------------
/regression-test/suites/table_sync/prop/storage_medium/test_ts_prop_storage_medium.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ts_prop_storage_medium") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | def dbName = context.dbName
23 | def tableName = "tbl_" + helper.randomSuffix()
24 | def test_num = 0
25 | def insert_num = 5
26 |
27 | def exist = { res -> Boolean
28 | return res.size() != 0
29 | }
30 |
31 | sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
32 | target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
33 |
34 | helper.enableDbBinlog()
35 |
36 | sql """
37 | CREATE TABLE if NOT EXISTS ${tableName}
38 | (
39 | `test` INT,
40 | `id` INT
41 | )
42 | ENGINE=OLAP
43 | AGGREGATE KEY(`test`, `id`)
44 | PARTITION BY RANGE(`id`)
45 | (
46 | )
47 | DISTRIBUTED BY HASH(id) BUCKETS 1
48 | PROPERTIES (
49 | "replication_allocation" = "tag.location.default: 1",
50 | "binlog.enable" = "true",
51 | "storage_medium" = "SSD"
52 | )
53 | """
54 |
55 | helper.ccrJobDelete(tableName)
56 | helper.ccrJobCreate(tableName)
57 |
58 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
59 |
60 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql"))
61 |
62 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target"))
63 |
64 | def target_res = target_sql "SHOW CREATE TABLE ${tableName}"
65 |
66 | assertTrue(target_res[0][1].contains("\"storage_medium\" = \"ssd\""))
67 | }
--------------------------------------------------------------------------------
/regression-test/suites/table_sync/table/duplicate/test_ts_tbl_duplicate.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_ts_tbl_duplicate") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | def dbName = context.dbName
23 | def tableName = "tbl_" + helper.randomSuffix()
24 | def test_num = 0
25 | def insert_num = 5
26 |
27 | def exist = { res -> Boolean
28 | return res.size() != 0
29 | }
30 |
31 | sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
32 | target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
33 |
34 | helper.enableDbBinlog()
35 |
36 | sql """
37 | CREATE TABLE if NOT EXISTS ${tableName}
38 | (
39 | `test` INT,
40 | `id` INT
41 | )
42 | ENGINE=OLAP
43 | DUPLICATE KEY(`test`, `id`)
44 | PARTITION BY RANGE(`id`)
45 | (
46 | )
47 | DISTRIBUTED BY HASH(id) BUCKETS 1
48 | PROPERTIES (
49 | "replication_allocation" = "tag.location.default: 1",
50 | "binlog.enable" = "true"
51 | )
52 | """
53 |
54 | helper.ccrJobDelete(tableName)
55 | helper.ccrJobCreate(tableName)
56 |
57 | assertTrue(helper.checkRestoreFinishTimesOf("${tableName}", 30))
58 |
59 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "sql"))
60 |
61 | assertTrue(helper.checkShowTimesOf("SHOW TABLES LIKE \"${tableName}\"", exist, 60, "target"))
62 |
63 | def sql_res = sql "SHOW CREATE TABLE ${tableName}"
64 |
65 | def target_res = target_sql "SHOW CREATE TABLE ${tableName}"
66 |
67 | assertTrue(target_res[0][1].contains("DUPLICATE KEY(`test`, `id`)"))
68 | }
69 |
--------------------------------------------------------------------------------
/regression-test/suites/table_sync_alias/alt_prop/light_sc/test_tsa_alt_prop_light_schema_change.groovy:
--------------------------------------------------------------------------------
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | suite("test_tsa_alt_prop_light_schema_change") {
19 | def helper = new GroovyShell(new Binding(['suite': delegate]))
20 | .evaluate(new File("${context.config.suitePath}/../common", "helper.groovy"))
21 |
22 | def dbName = context.dbName
23 | def tableName = "tbl_" + helper.randomSuffix()
24 | def aliasTableName = "tbl_alias_" + helper.randomSuffix()
25 | helper.set_alias(aliasTableName)
26 |
27 | def exist = { res -> Boolean
28 | return res.size() != 0
29 | }
30 |
31 | sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
32 | target_sql "DROP TABLE IF EXISTS TEST_${dbName}.${tableName}"
33 |
34 | helper.enableDbBinlog()
35 |
36 | sql """
37 | CREATE TABLE if NOT EXISTS ${tableName}
38 | (
39 | `test` INT,
40 | `id` INT
41 | )
42 | ENGINE=OLAP
43 | AGGREGATE KEY(`test`, `id`)
44 | PARTITION BY RANGE(`id`)
45 | (
46 | )
47 | DISTRIBUTED BY HASH(id) BUCKETS 1
48 | PROPERTIES (
49 | "replication_allocation" = "tag.location.default: 1",
50 | "binlog.enable" = "true",
51 | "light_schema_change" = "false"
52 | )
53 | """
54 | helper.ccrJobDelete(tableName)
55 |
56 | try {
57 | // table property only support ` "light_schema_change" = "true" `
58 | helper.ccrJobCreate(tableName)
59 | } catch (Exception e) {
60 | println "create job error: ${e.message}"
61 | }
62 | }
--------------------------------------------------------------------------------
/shell/db.conf:
--------------------------------------------------------------------------------
1 | # The key/value settings here override the corresponding flags passed to the shell scripts
2 | db_type=sqlite3
3 | db_host=127.0.0.1
4 | db_port=3306
5 | db_user=
6 | db_password=
7 | db_name=ccr
8 |
--------------------------------------------------------------------------------