├── .bazelrc ├── .bazelversion ├── .github ├── CODE_OF_CONDUCT.md ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── custom.md │ └── feature_request.md └── workflows │ ├── bazel_debug.yml │ ├── cmake.yml │ ├── cmake_debug.yml │ ├── docker-preview.yml │ ├── nightly.yml │ └── release.yml ├── .gitignore ├── BUILD ├── CMakeLists.txt ├── LICENSE ├── README.md ├── WORKSPACE ├── baikal-client ├── CMakeLists.txt ├── include │ ├── baikal_client.h │ ├── baikal_client_bns_connection_pool.h │ ├── baikal_client_connection.h │ ├── baikal_client_define.h │ ├── baikal_client_epoll.h │ ├── baikal_client_instance.h │ ├── baikal_client_logic_db.h │ ├── baikal_client_manager.h │ ├── baikal_client_mysql_async.h │ ├── baikal_client_mysql_connection.h │ ├── baikal_client_result_set.h │ ├── baikal_client_row.h │ ├── baikal_client_service.h │ ├── baikal_client_util.h │ ├── global.h │ └── shard_operator_mgr.h └── src │ ├── baikal_client_bns_connection_pool.cpp │ ├── baikal_client_connection.cpp │ ├── baikal_client_epoll.cpp │ ├── baikal_client_instance.cpp │ ├── baikal_client_logic_db.cpp │ ├── baikal_client_manager.cpp │ ├── baikal_client_mysql_async.cpp │ ├── baikal_client_mysql_connection.cpp │ ├── baikal_client_result_set.cpp │ ├── baikal_client_row.cpp │ ├── baikal_client_service.cpp │ ├── baikal_client_util.cpp │ ├── global.cpp │ └── shard_operator_mgr.cpp ├── bazel └── baikaldb.bzl ├── ci ├── package.sh └── upload-release-asset.sh ├── cmake ├── FindGperftools.cmake ├── arrow.cmake ├── boost.cmake ├── braft.cmake ├── brpc.cmake ├── bz2.cmake ├── croaring.cmake ├── faiss.cmake ├── gflags.cmake ├── glog.cmake ├── gperftools.cmake ├── leveldb.cmake ├── liburing.cmake ├── lz4.cmake ├── mariadb.cmake ├── openblas.cmake ├── protobuf.cmake ├── rapidjson.cmake ├── re2.cmake ├── rocksdb.cmake ├── snappy.cmake ├── zlib.cmake └── zstd.cmake ├── conf ├── baikalMeta │ └── gflags.conf ├── baikalStore │ ├── gflags.conf │ ├── punctuation.dic │ ├── q2b_gbk.dic │ └── q2b_utf8.dic ├── baikaldb │ └── gflags.conf └── gflags.conf ├── include ├── common │ ├── backup_stream.h │ ├── baikal_heartbeat.h │ ├── cmsketch.h │ ├── common.h │ ├── concurrency.h │ ├── datetime.h │ ├── expr_value.h │ ├── histogram.h │ ├── hll_common.h │ ├── information_schema.h │ ├── item_batch.hpp │ ├── key_encoder.h │ ├── log.h │ ├── lru_cache.h │ ├── lru_cache.hpp │ ├── memory_profile.h │ ├── message_helper.h │ ├── meta_server_interact.hpp │ ├── mut_table_key.h │ ├── object_manager.h │ ├── password.h │ ├── proto_process.hpp │ ├── range.h │ ├── schema_factory.h │ ├── statistics.h │ ├── store_interact.hpp │ ├── table_key.h │ ├── table_record.h │ ├── task_fetcher.h │ ├── task_fetcher.hpp │ ├── tdigest.h │ ├── tuple_record.h │ └── type_utils.h ├── engine │ ├── external_filesystem.h │ ├── my_listener.h │ ├── my_rocksdb.h │ ├── qos.h │ ├── rocks_wrapper.h │ ├── rocksdb_filesystem.h │ ├── rocksdb_merge_operator.h │ ├── split_compaction_filter.h │ ├── sst_file_writer.h │ ├── table_iterator.h │ ├── transaction.h │ ├── transaction_db_bthread_mutex.h │ └── transaction_pool.h ├── exec │ ├── access_path.h │ ├── agg_node.h │ ├── apply_node.h │ ├── begin_manager_node.h │ ├── commit_manager_node.h │ ├── common_manager_node.h │ ├── delete_manager_node.h │ ├── delete_node.h │ ├── dml_manager_node.h │ ├── dml_node.h │ ├── dual_scan_node.h │ ├── exec_node.h │ ├── fetcher_store.h │ ├── filter_node.h │ ├── full_export_node.h │ ├── index_ddl_manager_node.h │ ├── information_schema_scan_node.h │ ├── insert_manager_node.h │ ├── insert_node.h │ ├── join_node.h │ ├── joiner.h │ ├── 
kill_manager_node.h │ ├── kill_node.h │ ├── limit_node.h │ ├── load_node.h │ ├── lock_primary_node.h │ ├── lock_secondary_node.h │ ├── packet_node.h │ ├── property.h │ ├── redis_scan_node.h │ ├── rocksdb_scan_node.h │ ├── rollback_manager_node.h │ ├── scan_node.h │ ├── select_manager_node.h │ ├── single_txn_manager_node.h │ ├── sort_node.h │ ├── transaction_manager_node.h │ ├── transaction_node.h │ ├── truncate_manager_node.h │ ├── truncate_node.h │ ├── union_node.h │ ├── update_manager_node.h │ ├── update_node.h │ └── vectorize_helpper.h ├── expr │ ├── agg_fn_call.h │ ├── arrow_function.h │ ├── expr_node.h │ ├── fn_manager.h │ ├── internal_functions.h │ ├── literal.h │ ├── operators.h │ ├── predicate.h │ ├── row_expr.h │ ├── scalar_fn_call.h │ └── slot_ref.h ├── logical_plan │ ├── ddl_planner.h │ ├── ddl_work_planner.h │ ├── delete_planner.h │ ├── insert_planner.h │ ├── kill_planner.h │ ├── load_planner.h │ ├── logical_planner.h │ ├── prepare_planner.h │ ├── query_context.h │ ├── select_planner.h │ ├── setkv_planner.h │ ├── transaction_planner.h │ ├── union_planner.h │ └── update_planner.h ├── mem_row │ ├── mem_row.h │ ├── mem_row_compare.h │ └── mem_row_descriptor.h ├── meta_server │ ├── auto_incr_state_machine.h │ ├── cluster_manager.h │ ├── common_state_machine.h │ ├── database_manager.h │ ├── ddl_manager.h │ ├── meta_rocksdb.h │ ├── meta_server.h │ ├── meta_state_machine.h │ ├── meta_util.h │ ├── namespace_manager.h │ ├── privilege_manager.h │ ├── query_cluster_manager.h │ ├── query_database_manager.h │ ├── query_namespace_manager.h │ ├── query_privilege_manager.h │ ├── query_region_manager.h │ ├── query_table_manager.h │ ├── region_manager.h │ ├── schema_manager.h │ ├── table_manager.h │ └── tso_state_machine.h ├── physical_plan │ ├── auto_inc.h │ ├── decorrelate.h │ ├── expr_optimizer.h │ ├── index_selector.h │ ├── join_reorder.h │ ├── limit_calc.h │ ├── physical_planner.h │ ├── plan_router.h │ ├── predicate_pushdown.h │ └── separate.h ├── protocol │ ├── data_buffer.h │ ├── epoll_info.h │ ├── handle_helper.h │ ├── machine_driver.h │ ├── mysql_err_code.h │ ├── mysql_err_handler.h │ ├── mysql_wrapper.h │ ├── network_server.h │ ├── show_helper.h │ ├── state_machine.h │ └── task_manager.h ├── raft │ ├── can_add_peer_setter.h │ ├── index_term_map.h │ ├── log_entry_reader.h │ ├── my_raft_log.h │ ├── my_raft_log_storage.h │ ├── my_raft_meta_storage.h │ ├── raft_control.h │ ├── raft_log_compaction_filter.h │ ├── rocksdb_file_system_adaptor.h │ ├── split_index_getter.h │ └── update_region_status.h ├── reverse │ ├── boolean_engine │ │ ├── boolean_executor.h │ │ ├── boolean_executor.hpp │ │ ├── logical_query.h │ │ └── logical_query.hpp │ ├── reverse_arrow.h │ ├── reverse_common.h │ ├── reverse_common.hpp │ ├── reverse_index.h │ ├── reverse_index.hpp │ ├── reverse_interface.h │ └── reverse_interface.hpp ├── runtime │ ├── arrow_io_excutor.h │ ├── chunk.h │ ├── row_batch.h │ ├── runtime_state.h │ ├── runtime_state_pool.h │ ├── sorter.h │ ├── topn_sorter.h │ └── trace_state.h ├── session │ ├── binlog_context.h │ ├── network_socket.h │ └── user_info.h ├── sqlparser │ ├── base.h │ ├── ddl.h │ ├── dml.h │ ├── expr.cc │ ├── expr.h │ ├── gen_source.sh │ ├── misc.h │ ├── parser.cc │ ├── parser.h │ ├── sql_lex.l │ ├── sql_parse.y │ └── utils.h ├── store │ ├── backup.h │ ├── closure.h │ ├── meta_writer.h │ ├── region.h │ ├── region_control.h │ ├── rpc_sender.h │ └── store.h └── vector_index │ └── vector_index.h ├── insider-preview ├── Dockerfile.preview ├── docker-compose.yml └── entrypoint.sh ├── 
licenses ├── QL-LICENSE └── REDIS-LICENSE ├── proto ├── binlog.proto ├── common.proto ├── console.proto ├── dms.proto ├── expr.proto ├── fc.proto ├── meta.interface.proto ├── olap.proto ├── optype.proto ├── plan.proto ├── raft.proto ├── reverse.proto ├── statistics.proto ├── store.interface.proto └── test_decode.proto ├── qrcode.jpeg ├── src ├── common │ ├── backup_stream.cpp │ ├── baikal_heartbeat.cpp │ ├── common.cpp │ ├── datetime.cpp │ ├── default_room_define.cpp │ ├── expr_value.cpp │ ├── heartbeat_interval_define.cpp │ ├── histogram.cpp │ ├── hll_common.cpp │ ├── information_schema.cpp │ ├── memory_profile.cpp │ ├── meta_server_interact.cpp │ ├── mut_table_key.cpp │ ├── password.cpp │ ├── proto_process.cpp │ ├── schema_factory.cpp │ ├── store_interact.cpp │ ├── table_key.cpp │ ├── table_record.cpp │ ├── tdigest.cpp │ └── tuple_record.cpp ├── engine │ ├── external_filesystem.cpp │ ├── my_rocksdb.cpp │ ├── qos.cpp │ ├── rocks_wrapper.cpp │ ├── rocksdb_filesystem.cpp │ ├── rocksdb_merge_operator.cpp │ ├── table_iterator.cpp │ ├── transaction.cpp │ ├── transaction_db_bthread_mutex.cpp │ └── transaction_pool.cpp ├── exec │ ├── access_path.cpp │ ├── agg_node.cpp │ ├── apply_node.cpp │ ├── delete_manager_node.cpp │ ├── delete_node.cpp │ ├── dml_manager_node.cpp │ ├── dml_node.cpp │ ├── dual_scan_node.cpp │ ├── exec_node.cpp │ ├── fetcher_store.cpp │ ├── filter_node.cpp │ ├── full_export_node.cpp │ ├── index_ddl_manager_node.cpp │ ├── insert_manager_node.cpp │ ├── insert_node.cpp │ ├── join_node.cpp │ ├── joiner.cpp │ ├── kill_node.cpp │ ├── limit_node.cpp │ ├── load_node.cpp │ ├── lock_primary_node.cpp │ ├── lock_secondary_node.cpp │ ├── packet_node.cpp │ ├── redis_scan_node.cpp │ ├── rocksdb_scan_node.cpp │ ├── scan_node.cpp │ ├── select_manager_node.cpp │ ├── single_txn_manager_node.cpp │ ├── sort_node.cpp │ ├── transaction_manager_node.cpp │ ├── transaction_node.cpp │ ├── truncate_node.cpp │ ├── union_node.cpp │ ├── update_manager_node.cpp │ └── update_node.cpp ├── expr │ ├── agg_fn_call.cpp │ ├── arrow_function.cpp │ ├── expr_node.cpp │ ├── fn_manager.cpp │ ├── internal_functions.cpp │ ├── operators.cpp │ ├── predicate.cpp │ └── scalar_fn_call.cpp ├── logical_plan │ ├── ddl_planner.cpp │ ├── ddl_work_planner.cpp │ ├── delete_planner.cpp │ ├── insert_planner.cpp │ ├── kill_planner.cpp │ ├── load_planner.cpp │ ├── logical_planner.cpp │ ├── prepare_planner.cpp │ ├── query_context.cpp │ ├── select_planner.cpp │ ├── setkv_planner.cpp │ ├── transaction_planner.cpp │ ├── union_planner.cpp │ └── update_planner.cpp ├── mem_row │ ├── mem_row.cpp │ ├── mem_row_compare.cpp │ └── mem_row_descriptor.cpp ├── meta_server │ ├── auto_incr_state_machine.cpp │ ├── cluster_manager.cpp │ ├── common_state_machine.cpp │ ├── database_manager.cpp │ ├── ddl_manager.cpp │ ├── main.cpp │ ├── meta_rocksdb.cpp │ ├── meta_server.cpp │ ├── meta_state_machine.cpp │ ├── meta_util.cpp │ ├── namespace_manager.cpp │ ├── privilege_manager.cpp │ ├── query_cluster_manager.cpp │ ├── query_database_manager.cpp │ ├── query_namespace_manager.cpp │ ├── query_privilege_manager.cpp │ ├── query_region_manager.cpp │ ├── query_table_manager.cpp │ ├── region_manager.cpp │ ├── schema_manager.cpp │ ├── table_manager.cpp │ └── tso_state_machine.cpp ├── physical_plan │ ├── auto_inc.cpp │ ├── expr_optimizer.cpp │ ├── index_selector.cpp │ ├── join_reorder.cpp │ ├── limit_calc.cpp │ ├── physical_planner.cpp │ ├── plan_router.cpp │ └── separate.cpp ├── protocol │ ├── data_buffer.cpp │ ├── epoll_info.cpp │ ├── handle_helper.cpp │ ├── 
machine_driver.cpp │ ├── main.cpp │ ├── mysql_err_handler.cpp │ ├── mysql_wrapper.cpp │ ├── network_server.cpp │ ├── show_helper.cpp │ ├── state_machine.cpp │ └── task_manager.cpp ├── raft │ ├── log_entry_reader.cpp │ ├── my_raft_log.cpp │ ├── my_raft_log_storage.cpp │ ├── my_raft_meta_storage.cpp │ ├── raft_control.cpp │ └── rocksdb_file_system_adaptor.cpp ├── raft_dummy │ └── dummy_setter.cpp ├── raft_meta │ ├── can_add_peer_setter.cpp │ ├── split_index_getter.cpp │ └── update_region_status.cpp ├── raft_store │ ├── can_add_peer_setter.cpp │ ├── split_index_getter.cpp │ └── update_region_status.cpp ├── reverse │ └── reverse_common.cpp ├── runtime │ ├── arrow_io_excutor.cpp │ ├── chunk.cpp │ ├── runtime_state.cpp │ ├── sorter.cpp │ └── topn_sorter.cpp ├── session │ ├── binlog_context.cpp │ ├── network_socket.cpp │ └── user_info.cpp ├── store │ ├── backup.cpp │ ├── closure.cpp │ ├── main.cpp │ ├── meta_writer.cpp │ ├── region.cpp │ ├── region_binlog.cpp │ ├── region_control.cpp │ ├── region_olap.cpp │ ├── rpc_sender.cpp │ └── store.cpp ├── tools │ ├── backup_import.cpp │ ├── backup_import.h │ ├── backup_tool.cpp │ ├── backup_tool.h │ ├── baikal_capturer.cpp │ ├── baikal_capturer.h │ ├── capture_tool.cpp │ ├── check_table_region.cpp │ ├── create_table.cpp │ ├── create_tmp_table_from_meta_server.cpp │ ├── dm_rocks_wrapper.cpp │ ├── dm_rocks_wrapper.h │ ├── fast_importer.cpp │ ├── fast_importer.h │ ├── fetcher_tool.cpp │ ├── fetcher_tool.h │ ├── importer.cpp │ ├── importer_filesysterm.cpp │ ├── importer_filesysterm.h │ ├── importer_handle.cpp │ ├── importer_handle.h │ ├── main.cpp │ ├── meta_query.cpp │ ├── script │ │ ├── add_auto_increment_id.sh │ │ ├── add_field.sh │ │ ├── add_instance.sh │ │ ├── add_privilege.sh │ │ ├── backup │ │ │ ├── add_field.sh │ │ │ ├── add_instance.sh │ │ │ ├── batch_remove_region.sh │ │ │ ├── create_auto_increment_table.sh │ │ │ ├── create_database.sh │ │ │ ├── create_table.sh │ │ │ ├── download_conf.sh │ │ │ ├── download_schedule.sh │ │ │ ├── drop_database.sh │ │ │ ├── drop_field.sh │ │ │ ├── drop_namespace.sh │ │ │ ├── drop_table.sh │ │ │ ├── gen_done_file.sh │ │ │ ├── meta_query_region_ids.sh │ │ │ ├── meta_query_region_peers_status.sh │ │ │ ├── modify_index_status.sh │ │ │ ├── query_diff_region_ids.sh │ │ │ ├── query_faulty_instance.py │ │ │ ├── rename_field.sh │ │ │ ├── rename_table.sh │ │ │ ├── restore_table.sh │ │ │ ├── show_processlist.sh │ │ │ ├── store_query_illegal_region.sh │ │ │ ├── update_byte_size.sh │ │ │ └── update_instance.sh │ │ ├── batch_remove_region.sh │ │ ├── console_shell │ │ │ ├── create_instance_table.sh │ │ │ ├── create_meta_info.sh │ │ │ ├── create_overview_table.sh │ │ │ ├── create_region_table.sh │ │ │ ├── create_tableinfo_table.sh │ │ │ ├── create_user_table.sh │ │ │ └── watch_meta_query.sh │ │ ├── create_auto_increment_table.sh │ │ ├── create_database.sh │ │ ├── create_instance_table.sh │ │ ├── create_internal_table.sh │ │ ├── create_meta_info.sh │ │ ├── create_namespace.sh │ │ ├── create_overview_table.sh │ │ ├── create_region_table.sh │ │ ├── create_table.sh │ │ ├── create_tableinfo_table.sh │ │ ├── create_user.sh │ │ ├── create_user_table.sh │ │ ├── del_ddlwork.sh │ │ ├── download_conf.sh │ │ ├── download_schedule.sh │ │ ├── drop_database.sh │ │ ├── drop_field.sh │ │ ├── drop_index.sh │ │ ├── drop_instance.sh │ │ ├── drop_namespace.sh │ │ ├── drop_region.sh │ │ ├── drop_table.sh │ │ ├── gen_done_file.sh │ │ ├── get_applied_index.sh │ │ ├── init_meta_server.sh │ │ ├── meta_query.sh │ │ ├── meta_query_region_ids.sh │ │ ├── 
meta_query_region_peers_status.sh │ │ ├── meta_raft_control.sh │ │ ├── meta_region_recovery.sh │ │ ├── modify_ddlwork.sh │ │ ├── modify_index_status.sh │ │ ├── modify_resource_tag.sh │ │ ├── op_close_load_balance.sh │ │ ├── op_flash_back.sh │ │ ├── op_open_load_balance.sh │ │ ├── op_unsafe_decision.sh │ │ ├── query_ddlwork.sh │ │ ├── query_diff_region_ids.sh │ │ ├── query_faulty_instance.py │ │ ├── raft_control.sh │ │ ├── remove_privilege.sh │ │ ├── remove_region.sh │ │ ├── rename_field.sh │ │ ├── rename_table.sh │ │ ├── restore_region.sh │ │ ├── restore_table.sh │ │ ├── rollback_txn.sh │ │ ├── send_no_op.sh │ │ ├── set_instance_dead.sh │ │ ├── show_processlist.sh │ │ ├── split_region.sh │ │ ├── sql │ │ │ ├── add_privilege.sh │ │ │ ├── create_database.sh │ │ │ ├── create_namespace.sh │ │ │ ├── create_user.sh │ │ │ ├── drop_instance.sh │ │ │ ├── drop_region.sh │ │ │ ├── link_binlog.sh │ │ │ ├── meta_region_recovery.sh │ │ │ ├── modify_ddlwork.sh │ │ │ ├── modify_resource_tag.sh │ │ │ ├── op_close_load_balance.sh │ │ │ ├── op_open_load_balance.sh │ │ │ ├── query_ddlwork.sh │ │ │ ├── remove_privilege.sh │ │ │ ├── remove_region.sh │ │ │ ├── set_instance_dead.sh │ │ │ ├── split_region.sh │ │ │ ├── store_add_peer.sh │ │ │ ├── store_compact_region.sh │ │ │ ├── store_query_region.sh │ │ │ ├── store_query_txn.sh │ │ │ ├── store_remove_peer.sh │ │ │ ├── store_set_peer.sh │ │ │ ├── update_binlog.sh │ │ │ ├── update_dists.sh │ │ │ ├── update_instance_param.sh │ │ │ ├── update_main_logical_room.sh │ │ │ ├── update_resource_tag.sh │ │ │ ├── update_schema_conf.sh │ │ │ ├── update_split_lines.sh │ │ │ └── update_ttl_duration.sh │ │ ├── store_add_peer.sh │ │ ├── store_compact_region.sh │ │ ├── store_query_illegal_region.sh │ │ ├── store_query_region.sh │ │ ├── store_query_txn.sh │ │ ├── store_raft_control.sh │ │ ├── store_remove_peer.sh │ │ ├── store_restore_region.sh │ │ ├── store_rm_txn.sh │ │ ├── store_set_peer.sh │ │ ├── store_snapshot_region.sh │ │ ├── store_split_region.sh │ │ ├── update_backup.sh │ │ ├── update_binlog.sh │ │ ├── update_byte_size.sh │ │ ├── update_dists.sh │ │ ├── update_field.sh │ │ ├── update_instance.sh │ │ ├── update_instance_param.sh │ │ ├── update_merge_switch.sh │ │ ├── update_resource_tag.sh │ │ ├── update_schema_conf.sh │ │ ├── update_separate_switch.sh │ │ ├── update_split_lines.sh │ │ ├── update_ttl_duration.sh │ │ └── watch_meta_query.sh │ └── tso_tool.cpp └── vector_index │ └── vector_index.cpp ├── sysbench ├── baikaldb_deploy_scripts │ ├── baikalMetaConf │ │ └── gflags.conf │ ├── baikalStoreConf │ │ └── gflags.conf │ ├── baikaldbConf │ │ └── gflags.conf │ ├── create_internal_table.sh │ └── init.sh ├── deploy_baikaldb.md ├── lua │ ├── Makefile │ ├── Makefile.am │ ├── Makefile.in │ ├── bulk_insert.lua │ ├── delete.lua │ ├── empty-test.lua │ ├── internal │ │ ├── Makefile │ │ ├── Makefile.am │ │ ├── Makefile.in │ │ ├── sysbench.cmdline.lua │ │ ├── sysbench.cmdline.lua.h │ │ ├── sysbench.histogram.lua │ │ ├── sysbench.histogram.lua.h │ │ ├── sysbench.lua │ │ ├── sysbench.lua.h │ │ ├── sysbench.rand.lua │ │ ├── sysbench.rand.lua.h │ │ ├── sysbench.sql.lua │ │ └── sysbench.sql.lua.h │ ├── oltp_common.lua │ ├── oltp_common_baikaldb.lua │ ├── oltp_common_mysql.lua │ ├── oltp_delete.lua │ ├── oltp_insert.lua │ ├── oltp_point_select.lua │ ├── oltp_read_only.lua │ ├── oltp_read_write.lua │ ├── oltp_update_index.lua │ ├── oltp_update_non_index.lua │ ├── oltp_write_only.lua │ ├── prime-test.lua │ ├── select_random_points.lua │ └── select_random_ranges.lua ├── scripts │ ├── cleanup.sh 
│ ├── config.conf │ ├── delete.sh │ ├── insert.sh │ ├── insert_noprepare.sh │ ├── prepare.sh │ ├── read-only.sh │ ├── read-only_noprepare.sh │ ├── read-write.sh │ ├── read-write_noprepare.sh │ └── select.sh └── sysbench.md ├── test ├── conf │ ├── data_gbk │ ├── data_utf8 │ ├── punctuation.dic │ ├── q2b_gbk.dic │ └── q2b_utf8.dic ├── fun │ ├── create_table.sql │ ├── create_table_cstore.sql │ ├── exec.conf │ ├── exec_cstore.conf │ ├── fun_abnormal_branch.sql │ ├── fun_ddl.sql │ ├── fun_delete.sql │ ├── fun_from_select.sql │ ├── fun_funx.sql │ ├── fun_insert.sql │ ├── fun_insert_select.sql │ ├── fun_o_createtable.sql │ ├── fun_o_datain.sql │ ├── fun_o_query.sql │ ├── fun_o_write.sql │ ├── fun_select.sql │ ├── fun_select_plan_unit.sql │ ├── fun_sub_select.sql │ ├── fun_transaction.sql │ ├── fun_update.sql │ ├── global_index_consistent_check.py │ ├── global_index_consistent_check_cstore.py │ ├── long_transaction.sql │ └── origin_plan_unit_unitsetting_idea_insert.sql ├── test_access_path.cpp ├── test_arrow_compute.cpp ├── test_arrow_io_executor.cpp ├── test_auto_incr_state_machine.cpp ├── test_backup_ttl.cpp ├── test_binlog_storage.cpp ├── test_bitmap_value.cpp ├── test_cluster_manager.cpp ├── test_cmsketch.cpp ├── test_column_families.cpp ├── test_common.cpp ├── test_convert_charset_gbk.cpp ├── test_convert_charset_utf8.cpp ├── test_database_manager.cpp ├── test_date_time.cpp ├── test_decode_proto.cpp ├── test_dms.cpp ├── test_dynamic_schema.cpp ├── test_expr_value.cpp ├── test_faiss.cpp ├── test_fetcher_store.cpp ├── test_hll_common.cpp ├── test_internal_functions.cpp ├── test_key_encoder.cpp ├── test_mem_row.cpp ├── test_meta_writer.cpp ├── test_my_raft_log_storage.cpp ├── test_namespace_manager.cpp ├── test_or_optimize.cpp ├── test_parquet.cpp ├── test_parse_idx.cpp ├── test_parser_perf.cpp ├── test_partition.cpp ├── test_partition_meta.cpp ├── test_partition_utils.cpp ├── test_plan_cache.cpp ├── test_predicate.cpp ├── test_predicate_utf8.cpp ├── test_prefix_write.cpp ├── test_privilege_manager.cpp ├── test_qos.cpp ├── test_region.cpp ├── test_region_manager.cpp ├── test_reverse_common.cpp ├── test_reverse_common_for_utf8.cpp ├── test_rocksdb.cpp ├── test_schema_factory.cpp ├── test_schema_manager.cpp ├── test_script.sh ├── test_snapshot.cpp ├── test_sqlparser.cpp ├── test_sqlparser_for_dml.cpp ├── test_sqlparser_for_load_data.cpp ├── test_sqlparser_for_select.cpp ├── test_table_key.cpp ├── test_table_manager.cpp ├── test_tdigest.cpp ├── test_tuple_record.cpp └── transaction_example.cpp ├── third-party ├── com_github_RoaringBitmap_CRoaring │ └── BUILD ├── com_github_apache_arrow │ └── BUILD ├── com_github_facebook_rocksdb │ └── BUILD ├── com_github_facebookresearch_faiss │ └── BUILD ├── com_github_xianyi_OpenBLAS │ └── BUILD ├── glog.BUILD ├── gperftools.BUILD ├── gtest.BUILD ├── leveldb.BUILD ├── lz4.BUILD ├── rapidjson.BUILD ├── snappy.BUILD ├── snappy_config │ ├── BUILD │ └── config.h ├── zlib.BUILD └── zstd.BUILD └── watt_proto ├── base_subscribe.proto └── event.proto /.bazelrc: -------------------------------------------------------------------------------- 1 | build --copt -DHAVE_ZLIB=1 2 | # build --copt -DBAIKALDB_REVISION=\"2.1.1\" 3 | # bazel build with glog 4 | build --define=with_glog=true 5 | build -c opt 6 | # unittest 7 | # test --define=unittest=true 8 | -------------------------------------------------------------------------------- /.bazelversion: -------------------------------------------------------------------------------- 1 | 0.18.1 2 | 
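The .bazelrc above passes preprocessor defines straight to the C++ compiler via --copt: -DHAVE_ZLIB=1 is always set, and -DBAIKALDB_REVISION=\"2.1.1\" only takes effect when that line is uncommented. A minimal sketch of how such --copt defines would typically surface in the sources; the helper function below is hypothetical and not part of BaikalDB:

    #include <cstdio>

    // BAIKALDB_REVISION comes from the (commented-out) --copt line in .bazelrc;
    // assume a fallback when it is not defined (assumption, not repo code).
    #ifndef BAIKALDB_REVISION
    #define BAIKALDB_REVISION "unknown"
    #endif

    // Hypothetical helper: report the revision and zlib flag this binary was built with.
    static void print_build_info() {
    #ifdef HAVE_ZLIB
        std::printf("zlib support: enabled (HAVE_ZLIB=%d)\n", HAVE_ZLIB);
    #endif
        std::printf("BaikalDB revision: %s\n", BAIKALDB_REVISION);
    }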
-------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. See error 17 | 18 | **Expected behavior** 19 | A clear and concise description of what you expected to happen. 20 | 21 | **Screenshots** 22 | If applicable, add screenshots to help explain your problem. 23 | 24 | **Desktop (please complete the following information):** 25 | - OS: [e.g. ubuntu] 26 | - Version [e.g. 18.04] 27 | 28 | 29 | **Additional context** 30 | Add any other context about the problem here. 31 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/custom.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Custom issue template 3 | about: Describe this issue template's purpose here. 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.github/workflows/bazel_debug.yml: -------------------------------------------------------------------------------- 1 | name: Bazel Debug 2 | 3 | on: 4 | [workflow_dispatch] 5 | 6 | jobs: 7 | build: 8 | runs-on: ubuntu-20.04 9 | steps: 10 | - uses: actions/checkout@v2 11 | - name: Cache 12 | uses: actions/cache@v2.1.2 13 | with: 14 | path: | 15 | /home/runner/.cache/bazel 16 | /home/runner/.cache/bazelisk 17 | key: bazel-${{ hashFiles('.bazelversion') }} 18 | - name: Install Dependencies 19 | run: sudo apt-get update && sudo apt-get install -y flex bison libssl-dev autoconf g++ libtool make cmake libz-dev locate gfortran 20 | - name: Install Bazelisk 21 | uses: tullyliu/bazelisk-action@1.2 22 | - name: Build 23 | run: bazelisk build //:all 24 | -------------------------------------------------------------------------------- /.github/workflows/cmake.yml: -------------------------------------------------------------------------------- 1 | name: Cmake Compile 2 | 3 | on: 4 | [push,pull_request] 5 | 6 | jobs: 7 | build: 8 | runs-on: ubuntu-20.04 9 | steps: 10 | - uses: actions/checkout@v3 11 | - name: Cache cmake modules 12 | uses: actions/cache@v4 13 | with: 14 | key: cmake-buildenv-third-party2 15 | path: | 16 | buildenv/third-party 17 | - name: Install Dependencies 18 | run: sudo apt-get update && sudo apt-get install -y flex bison libssl-dev autoconf g++ libtool make cmake libz-dev locate gfortran 19 | - name: Build 20 | run: mkdir -p buildenv && cd buildenv && cmake -DWITH_BAIKAL_CLIENT=OFF .. && make -j2 21 | -------------------------------------------------------------------------------- /.github/workflows/cmake_debug.yml: -------------------------------------------------------------------------------- 1 | name: Cmake Debug 2 | 3 | on: 4 | [workflow_dispatch] 5 | 6 | jobs: 7 | build: 8 | runs-on: ubuntu-20.04 9 | steps: 10 | - uses: actions/checkout@v3 11 | - name: Cache cmake modules 12 | uses: actions/cache@v4 13 | with: 14 | key: cmake-buildenv-third-party2 15 | path: | 16 | buildenv/third-party 17 | - name: Install Dependencies 18 | run: sudo apt-get update && sudo apt-get install -y flex bison libssl-dev autoconf g++ libtool make cmake libz-dev locate gfortran 19 | - name: Build 20 | run: mkdir -p buildenv && cd buildenv && cmake -DWITH_BAIKAL_CLIENT=OFF .. 
&& make -j2 21 | - name: Start tmate session 22 | if: failure() # 只有失败时运行 23 | uses: mxschmitt/action-tmate@v3 24 | -------------------------------------------------------------------------------- /.github/workflows/docker-preview.yml: -------------------------------------------------------------------------------- 1 | name: Docker Compose Preview 2 | 3 | on: workflow_dispatch 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-20.04 8 | strategy: 9 | matrix: 10 | os: 11 | - ubuntu-16.04 12 | - ubuntu-18.04 13 | - ubuntu-20.04 14 | - centos-7 15 | - centos-8 16 | steps: 17 | - uses: actions/checkout@v2 18 | - name: Set up Docker Buildx 19 | uses: docker/setup-buildx-action@v1 20 | - name: Login to GitHub Container Registry 21 | uses: docker/login-action@v1 22 | with: 23 | registry: ghcr.io 24 | username: ${{ github.actor }} 25 | password: ${{ secrets.CR_PAT }} 26 | - name: vars 27 | id: vars 28 | run: | 29 | ostype=`os=${{ matrix.os }}; echo ${os%-*}` 30 | osversion=`os=${{ matrix.os }}; echo ${os#*-}` 31 | echo "::set-output name=ostype::$ostype" 32 | echo "::set-output name=osversion::$osversion" 33 | - name: Build and Push 34 | id: docker_build 35 | uses: docker/build-push-action@v2 36 | with: 37 | context: . 38 | file: ./insider-preview/Dockerfile.preview 39 | push: true 40 | build-args: | 41 | OS=${{ steps.vars.outputs.ostype }} 42 | VERSION=${{ steps.vars.outputs.osversion }} 43 | tags: | 44 | ghcr.io/${{ github.repository_owner }}/baikaldb-preview:${{ matrix.os }} 45 | -------------------------------------------------------------------------------- /.github/workflows/nightly.yml: -------------------------------------------------------------------------------- 1 | name: Nightly Build 2 | 3 | #on: 4 | # schedule: 5 | # - cron: '0 16 * * *' 6 | 7 | jobs: 8 | build: 9 | runs-on: ubuntu-20.04 10 | strategy: 11 | matrix: 12 | os: 13 | - ubuntu-16.04 14 | - ubuntu-18.04 15 | - ubuntu-20.04 16 | - centos-7 17 | - centos-8 18 | container: 19 | image: baikalgroup/baikal-dev:${{ matrix.os }} 20 | steps: 21 | - uses: actions/checkout@v2 22 | - name: Install Bazel Cache 23 | run: cd /work && cp -r /__w/BaikalDB/BaikalDB . 
&& tar xfz bazelcache.tgz 24 | - name: Build & Package 25 | run: cd /work/BaikalDB && env HOME=/work USER=work bazelisk build //:all && bash ./ci/package.sh version=nightly os=${{ matrix.os }} 26 | - uses: actions/upload-artifact@v2 27 | with: 28 | name: baikal-all-nightly-${{ matrix.os }}.tgz 29 | path: /home/runner/work 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Prerequisites 2 | *.d 3 | 4 | # Compiled Object files 5 | *.slo 6 | *.lo 7 | *.o 8 | *.obj 9 | 10 | # Precompiled Headers 11 | *.gch 12 | *.pch 13 | 14 | # Compiled Dynamic libraries 15 | *.so 16 | *.dylib 17 | *.dll 18 | 19 | # Fortran module files 20 | *.mod 21 | *.smod 22 | 23 | # Compiled Static libraries 24 | *.lai 25 | *.la 26 | *.a 27 | *.lib 28 | 29 | # Executables 30 | *.exe 31 | *.out 32 | *.app 33 | 34 | # Swap 35 | [._]*.s[a-v][a-z] 36 | [._]*.sw[a-p] 37 | [._]s[a-rt-v][a-z] 38 | [._]ss[a-gi-z] 39 | [._]sw[a-p] 40 | 41 | .idea 42 | cmake-build-debug 43 | cmake-build-release 44 | .DS_Store 45 | 46 | # eclipse 47 | .cproject 48 | .project 49 | .settings/language.settings.xml 50 | 51 | # bazel 52 | bazel-BaikalDB 53 | bazel-bin 54 | bazel-genfiles 55 | bazel-out 56 | bazel-testlogs 57 | -------------------------------------------------------------------------------- /baikal-client/include/baikal_client.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2019 Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | /** 16 | * @file baikal_client_include.h 17 | * @author liuhuicong(com@baidu.com) 18 | * @date 2015/12/06 15:14:57 19 | * @brief 20 | * 21 | **/ 22 | 23 | #ifndef FC_DBRD_BAIKAL_CLIENT_BAIKAL_CLIENT_H 24 | #define FC_DBRD_BAIKAL_CLIENT_BAIKAL_CLIENT_H 25 | 26 | #include "baikal_client_define.h" 27 | #include "baikal_client_util.h" 28 | #include "shard_operator_mgr.h" 29 | #include "baikal_client_logic_db.h" 30 | #include "baikal_client_result_set.h" 31 | #include "baikal_client_row.h" 32 | #include "baikal_client_manager.h" 33 | #include "baikal_client_service.h" 34 | #include "baikal_client_bns_connection_pool.h" 35 | #include "baikal_client_instance.h" 36 | #include "baikal_client_connection.h" 37 | 38 | #endif //FC_DBRD_BAIKAL_CLIENT_BAIKAL_CLIENT_H 39 | 40 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 41 | -------------------------------------------------------------------------------- /baikal-client/include/global.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2019 Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | /** 16 | * @file global.h 17 | * @author liuhuicong(com@baidu.com) 18 | * @date 2016/02/16 14:35:20 19 | * @brief 20 | * 21 | **/ 22 | 23 | #ifndef FC_DBRD_BAIKAL_CLIENT_INCLUDE_GLOBAL_H 24 | #define FC_DBRD_BAIKAL_CLIENT_INCLUDE_GLOBAL_H 25 | 26 | #include 27 | 28 | namespace baikal { 29 | namespace client { 30 | extern char* INSTANCE_STATUS_CSTR[5]; 31 | extern int DEFAULT_READ_TIMEOUT; 32 | extern int DEFAULT_WRITE_TIMEOUT; 33 | extern int DEFAULT_CONNECT_TIMEOUT; 34 | extern std::string DEFAULT_CHARSET; 35 | extern char* CONN_TYPE_CSTR[3]; 36 | } 37 | } 38 | 39 | #endif //FC_DBRD_BAIKAL_CLIENT_INCLUDE_GLOBAL_H 40 | 41 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 42 | -------------------------------------------------------------------------------- /baikal-client/src/global.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2019 Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | /** 16 | * @file ../src/global.cpp 17 | * @author liuhuicong(com@baidu.com) 18 | * @date 2016/02/16 14:35:39 19 | * @brief 20 | * 21 | **/ 22 | #include "global.h" 23 | 24 | namespace baikal { 25 | namespace client { 26 | 27 | char* INSTANCE_STATUS_CSTR[] = { 28 | "NONE", 29 | "ON_LINE", 30 | "OFF_LINE", 31 | "FAULTY", 32 | "DELAY" 33 | }; 34 | char* CONN_TYPE_CSTR[] = { 35 | "NONE", 36 | "MYSQL CONNECTION", 37 | "REDIS CONNECTION" 38 | }; 39 | 40 | int DEFAULT_READ_TIMEOUT = 10000; 41 | int DEFAULT_WRITE_TIMEOUT = 10000; 42 | int DEFAULT_CONNECT_TIMEOUT = 10; 43 | std::string DEFAULT_CHARSET = "gbk"; 44 | 45 | } 46 | } 47 | 48 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 49 | -------------------------------------------------------------------------------- /bazel/baikaldb.bzl: -------------------------------------------------------------------------------- 1 | load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library") 2 | 3 | def baikaldb_proto_library(name, srcs, deps=[], include=None, visibility=None, testonly=0): 4 | native.filegroup(name=name + "_proto_srcs", 5 | srcs=srcs, 6 | visibility=visibility,) 7 | cc_proto_library(name=name, 8 | srcs=srcs, 9 | deps=deps, 10 | cc_libs=["@com_google_protobuf//:protobuf"], 11 | include=include, 12 | protoc="@com_google_protobuf//:protoc", 13 | default_runtime="@com_google_protobuf//:protobuf", 14 | testonly=testonly, 15 | visibility=visibility,) -------------------------------------------------------------------------------- /ci/package.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -ex 4 | PROJ_DIR="$(cd "$(dirname "$0")" && pwd)"/.. 5 | PACK_DIR=/work/pack 6 | BAZEL_BIN=/work/BaikalDB/bazel-bin/ 7 | 8 | for op in $@; do 9 | eval "$op" 10 | done 11 | 12 | rm -rf $PACK_DIR && mkdir -p $PACK_DIR 13 | cd $PACK_DIR 14 | modules=(baikaldb baikalMeta baikalStore) 15 | for module in ${modules[@]} 16 | do 17 | mkdir -p $module/bin $module/conf $module/log $module/script 18 | cp $BAZEL_BIN/$module $module/bin 19 | cp $PROJ_DIR/conf/$module/* $module/conf 20 | cp -r $PROJ_DIR/src/tools/script/* $module/script 21 | 22 | done 23 | 24 | tar czf baikal-all-$version-$os.tgz -C $PACK_DIR baikaldb baikalMeta baikalStore 25 | cp baikal-all-$version-$os.tgz /__w 26 | -------------------------------------------------------------------------------- /ci/upload-release-asset.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -ex 3 | for op in $@; do 4 | eval "$op" 5 | done 6 | 7 | RELEASE="https://api.github.com/repos/$repo/releases/tags/$tag" 8 | upload_url=`curl -s $RELEASE | grep 'upload_url' |awk -F": \"|\{" '{print $2}'` 9 | content_type=$(file -b --mime-type $filepath) 10 | filename=$(basename "$filepath") 11 | curl -s -X POST \ 12 | -H "authorization: Bearer $github_token" \ 13 | -H "content-type: $content_type" \ 14 | --data-binary @"$filepath" \ 15 | "$upload_url?name=$filename" -------------------------------------------------------------------------------- /conf/baikalMeta/gflags.conf: -------------------------------------------------------------------------------- 1 | -defer_close_second=300 2 | -db_path=./rocks_db 3 | -snapshot_interval_s=600 4 | -election_timeout_ms=10000 5 | -raft_max_election_delay_ms=5000 6 | -log_uri=myraftlog://my_raft_log?id= 7 | -stable_uri=local://./raft_data/stable 8 | -snapshot_uri=local://./data/raft_data/snapshot 9 | -meta_replica_number=1 10 | 
-meta_server_bns=127.0.0.1:8010 11 | -store_request_timeout=480000 12 | -store_connect_timeout=5000 13 | -bthread_concurrency=100 14 | -bvar_dump 15 | -bvar_dump_file=./monitor/bvar.baikalMeta.data 16 | -meta_port=8010 17 | -------------------------------------------------------------------------------- /conf/baikalStore/gflags.conf: -------------------------------------------------------------------------------- 1 | -db_path=./rocks_db 2 | -defer_close_second=300 3 | -max_body_size=268435456 4 | -byte_size_per_record=1 5 | -snapshot_interval_s=1800 6 | -election_timeout_ms=10000 7 | -raft_max_election_delay_ms=5000 8 | -raft_election_heartbeat_factor=3 9 | -raft_max_byte_count_per_rpc=1048576 10 | -raft_copy_remote_file_timeout_ms=300000 11 | -log_uri=myraftlog://my_raft_log?id= 12 | -stable_uri=myraftmeta://my_raft_meta?id= 13 | -snapshot_uri=local://./raft_data/raft_data/snapshot 14 | -meta_server_bns=127.0.0.1:8010 15 | -bthread_concurrency=100 16 | -bvar_dump 17 | -bvar_dump_file=./monitor/bvar.baikalStore.data 18 | -store_port=8110 19 | -------------------------------------------------------------------------------- /conf/baikalStore/punctuation.dic: -------------------------------------------------------------------------------- 1 | ` 2 | ^ 3 | ~ 4 | < 5 | = 6 | > 7 | | 8 | _ 9 | - 10 | , 11 | ; 12 | : 13 | ! 14 | ? 15 | / 16 | . 17 | ' 18 | " 19 | ( 20 | ) 21 | [ 22 | ] 23 | { 24 | } 25 | @ 26 | $ 27 | * 28 | \ 29 | & 30 | # 31 | % 32 | + 33 | -------------------------------------------------------------------------------- /conf/baikalStore/q2b_gbk.dic: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baidu/BaikalDB/c04e446e4404ab142c50b042a91567062b4398c8/conf/baikalStore/q2b_gbk.dic -------------------------------------------------------------------------------- /conf/baikalStore/q2b_utf8.dic: -------------------------------------------------------------------------------- 1 |   2 | 、 , 3 | 。 . 4 | — - 5 | ~ ~ 6 | ‖ | 7 | … . 8 | ‘ ' 9 | ’ ' 10 | “ " 11 | ” " 12 | 〔 ( 13 | 〕 ) 14 | 〈 < 15 | 〉 > 16 | 「 ' 17 | 」 ' 18 | 『 " 19 | 』 " 20 | 〖 [ 21 | 〗 ] 22 | 【 [ 23 | 】 ] 24 | ∶ : 25 | $ $ 26 | ! ! 27 | " " 28 | # # 29 | % % 30 | & & 31 | ' ' 32 | ( ( 33 | ) ) 34 | * * 35 | + + 36 | , , 37 | - - 38 | . . 39 | / / 40 | 0 0 41 | 1 1 42 | 2 2 43 | 3 3 44 | 4 4 45 | 5 5 46 | 6 6 47 | 7 7 48 | 8 8 49 | 9 9 50 | : : 51 | ; ; 52 | < < 53 | = = 54 | > > 55 | ? ? 56 | @ @ 57 | A a 58 | B b 59 | C c 60 | D d 61 | E e 62 | F f 63 | G g 64 | H h 65 | I i 66 | J j 67 | K k 68 | L l 69 | M m 70 | N n 71 | O o 72 | P p 73 | Q q 74 | R r 75 | S s 76 | T t 77 | U u 78 | V v 79 | W w 80 | X x 81 | Y y 82 | Z z 83 | [ [ 84 | \ \ 85 | ] ] 86 | ^ ^ 87 | _ _ 88 | ` ` 89 | a a 90 | b b 91 | c c 92 | d d 93 | e e 94 | f f 95 | g g 96 | h h 97 | i i 98 | j j 99 | k k 100 | l l 101 | m m 102 | n n 103 | o o 104 | p p 105 | q q 106 | r r 107 | s s 108 | t t 109 | u u 110 | v v 111 | w w 112 | x x 113 | y y 114 | z z 115 | { { 116 | | | 117 | } } 118 |  ̄ ~ 119 | 〝 " 120 | 〞 " 121 | ﹐ , 122 | ﹑ , 123 | ﹒ . 124 | ﹔ ; 125 | ﹕ : 126 | ﹖ ? 127 | ﹗ ! 
128 | ﹙ ( 129 | ﹚ ) 130 | ﹛ { 131 | ﹜ { 132 | ﹝ [ 133 | ﹞ ] 134 | ﹟ # 135 | ﹠ & 136 | ﹡ * 137 | ﹢ + 138 | ﹣ - 139 | ﹤ < 140 | ﹥ > 141 | ﹦ = 142 | ﹨ \ 143 | ﹩ $ 144 | ﹪ % 145 | ﹫ @ 146 | -------------------------------------------------------------------------------- /conf/baikaldb/gflags.conf: -------------------------------------------------------------------------------- 1 | -defer_close_second=3600 2 | -meta_server_bns=127.0.0.1:8010 3 | -bthread_concurrency=50 4 | -task_group_runqueue_capacity=16384 5 | -max_body_size=2684354560 6 | -fetch_instance_id=true 7 | -request_timeout=50000 8 | -meta_request_timeout=300000 9 | -query_quota_per_user=1000000 10 | -bvar_dump 11 | -bvar_dump_file=./monitor/bvar.baikaldb.data 12 | -default_2pc=true 13 | -max_connections_per_user=40000 14 | -region_per_batch=1 15 | -fetcher_request_timeout=100000 16 | -print_agg_sql_interval_s=60 17 | -baikal_port=28282 18 | -------------------------------------------------------------------------------- /conf/gflags.conf: -------------------------------------------------------------------------------- 1 | -defer_close_second=10 2 | -------------------------------------------------------------------------------- /include/common/object_manager.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #pragma once 16 | 17 | #include <string> 18 | #include <unordered_map> 19 | 20 | namespace baikaldb { 21 | // T is the class being managed; Derived is the manager class that inherits ObjectManager 22 | template <typename T, typename Derived> 23 | class ObjectManager { 24 | public: 25 | static Derived* instance() { 26 | static Derived manager; 27 | return &manager; 28 | } 29 | 30 | virtual ~ObjectManager() { 31 | } 32 | 33 | T get_object(const std::string& name) { 34 | if (_objects.count(name) == 1) { 35 | return _objects[name]; 36 | } 37 | return NULL; 38 | } 39 | 40 | int register_object(const std::string& name, T object) { 41 | _objects[name] = object; 42 | return 0; 43 | } 44 | 45 | protected: 46 | ObjectManager() { 47 | } 48 | 49 | std::unordered_map<std::string, T> _objects; 50 | }; 51 | } 52 | /* vim: set ts=4 sw=4 sts=4 tw=100 */ 53 | -------------------------------------------------------------------------------- /include/common/password.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
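ObjectManager in include/common/object_manager.h above is a CRTP-style singleton registry: T is the type being managed, Derived is the concrete manager that inherits from it, and get_object() falls back to NULL, so T is expected to be a pointer-like type. A minimal usage sketch under that reading; DemoFn, DemoFnManager and double_it are hypothetical names, not symbols from BaikalDB:

    #include <string>
    #include "object_manager.h"  // the header shown above

    using DemoFn = int (*)(int);

    // Hypothetical manager: a name -> function-pointer registry.
    class DemoFnManager : public baikaldb::ObjectManager<DemoFn, DemoFnManager> {};

    static int double_it(int x) { return 2 * x; }

    void register_and_lookup() {
        // register_object() stores the object under a name ...
        DemoFnManager::instance()->register_object("double_it", &double_it);
        // ... and get_object() returns it, or NULL when the name is unknown.
        DemoFn fn = DemoFnManager::instance()->get_object("double_it");
        if (fn != nullptr) {
            fn(21);  // 42
        }
    }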
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #pragma once 16 | #include 17 | #include "common.h" 18 | 19 | namespace baikaldb { 20 | void scramble(uint8_t* to, const char* message, const char* password); 21 | 22 | } //namespace baikaldb 23 | -------------------------------------------------------------------------------- /include/engine/rocksdb_merge_operator.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | #pragma once 15 | #include "rocksdb/merge_operator.h" 16 | #include "schema_factory.h" 17 | #include "mut_table_key.h" 18 | #include "table_key.h" 19 | #include "table_record.h" 20 | 21 | namespace baikaldb { 22 | class OLAPMergeOperator : public rocksdb::MergeOperator { 23 | public: 24 | bool FullMergeV2(const rocksdb::MergeOperator::MergeOperationInput& merge_in, 25 | rocksdb::MergeOperator::MergeOperationOutput* merge_out) const override; 26 | 27 | bool PartialMerge(const rocksdb::Slice& key, const rocksdb::Slice& left_operand, 28 | const rocksdb::Slice& right_operand, std::string* new_value, 29 | rocksdb::Logger* /*logger*/) const override { 30 | return false; 31 | } 32 | const char* Name() const override { return "OLAPMergeOperator"; } 33 | }; 34 | } // namespace baikaldb -------------------------------------------------------------------------------- /include/engine/transaction_db_bthread_mutex.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | #pragma once 15 | #include "rocksdb/utilities/transaction_db_mutex.h" 16 | 17 | namespace baikaldb { 18 | 19 | // Default implementation of TransactionDBMutexFactory. May be overridden 20 | // by TransactionDBOptions.custom_mutex_factory. 
21 | class TransactionDBBthreadFactory : public rocksdb::TransactionDBMutexFactory { 22 | public: 23 | std::shared_ptr<rocksdb::TransactionDBMutex> AllocateMutex() override; 24 | std::shared_ptr<rocksdb::TransactionDBCondVar> AllocateCondVar() override; 25 | }; 26 | 27 | } // namespace baikaldb -------------------------------------------------------------------------------- /include/exec/delete_node.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #pragma once 16 | 17 | #include "exec_node.h" 18 | #include "dml_node.h" 19 | #include "transaction.h" 20 | 21 | namespace baikaldb { 22 | class DeleteNode : public DMLNode { 23 | public: 24 | DeleteNode() { 25 | } 26 | virtual ~DeleteNode() { 27 | } 28 | virtual int init(const pb::PlanNode& node); 29 | virtual int open(RuntimeState* state); 30 | 31 | private: 32 | std::vector _primary_slots; 33 | }; 34 | 35 | } 36 | 37 | /* vim: set ts=4 sw=4 sts=4 tw=100 */ 38 | -------------------------------------------------------------------------------- /include/exec/index_ddl_manager_node.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License.
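OLAPMergeOperator and TransactionDBBthreadFactory above are both RocksDB customization points: the merge operator is installed through the options' merge_operator slot, and the mutex factory replaces the default TransactionDB locking via TransactionDBOptions::custom_mutex_factory (presumably so lock waits run on bthreads rather than pthreads). A sketch of how the two would typically be wired up when opening a TransactionDB; the option values and db path are placeholders, and BaikalDB's real engine setup is not shown in this section:

    #include <memory>
    #include "rocksdb/options.h"
    #include "rocksdb/utilities/transaction_db.h"
    #include "rocksdb_merge_operator.h"        // OLAPMergeOperator, shown above
    #include "transaction_db_bthread_mutex.h"  // TransactionDBBthreadFactory, shown above

    rocksdb::TransactionDB* open_db_sketch() {
        rocksdb::Options options;
        options.create_if_missing = true;
        // Merge() entries are combined by OLAPMergeOperator::FullMergeV2().
        options.merge_operator = std::make_shared<baikaldb::OLAPMergeOperator>();

        rocksdb::TransactionDBOptions txn_db_options;
        // Swap the default pthread-based TransactionDB locks for the bthread-backed factory.
        txn_db_options.custom_mutex_factory =
                std::make_shared<baikaldb::TransactionDBBthreadFactory>();

        rocksdb::TransactionDB* txn_db = nullptr;
        rocksdb::Status s = rocksdb::TransactionDB::Open(
                options, txn_db_options, "./rocks_db", &txn_db);
        return s.ok() ? txn_db : nullptr;
    }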
14 | 15 | #pragma once 16 | #include "dml_manager_node.h" 17 | #include "fetcher_store.h" 18 | 19 | namespace baikaldb { 20 | 21 | class IndexDDLManagerNode : public DmlManagerNode { 22 | public: 23 | IndexDDLManagerNode(); 24 | virtual ~IndexDDLManagerNode(); 25 | 26 | virtual int open(RuntimeState* state); 27 | void set_table_id(int64_t table_id) { 28 | _table_id = table_id; 29 | } 30 | void set_index_id(int64_t index_id) { 31 | _index_id = index_id; 32 | } 33 | 34 | void set_task_id(const std::string& task_id) { 35 | _task_id = task_id; 36 | } 37 | 38 | void set_is_global_index(bool flags) { 39 | _is_global_index = flags; 40 | } 41 | 42 | private: 43 | int64_t _table_id {0}; 44 | int64_t _index_id {0}; 45 | std::string _task_id; 46 | bool _is_global_index = false; 47 | bool _is_rollup_index = false; 48 | }; 49 | } // namespace baikaldbame 50 | -------------------------------------------------------------------------------- /include/exec/kill_manager_node.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | // Brief: truncate table exec node 16 | #pragma once 17 | 18 | #include "exec_node.h" 19 | 20 | namespace baikaldb { 21 | class KillManagerNode : public ExecNode { 22 | public: 23 | KillManagerNode() { 24 | } 25 | virtual ~KillManagerNode() { 26 | } 27 | virtual int open(RuntimeState* state) { 28 | return 0; 29 | } 30 | }; 31 | 32 | } 33 | 34 | /* vim: set ts=4 sw=4 sts=4 tw=100 */ 35 | -------------------------------------------------------------------------------- /include/exec/kill_node.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
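IndexDDLManagerNode above only exposes setters, so the caller is expected to stamp the table, index and task identity onto the node before open() drives the index-build work; how the planner actually creates and owns the node is not shown in this header. A hypothetical configuration sketch (the literal ids and task-id string are made up):

    #include "index_ddl_manager_node.h"  // the header shown above

    // Hypothetical values; real ids come from the DDL work description,
    // and node creation/ownership is handled elsewhere (not shown).
    void configure_ddl_node_sketch(baikaldb::IndexDDLManagerNode& node) {
        node.set_table_id(1001);
        node.set_index_id(2002);
        node.set_task_id("ddl_task_demo");
        node.set_is_global_index(true);  // building a global secondary index
    }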
14 | 15 | // Brief: truncate table exec node 16 | #pragma once 17 | 18 | #include "exec_node.h" 19 | 20 | namespace baikaldb { 21 | class KillNode : public ExecNode { 22 | public: 23 | KillNode() { 24 | } 25 | virtual ~KillNode() { 26 | } 27 | virtual int init(const pb::PlanNode& node); 28 | virtual int open(RuntimeState* state); 29 | private: 30 | uint64_t _db_conn_id = 0; 31 | bool _is_query = 0; 32 | }; 33 | 34 | } 35 | 36 | /* vim: set ts=4 sw=4 sts=4 tw=100 */ 37 | -------------------------------------------------------------------------------- /include/exec/property.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #pragma once 16 | 17 | #include "exec_node.h" 18 | 19 | namespace baikaldb { 20 | struct Property { 21 | std::vector slot_order_exprs; 22 | std::vector is_asc; 23 | int64_t expected_cnt = -1; 24 | Property() { 25 | } 26 | Property(const std::vector& slot_order_exprs_, 27 | const std::vector& is_asc_, 28 | int64_t expected_cnt_) : 29 | slot_order_exprs(slot_order_exprs_), 30 | is_asc(is_asc_), 31 | expected_cnt(expected_cnt_) {} 32 | 33 | }; 34 | } 35 | 36 | /* vim: set ts=4 sw=4 sts=4 tw=100 */ 37 | -------------------------------------------------------------------------------- /include/exec/single_txn_manager_node.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | // Brief: truncate table exec node 16 | #pragma once 17 | 18 | #include "exec_node.h" 19 | #include "transaction_manager_node.h" 20 | 21 | namespace baikaldb { 22 | class SingleTxnManagerNode : public TransactionManagerNode { 23 | public: 24 | SingleTxnManagerNode() { 25 | } 26 | virtual ~SingleTxnManagerNode() { 27 | } 28 | virtual int open(RuntimeState* state); 29 | virtual void reset(RuntimeState* state); 30 | }; 31 | } 32 | 33 | /* vim: set ts=4 sw=4 sts=4 tw=100 */ 34 | -------------------------------------------------------------------------------- /include/exec/truncate_manager_node.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 
2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | // Brief: truncate table exec node 16 | #pragma once 17 | 18 | #include "exec_node.h" 19 | 20 | namespace baikaldb { 21 | class TruncateManagerNode : public ExecNode { 22 | public: 23 | TruncateManagerNode() { 24 | } 25 | virtual ~TruncateManagerNode() { 26 | } 27 | virtual int open(RuntimeState* state) { 28 | return 0; 29 | } 30 | }; 31 | 32 | } 33 | 34 | /* vim: set ts=4 sw=4 sts=4 tw=100 */ 35 | -------------------------------------------------------------------------------- /include/exec/truncate_node.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | // Brief: truncate table exec node 16 | #pragma once 17 | 18 | #include "exec_node.h" 19 | 20 | namespace baikaldb { 21 | class TruncateNode : public ExecNode { 22 | public: 23 | TruncateNode() { 24 | } 25 | virtual ~TruncateNode() { 26 | } 27 | virtual int init(const pb::PlanNode& node); 28 | virtual int open(RuntimeState* state); 29 | virtual void transfer_pb(int64_t region_id, pb::PlanNode* pb_node); 30 | 31 | int64_t table_id() { 32 | return _table_id; 33 | } 34 | int32_t get_partition_field() { 35 | return _table_info->partition_info.partition_field(); 36 | } 37 | 38 | int64_t get_partition_num() { 39 | return _table_info->partition_num; 40 | } 41 | private: 42 | int64_t _region_id = 0; 43 | int64_t _table_id = 0; 44 | SmartTable _table_info; 45 | }; 46 | 47 | } 48 | 49 | /* vim: set ts=4 sw=4 sts=4 tw=100 */ 50 | -------------------------------------------------------------------------------- /include/exec/update_node.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #pragma once 16 | 17 | #include "exec_node.h" 18 | #include "dml_node.h" 19 | #include "transaction.h" 20 | 21 | namespace baikaldb { 22 | class UpdateNode : public DMLNode { 23 | public: 24 | UpdateNode() { 25 | } 26 | virtual ~UpdateNode() { 27 | for (auto expr : _update_exprs) { 28 | ExprNode::destroy_tree(expr); 29 | } 30 | } 31 | virtual int init(const pb::PlanNode& node) override; 32 | virtual int open(RuntimeState* state) override; 33 | virtual void close(RuntimeState* state) override; 34 | virtual void transfer_pb(int64_t region_id, pb::PlanNode* pb_node) override; 35 | }; 36 | 37 | } 38 | 39 | /* vim: set ts=4 sw=4 sts=4 tw=100 */ 40 | -------------------------------------------------------------------------------- /include/logical_plan/kill_planner.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | // Brief: the class for generating and executing prepare statements 16 | #pragma once 17 | #include "logical_planner.h" 18 | #include "query_context.h" 19 | #include "parser.h" 20 | 21 | namespace baikaldb { 22 | 23 | class KillPlanner : public LogicalPlanner { 24 | public: 25 | 26 | KillPlanner(QueryContext* ctx) : LogicalPlanner(ctx) {} 27 | 28 | virtual ~KillPlanner() {} 29 | 30 | virtual int plan(); 31 | 32 | private: 33 | }; 34 | } //namespace baikal 35 | -------------------------------------------------------------------------------- /include/logical_plan/load_planner.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | // Brief: the class for generating and executing prepare statements 16 | #pragma once 17 | #include "logical_planner.h" 18 | #include "query_context.h" 19 | #include "parser.h" 20 | 21 | namespace baikaldb { 22 | 23 | class LoadPlanner : public LogicalPlanner { 24 | public: 25 | 26 | LoadPlanner(QueryContext* ctx) : LogicalPlanner(ctx) {} 27 | 28 | virtual ~LoadPlanner() {} 29 | 30 | virtual int plan(); 31 | 32 | private: 33 | int parse_load_info(pb::LoadNode* load_node, pb::InsertNode* insert_node); 34 | int parse_field_list(pb::LoadNode* node, pb::InsertNode* insert_node); 35 | int parse_set_list(pb::LoadNode* node); 36 | 37 | private: 38 | int64_t _table_id = 0; 39 | parser::LoadDataStmt* _load_stmt = nullptr; 40 | std::vector _set_slots; 41 | std::vector _set_values; 42 | }; 43 | } //namespace baikal 44 | -------------------------------------------------------------------------------- /include/logical_plan/prepare_planner.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | // Brief: the class for generating and executing prepare statements 16 | #pragma once 17 | #include "logical_planner.h" 18 | #include "query_context.h" 19 | #include "parser.h" 20 | 21 | namespace baikaldb { 22 | 23 | class PreparePlanner : public LogicalPlanner { 24 | public: 25 | 26 | PreparePlanner(QueryContext* ctx) : LogicalPlanner(ctx) {} 27 | 28 | virtual ~PreparePlanner() {} 29 | 30 | virtual int plan(); 31 | 32 | private: 33 | int stmt_prepare(const std::string& stmt_name, const std::string& stmt_sql); 34 | int stmt_execute(const std::string& stmt_name, std::vector& params); 35 | int stmt_close(const std::string& stmt_name); 36 | }; 37 | } //namespace baikal -------------------------------------------------------------------------------- /include/logical_plan/setkv_planner.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | // Brief: the class for handling SQL querys like 'set key=val' 16 | #pragma once 17 | #include "logical_planner.h" 18 | #include "query_context.h" 19 | #include "parser.h" 20 | 21 | namespace baikaldb { 22 | 23 | class SetKVPlanner : public LogicalPlanner { 24 | public: 25 | 26 | SetKVPlanner(QueryContext* ctx) : LogicalPlanner(ctx) {} 27 | virtual ~SetKVPlanner() {} 28 | virtual int plan(); 29 | 30 | private: 31 | int set_autocommit_0(); 32 | int set_autocommit_1(); 33 | int set_autocommit(parser::ExprNode* expr); 34 | int set_user_variable(const std::string& key, parser::ExprNode* expr); 35 | int set_sql_mode(parser::ExprNode* expr); 36 | 37 | private: 38 | parser::SetStmt* _set_stmt; 39 | }; 40 | } //namespace baikal 41 | -------------------------------------------------------------------------------- /include/logical_plan/transaction_planner.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | // Brief: the class for generating exec plans for 16 | // transaction control SQL 17 | // (start transaction/begin/rollback/commit/set autocommit=0/1) 18 | #pragma once 19 | #include "logical_planner.h" 20 | #include "query_context.h" 21 | #include "parser.h" 22 | 23 | namespace baikaldb { 24 | 25 | class TransactionPlanner : public LogicalPlanner { 26 | public: 27 | 28 | TransactionPlanner(QueryContext* ctx) : LogicalPlanner(ctx) {} 29 | 30 | virtual ~TransactionPlanner() {} 31 | 32 | virtual int plan(); 33 | 34 | private: 35 | 36 | }; 37 | } //namespace baikal -------------------------------------------------------------------------------- /include/meta_server/query_database_manager.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #pragma once 16 | 17 | #include "database_manager.h" 18 | 19 | namespace baikaldb { 20 | class QueryDatabaseManager { 21 | public: 22 | ~QueryDatabaseManager() {} 23 | static QueryDatabaseManager* get_instance() { 24 | static QueryDatabaseManager instance; 25 | return &instance; 26 | } 27 | void get_database_info(const pb::QueryRequest* request, pb::QueryResponse* response); 28 | private: 29 | QueryDatabaseManager() {} 30 | }; //class 31 | }//namespace 32 | 33 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 34 | -------------------------------------------------------------------------------- /include/meta_server/query_namespace_manager.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #pragma once 16 | 17 | #include "namespace_manager.h" 18 | 19 | namespace baikaldb { 20 | class QueryNamespaceManager { 21 | public: 22 | ~QueryNamespaceManager() {} 23 | 24 | static QueryNamespaceManager* get_instance() { 25 | static QueryNamespaceManager instance; 26 | return &instance; 27 | } 28 | //查询类接口,与写入类接口并发访问 29 | void get_namespace_info(const pb::QueryRequest* request, pb::QueryResponse* response); 30 | private: 31 | QueryNamespaceManager() {} 32 | }; 33 | }//namespace 34 | 35 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 36 | -------------------------------------------------------------------------------- /include/physical_plan/auto_inc.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #pragma once 16 | 17 | #include "query_context.h" 18 | #include "meta_server_interact.hpp" 19 | namespace baikaldb { 20 | DECLARE_string(meta_server_bns); 21 | class AutoInc { 22 | public: 23 | /* 24 | * calculate auto-increment ids 25 | */ 26 | int analyze(QueryContext* ctx); 27 | 28 | int update_auto_inc(SmartTable table_info_ptr, 29 | NetworkSocket* client_conn, 30 | bool use_backup, 31 | std::vector<SmartRecord>& insert_records); 32 | static bool need_degrade; 33 | static TimeCost last_degrade_time; 34 | static bvar::Adder<int64_t> auto_inc_count; 35 | static bvar::Adder<int64_t> auto_inc_error; 36 | }; 37 | } 38 | 39 | /* vim: set ts=4 sw=4 sts=4 tw=100 */ 40 | -------------------------------------------------------------------------------- /include/physical_plan/decorrelate.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #pragma once 16 | 17 | #include "exec_node.h" 18 | #include "apply_node.h" 19 | #include "query_context.h" 20 | 21 | namespace baikaldb { 22 | class DeCorrelate { 23 | public: 24 | /* decorrelate correlated subqueries 25 | */ 26 | int analyze(QueryContext* ctx) { 27 | ExecNode* plan = ctx->root; 28 | std::vector<ExecNode*> apply_nodes; 29 | plan->get_node(pb::APPLY_NODE, apply_nodes); 30 | if (apply_nodes.size() == 0) { 31 | return 0; 32 | } 33 | for (auto& apply_node : apply_nodes) { 34 | ApplyNode* apply = static_cast<ApplyNode*>(apply_node); 35 | apply->decorrelate(); 36 | } 37 | return 0; 38 | } 39 | }; 40 | } 41 | 42 | /* vim: set ts=4 sw=4 sts=4 tw=100 */ -------------------------------------------------------------------------------- /include/physical_plan/join_reorder.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #pragma once 16 | 17 | #include "query_context.h" 18 | 19 | namespace baikaldb { 20 | class JoinReorder { 21 | public: 22 | int analyze(QueryContext* ctx); 23 | int reorder(QueryContext* ctx, ExecNode* node); 24 | }; 25 | } 26 | 27 | /* vim: set ts=4 sw=4 sts=4 tw=100 */ 28 | -------------------------------------------------------------------------------- /include/physical_plan/limit_calc.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc.
All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #pragma once 16 | 17 | #include "exec_node.h" 18 | #include "query_context.h" 19 | #include "limit_node.h" 20 | 21 | namespace baikaldb { 22 | class LimitCalc { 23 | public: 24 | /* 从limit节点开始 25 | * 每个节点limit值都置位offset+limit 26 | * 直到filter、sort、merge_agg/agg 27 | */ 28 | int analyze(QueryContext* ctx); 29 | private: 30 | void _analyze_limit(QueryContext* ctx, ExecNode* node, int64_t limit); 31 | }; 32 | } 33 | 34 | /* vim: set ts=4 sw=4 sts=4 tw=100 */ 35 | -------------------------------------------------------------------------------- /include/protocol/mysql_err_handler.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #pragma once 16 | 17 | #include 18 | #include "common.h" 19 | #include "mysql_err_code.h" 20 | 21 | namespace baikaldb { 22 | 23 | struct MysqlErrorItem { 24 | int err_code; 25 | std::string err_name; 26 | std::string state_odbc; 27 | std::string state_jdbc; 28 | 29 | }; 30 | 31 | class MysqlErrHandler { 32 | public: 33 | virtual ~MysqlErrHandler(); 34 | 35 | static MysqlErrHandler* get_instance() { 36 | static MysqlErrHandler err_handler; 37 | return &err_handler; 38 | } 39 | 40 | bool init(); 41 | 42 | MysqlErrorItem* get_error_item_by_code(MysqlErrCode code); 43 | 44 | private: 45 | MysqlErrHandler() {}; 46 | MysqlErrHandler& operator=(const MysqlErrHandler& other); 47 | 48 | std::unordered_map _error_mapping; 49 | bool _is_init = false; 50 | }; 51 | 52 | } // namespace baikal 53 | 54 | -------------------------------------------------------------------------------- /include/protocol/task_manager.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "common.h" 4 | #include "task_fetcher.h" 5 | 6 | namespace baikaldb { 7 | 8 | DECLARE_int32(worker_number); 9 | 10 | class TaskManager : public Singleton { 11 | public: 12 | int init(); 13 | 14 | void fetch_thread(); 15 | 16 | void process_ddl_work(pb::RegionDdlWork work); 17 | void process_txn_ddl_work(pb::DdlWorkInfo work); 18 | 19 | private: 20 | ConcurrencyBthread _workers {FLAGS_worker_number}; 21 | }; 22 | 23 | } // namespace baikaldb 24 | -------------------------------------------------------------------------------- /include/raft/can_add_peer_setter.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #pragma once 16 | #include 17 | 18 | namespace baikaldb { 19 | class CanAddPeerSetter { 20 | public: 21 | virtual ~CanAddPeerSetter() {} 22 | 23 | static CanAddPeerSetter* get_instance() { 24 | static CanAddPeerSetter _instance; 25 | return &_instance; 26 | } 27 | void set_can_add_peer(int64_t region_id); 28 | private: 29 | CanAddPeerSetter() {} 30 | }; 31 | } 32 | 33 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 34 | -------------------------------------------------------------------------------- /include/raft/my_raft_log.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #pragma once 16 | 17 | namespace baikaldb { 18 | 19 | extern int register_myraft_extension(); 20 | 21 | } 22 | 23 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 24 | -------------------------------------------------------------------------------- /include/raft/raft_control.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #pragma once 16 | 17 | #ifdef BAIDU_INTERNAL 18 | #include 19 | #include 20 | #include 21 | #include 22 | #else 23 | #include 24 | #include 25 | #include 26 | #include 27 | #endif 28 | #include "proto/raft.pb.h" 29 | #include "common.h" 30 | 31 | namespace baikaldb { 32 | extern void common_raft_control(google::protobuf::RpcController* controller, 33 | const pb::RaftControlRequest* request, 34 | pb::RaftControlResponse* response, 35 | google::protobuf::Closure* done, 36 | braft::Node* node); 37 | } 38 | 39 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 40 | -------------------------------------------------------------------------------- /include/raft/split_index_getter.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #pragma once 16 | #include 17 | 18 | namespace baikaldb { 19 | class SplitIndexGetter { 20 | public: 21 | virtual ~SplitIndexGetter() {} 22 | 23 | static SplitIndexGetter* get_instance() { 24 | static SplitIndexGetter _instance; 25 | return &_instance; 26 | } 27 | int64_t get_split_index(int64_t region_id); 28 | private: 29 | SplitIndexGetter() {} 30 | }; 31 | } 32 | 33 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 34 | -------------------------------------------------------------------------------- /include/raft/update_region_status.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #pragma once 16 | #include 17 | 18 | namespace baikaldb { 19 | class UpdateRegionStatus { 20 | public: 21 | virtual ~UpdateRegionStatus() {} 22 | 23 | static UpdateRegionStatus* get_instance() { 24 | static UpdateRegionStatus _instance; 25 | return &_instance; 26 | } 27 | void reset_region_status(int64_t region_id); 28 | private: 29 | UpdateRegionStatus() {} 30 | }; 31 | } 32 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 33 | -------------------------------------------------------------------------------- /include/sqlparser/gen_source.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | prefix='/opt/compiler/gcc-4.8.2/bin/' 4 | 5 | cur_dir='.' 6 | if [[ $2 == "opensource" ]]; then 7 | prefix='' 8 | cur_dir=$1 9 | fi 10 | 11 | echo "prefix: ${prefix}" 12 | echo "output: ${out_dir}" 13 | 14 | cd ${cur_dir}/include/sqlparser && ${prefix}flex sql_lex.l && ${prefix}bison sql_parse.y 15 | 16 | dest_dir=$3 17 | if [ ! -z "${dest_dir}" ]; then 18 | mv *.flex.* ${dest_dir}/ && mv *.yacc.* ${dest_dir}/ 19 | fi 20 | -------------------------------------------------------------------------------- /insider-preview/Dockerfile.preview: -------------------------------------------------------------------------------- 1 | ARG OS 2 | ARG VERSION 3 | FROM ghcr.io/baikalgroup/baikal-dev:${OS}-${VERSION} as builder 4 | ARG OS 5 | ARG VERSION 6 | WORKDIR /work 7 | COPY . 
./BaikalDB/ 8 | RUN tar xfz bazelcache.tgz && cd BaikalDB \ 9 | && env HOME=/work USER=work bazelisk build //:all \ 10 | && bash ./ci/package.sh version=nightly os=${OS}-${VERSION} 11 | 12 | RUN mkdir /work/output && tar xfz /work/pack/baikal-all-nightly-${OS}-${VERSION}.tgz -C /work/output \ 13 | && cp /work/BaikalDB/insider-preview/entrypoint.sh /work/output && chmod +x /work/output/entrypoint.sh 14 | 15 | 16 | 17 | FROM ${OS}:${VERSION} 18 | ARG OS 19 | ARG VERSION 20 | 21 | ENV TZ=Asia/Shanghai 22 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone 23 | ENV LANG=en_US.utf8 24 | 25 | RUN if [ "$VERSION" = "16.04" ]; then SSL_LIB="libssl1.0" ; else SSL_LIB="libssl1.1" ;fi \ 26 | && if [ "${OS}" = "ubuntu" ]; then \ 27 | apt-get update && apt-get install -y curl $SSL_LIB && rm -rf /var/lib/apt/lists/*; \ 28 | elif [ "${OS}" = "centos" ] ; then \ 29 | yum update -y && yum install -y file && yum clean all && rm -rf /var/cache/yum; \ 30 | fi 31 | 32 | # copy artifacts 33 | COPY --from=builder /work/output /app/ 34 | 35 | WORKDIR /app/ 36 | ENTRYPOINT [ "/app/entrypoint.sh" ] 37 | -------------------------------------------------------------------------------- /insider-preview/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | services: 3 | meta: 4 | image: ghcr.io/baidu/baikaldb-preview:${OS} 5 | command: meta 6 | ports: 7 | - "8010:8010" 8 | store: 9 | image: ghcr.io/baidu/baikaldb-preview:${OS} 10 | command: store 11 | ports: 12 | - "8110:8110" 13 | depends_on: 14 | - meta 15 | db: 16 | image: ghcr.io/baidu/baikaldb-preview:${OS} 17 | command: db 18 | depends_on: 19 | - meta 20 | - store 21 | ports: 22 | - "28282:28282" 23 | -------------------------------------------------------------------------------- /insider-preview/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Docker Image entrypoint script 3 | 4 | startMeta() 5 | { 6 | echo "Starting baikalMeta" 7 | cd baikalMeta 8 | bin/baikalMeta --meta_server_bns=${META_SERVER_BNS:-$(hostname -i):8010} 9 | 10 | } 11 | startStore() 12 | { 13 | echo "Starting baikalStore" 14 | sleep 10 15 | cd baikalStore 16 | source script/init_meta_server.sh meta:8010 17 | source script/create_namespace.sh meta:8010 18 | source script/create_database.sh meta:8010 19 | source script/create_user.sh meta:8010 20 | 21 | bin/baikalStore --meta_server_bns=${META_SERVER_BNS:-meta:8010} 22 | 23 | } 24 | 25 | startDb() { 26 | echo "Starting baikaldb" 27 | sleep 10 28 | cd baikaldb 29 | source script/create_internal_table.sh meta:8010 30 | bin/baikaldb --meta_server_bns=${META_SERVER_BNS:-meta:8010} 31 | } 32 | 33 | cmd=$1 34 | shift 35 | case $cmd in 36 | meta) 37 | startMeta $@ 38 | exit $? 39 | ;; 40 | store) 41 | startStore $@ 42 | exit $? 43 | ;; 44 | db) 45 | startDb $@ 46 | exit $? 47 | ;; 48 | *) 49 | echo "Unknown Command" 50 | exit 1 51 | ;; 52 | esac 53 | -------------------------------------------------------------------------------- /licenses/QL-LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014 The ql Authors. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are 5 | met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 
9 | * Redistributions in binary form must reproduce the above 10 | copyright notice, this list of conditions and the following disclaimer 11 | in the documentation and/or other materials provided with the 12 | distribution. 13 | * Neither the names of the authors nor the names of the 14 | contributors may be used to endorse or promote products derived from 15 | this software without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /licenses/REDIS-LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2006-2015 The redis Authors. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are 5 | met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above 10 | copyright notice, this list of conditions and the following disclaimer 11 | in the documentation and/or other materials provided with the 12 | distribution. 13 | * Neither the names of the authors nor the names of the 14 | contributors may be used to endorse or promote products derived from 15 | this software without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | -------------------------------------------------------------------------------- /proto/binlog.proto: -------------------------------------------------------------------------------- 1 | syntax="proto2"; 2 | package baikaldb.pb; 3 | 4 | enum BinlogType { 5 | PREWRITE = 0; 6 | COMMIT = 1; 7 | ROLLBACK = 2; 8 | DDL = 3; 9 | FAKE = 4; 10 | } 11 | 12 | message Binlog { 13 | optional BinlogType type = 1; 14 | optional int64 start_ts = 2; 15 | optional int64 commit_ts = 3; 16 | optional bytes prewrite_key = 4; 17 | optional PrewriteValue prewrite_value = 5; 18 | optional bytes ddl_query = 6; 19 | optional bytes stmts = 7; 20 | optional uint64 partition_key = 8; 21 | } 22 | 23 | message PrewriteValue { 24 | optional int64 schema_version = 1; 25 | repeated TableMutation mutations = 2; 26 | } 27 | 28 | enum MutationType { 29 | INSERT = 0; 30 | UPDATE = 1; 31 | DELETE = 2; 32 | } 33 | 34 | message TableMutation { 35 | optional int64 table_id = 1; 36 | repeated bytes insert_rows = 2; 37 | repeated bytes update_rows = 3; 38 | repeated bytes deleted_rows = 4; 39 | repeated MutationType sequence = 5; 40 | optional bytes sql = 6; 41 | optional uint64 sign = 7; 42 | } 43 | -------------------------------------------------------------------------------- /proto/fc.proto: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baidu/BaikalDB/c04e446e4404ab142c50b042a91567062b4398c8/proto/fc.proto -------------------------------------------------------------------------------- /proto/raft.proto: -------------------------------------------------------------------------------- 1 | syntax="proto2"; 2 | import "common.proto"; 3 | package baikaldb.pb; 4 | //option cc_enable_arenas = true; 5 | 6 | enum RaftControlOp { 7 | SetPeer = 1; //只能发送给当前leader 8 | TransLeader = 2; //主动切走leader,只能发送到leader机器上 9 | SnapShot = 3; //手动控制做snapshot, leader and follower都可以做 10 | GetLeader = 4; //获取leader, leader and follower都可以做 11 | ShutDown = 5; //leader and follower both can do, 只关闭,但并没有从raft group里去掉 12 | Vote = 6; //调用node的vote接口 13 | ResetVoteTime = 7; //调用node的reset_election_timeout_ms接口 14 | GetPeerList = 8; //获取当前peer list, 只能发leader 15 | }; 16 | 17 | // operation request/response 18 | //force == true,只用在大部分节点已经挂掉需要紧急恢复的情况 19 | //官方wiki上给出的解释是可能会丢数据,尽量不要使用该方法 20 | message RaftControlRequest { 21 | required RaftControlOp op_type = 1; 22 | optional int64 region_id = 2; 23 | optional string new_leader = 3; 24 | repeated string old_peers = 4; 25 | repeated string new_peers = 5; 26 | optional int64 election_time = 6; 27 | optional bool force = 7; 28 | }; 29 | 30 | message RaftControlResponse { 31 | required int64 region_id = 1; 32 | required ErrCode errcode = 2; 33 | optional string leader = 3; 34 | optional string errmsg = 4; 35 | repeated string peers = 5; 36 | }; 37 | -------------------------------------------------------------------------------- /proto/reverse.proto: -------------------------------------------------------------------------------- 1 | syntax="proto2"; 2 | package baikaldb.pb; 3 | //option cc_enable_arenas = true; 4 | 5 | enum ReverseNodeType { 6 | REVERSE_NODE_NORMAL = 0; 7 | REVERSE_NODE_DELETE = 1; 8 | }; 9 | 10 | //--common 11 | message CommonReverseNode 12 | { 13 | optional bytes key = 1;//must 14 | required ReverseNodeType flag = 2;//must 15 | optional float weight = 3; 16 | }; 17 | message CommonReverseList 18 | { 19 | repeated CommonReverseNode reverse_nodes = 1;//must 20 | }; 21 | 
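The RaftControlRequest/RaftControlResponse messages in raft.proto above are plain proto2 definitions, so callers drive them through the ordinary protobuf-generated accessors; the inline comments note that SetPeer, TransLeader and GetPeerList must be sent to the current leader, and that force is only for emergency recovery when most peers are lost (and may lose data). The sketch below is illustrative only and not code from this repository: the region id and store address are made up, and the actual RPC dispatch happens through common_raft_control in include/raft/raft_control.h.

    #include "proto/raft.pb.h"

    // Build a request asking the current leader of a region to hand
    // leadership to a specific peer; it must be sent to the leader itself.
    baikaldb::pb::RaftControlRequest make_trans_leader_request() {
        baikaldb::pb::RaftControlRequest request;
        request.set_op_type(baikaldb::pb::TransLeader);
        request.set_region_id(42);                   // hypothetical region id
        request.set_new_leader("127.0.0.1:8110");    // hypothetical store address
        return request;
    }

On the response side, errcode and leader tell the caller whether the contacted node was actually the leader; a client would typically retry against response.leader() when it was not.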
-------------------------------------------------------------------------------- /proto/statistics.proto: -------------------------------------------------------------------------------- 1 | syntax="proto2"; 2 | import "common.proto"; 3 | package baikaldb.pb; 4 | 5 | message BucketInfo { 6 | required int32 distinct_cnt = 1; 7 | required int32 bucket_size = 2; 8 | required ExprValue start = 3; 9 | required ExprValue end = 4; 10 | }; 11 | message ColumnInfo { 12 | required PrimitiveType col_type = 1; 13 | required int32 field_id = 2; 14 | required int32 distinct_cnt = 3; 15 | required int32 null_value_cnt = 4; 16 | repeated BucketInfo bucket_infos = 5; 17 | }; 18 | message Histogram { 19 | required int64 sample_rows = 1; 20 | required int64 total_rows = 2; 21 | repeated ColumnInfo column_infos = 3; 22 | }; 23 | message CMsketchItem { 24 | required int32 depth = 1; 25 | required int32 width = 2; 26 | required int32 value = 3; 27 | }; 28 | message CMsketchColumn { 29 | required int32 field_id = 1; 30 | repeated CMsketchItem cmitems = 2; 31 | }; 32 | message CMsketch { 33 | required int32 depth = 1; 34 | required int32 width = 2; 35 | repeated CMsketchColumn cmcolumns = 3; 36 | }; 37 | message Statistics { 38 | required int64 table_id = 1; 39 | optional int64 version = 2; 40 | required Histogram histogram = 3; 41 | required CMsketch cmsketch = 4; 42 | }; 43 | 44 | -------------------------------------------------------------------------------- /proto/test_decode.proto: -------------------------------------------------------------------------------- 1 | syntax="proto2"; 2 | option cc_enable_arenas = true; 3 | 4 | message TestMessage { 5 | optional sint32 col1 = 1; 6 | optional sint32 col2 = 2; 7 | optional uint32 col3 = 3; 8 | optional uint32 col4 = 4; 9 | optional sint64 col5 = 5; 10 | optional sint64 col6 = 6; 11 | optional uint64 col7 = 7; 12 | optional string col8 = 8; 13 | } 14 | 15 | message TestTupleRecord { 16 | optional sint32 col1 = 1; 17 | optional sint64 col2 = 2; 18 | optional uint32 col3 = 3; 19 | optional uint64 col4 = 4; 20 | optional int32 col5 = 5; 21 | optional int64 col6 = 6; 22 | optional fixed32 col7 = 7; 23 | optional fixed64 col8 = 8; 24 | optional sfixed32 col9 = 9; 25 | optional sfixed64 col10 = 10; 26 | optional float col11 = 11; 27 | optional double col12 = 12; 28 | optional bool col13 = 13; 29 | optional bytes col14 = 14; 30 | } 31 | 32 | message Pg { 33 | repeated int32 a = 1; 34 | optional int32 b = 2; 35 | }; 36 | 37 | message Packed { 38 | repeated Pg a = 1; 39 | } 40 | 41 | message Optional { 42 | optional Pg a = 1; 43 | } 44 | -------------------------------------------------------------------------------- /qrcode.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baidu/BaikalDB/c04e446e4404ab142c50b042a91567062b4398c8/qrcode.jpeg -------------------------------------------------------------------------------- /src/common/default_room_define.cpp: -------------------------------------------------------------------------------- 1 | 2 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 
6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | 16 | #include 17 | 18 | namespace baikaldb{ 19 | DEFINE_string(default_logical_room, "default", "default_logical_room"); 20 | DEFINE_string(default_physical_room, "default", "default_physical_room"); 21 | } 22 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 23 | -------------------------------------------------------------------------------- /src/common/mut_table_key.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "mut_table_key.h" 16 | #include "table_key.h" 17 | #include "table_record.h" 18 | 19 | namespace baikaldb { 20 | 21 | MutTableKey::MutTableKey(const TableKey& key) : 22 | _full(key.get_full()), 23 | _data(key.data().data_, key.data().size_) {} 24 | 25 | MutTableKey& MutTableKey::append_index(const TableKey& key) { 26 | _data.append(key.data().data_, key.data().size_); 27 | return *this; 28 | } 29 | 30 | int MutTableKey::append_index(IndexInfo& index, TableRecord* record, int field_cnt, bool clear) { 31 | return record->encode_key(index, *this, field_cnt, clear, false); 32 | } 33 | } // end of namespace baikaldb 34 | -------------------------------------------------------------------------------- /src/common/store_interact.cpp: -------------------------------------------------------------------------------- 1 | 2 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | 16 | #include "store_interact.hpp" 17 | #include 18 | 19 | namespace baikaldb { 20 | DEFINE_int32(store_request_timeout, 60000, 21 | "store as server request timeout, default:60000ms"); 22 | DEFINE_int32(store_connect_timeout, 5000, 23 | "store as server connect timeout, default:5000ms"); 24 | } 25 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 26 | -------------------------------------------------------------------------------- /src/exec/kill_node.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "kill_node.h" 16 | #include "runtime_state.h" 17 | 18 | namespace baikaldb { 19 | int KillNode::init(const pb::PlanNode& node) { 20 | int ret = 0; 21 | ret = ExecNode::init(node); 22 | if (ret < 0) { 23 | DB_WARNING("ExecNode::init fail, ret:%d", ret); 24 | return ret; 25 | } 26 | _db_conn_id = node.derive_node().kill_node().db_conn_id(); 27 | _is_query = node.derive_node().kill_node().is_query(); 28 | return 0; 29 | } 30 | 31 | int KillNode::open(RuntimeState* state) { 32 | int ret = 0; 33 | ret = ExecNode::open(state); 34 | if (ret < 0) { 35 | DB_WARNING_STATE(state, "ExecNode::open fail:%d", ret); 36 | return ret; 37 | } 38 | if (_db_conn_id != 0) { 39 | state->conn_id_cancel(_db_conn_id); 40 | } 41 | return 0; 42 | } 43 | 44 | } 45 | -------------------------------------------------------------------------------- /src/raft_dummy/dummy_setter.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #include "can_add_peer_setter.h" 16 | #include "split_index_getter.h" 17 | #include "update_region_status.h" 18 | 19 | namespace baikaldb { 20 | void CanAddPeerSetter::set_can_add_peer(int64_t /*region_id*/) { 21 | } 22 | int64_t SplitIndexGetter::get_split_index(int64_t /*region_id*/) { 23 | return INT_FAST64_MAX; 24 | } 25 | void UpdateRegionStatus::reset_region_status(int64_t /*region_id*/) { 26 | return; 27 | } 28 | } 29 | 30 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 31 | -------------------------------------------------------------------------------- /src/raft_meta/can_add_peer_setter.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "can_add_peer_setter.h" 16 | 17 | namespace baikaldb { 18 | void CanAddPeerSetter::set_can_add_peer(int64_t /*region_id*/) { 19 | } 20 | } 21 | 22 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 23 | -------------------------------------------------------------------------------- /src/raft_meta/split_index_getter.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "split_index_getter.h" 16 | 17 | namespace baikaldb { 18 | int64_t SplitIndexGetter::get_split_index(int64_t /*region_id*/) { 19 | return INT_FAST64_MAX; 20 | } 21 | } 22 | 23 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 24 | -------------------------------------------------------------------------------- /src/raft_meta/update_region_status.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "update_region_status.h" 16 | 17 | namespace baikaldb { 18 | void UpdateRegionStatus::reset_region_status(int64_t /*region_id*/) { 19 | return; 20 | } 21 | } 22 | 23 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 24 | -------------------------------------------------------------------------------- /src/raft_store/can_add_peer_setter.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "can_add_peer_setter.h" 16 | #include "store.h" 17 | 18 | namespace baikaldb { 19 | void CanAddPeerSetter::set_can_add_peer(int64_t region_id) { 20 | Store::get_instance()->set_can_add_peer_for_region(region_id); 21 | } 22 | } 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 43 | -------------------------------------------------------------------------------- /src/raft_store/split_index_getter.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "split_index_getter.h" 16 | #include "store.h" 17 | 18 | namespace baikaldb { 19 | int64_t SplitIndexGetter::get_split_index(int64_t region_id) { 20 | return Store::get_instance()->get_split_index_for_region(region_id); 21 | } 22 | } 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 43 | -------------------------------------------------------------------------------- /src/raft_store/update_region_status.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "update_region_status.h" 16 | #include "store.h" 17 | 18 | namespace baikaldb { 19 | void UpdateRegionStatus::reset_region_status(int64_t region_id) { 20 | DB_WARNING("region status was reset, reigon_id: %ld", region_id); 21 | Store::get_instance()->reset_region_status(region_id); 22 | } 23 | } 24 | 25 | /* vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 26 | -------------------------------------------------------------------------------- /src/session/user_info.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018-present Baidu, Inc. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "user_info.h" 16 | 17 | namespace baikaldb { 18 | DEFINE_int32(query_quota_per_user, 3000, "default user query quota by 1 second"); 19 | BRPC_VALIDATE_GFLAG(query_quota_per_user, brpc::PassValidate); 20 | 21 | bool UserInfo::is_exceed_quota() { 22 | if (query_cost.get_time() > 1000000) { 23 | query_cost.reset(); 24 | query_count = 0; 25 | return false; 26 | } 27 | int32_t quota = query_quota; 28 | if (quota == 0) { 29 | quota = FLAGS_query_quota_per_user; 30 | } 31 | return query_count++ > quota; 32 | } 33 | 34 | } // namespace baikaldb 35 | -------------------------------------------------------------------------------- /src/tools/script/add_field.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #创建table 7 | echo -e "修改列名\n" 8 | curl -d '{ 9 | "op_type":"OP_ADD_FIELD", 10 | "table_info": { 11 | "table_name": "wordinfo_new", 12 | "database": "TEST", 13 | "namespace_name": "TEST", 14 | "fields": [ { 15 | "field_name" : "test", 16 | "mysql_type": 6 17 | }, 18 | { 19 | "field_name" : "test_word", 20 | "mysql_type": 13 21 | } 22 | ] 23 | } 24 | }' http://$1/MetaService/meta_manager 25 | echo -e "\n" 26 | 27 | #查询table 28 | curl -d '{ 29 | "op_type" : "QUERY_SCHEMA" 30 | }' http://$1/MetaService/query 31 | echo -e "\n" 32 | 33 | 34 | 35 | curl -d '{ 36 | "op_type":"OP_ADD_FIELD", 37 | "table_info": { 38 | "table_name": "quality_diary", 39 | "database": "TEST", 40 | "namespace_name": "TEST", 41 | "fields": [ { 42 | "field_name" : "image_front_score", 43 | "mysql_type": 13 44 | } 45 | ] 46 | } 47 | }' http://$1/MetaService/meta_manager 48 | -------------------------------------------------------------------------------- /src/tools/script/add_instance.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | #插入物理机房 6 | echo -e "增加实例\n" 7 | curl -d '{ 8 | "op_type": "OP_ADD_INSTANCE", 9 | "instance": { 10 | "address" : "127.0.0.1:8210", 11 | "capacity" : 107374182400, 12 | "used_size" : 0, 13 | "resource_tag" :"", 14 | "physical_room" :"default", 15 | "status": "FAULTY" 16 | } 17 | }' http://$1/MetaService/meta_manager 18 | echo -e "\n" 19 | 20 | -------------------------------------------------------------------------------- /src/tools/script/add_privilege.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | #增加权限 6 | curl -d '{ 7 | "op_type":"OP_ADD_PRIVILEGE", 8 | "user_privilege" : { 9 | "username" : "******", 10 | "password" : "******", 11 | "namespace_name" : "TEST", 12 | "privilege_database" : [{ 13 | "database" : "TEST", 14 | "database_rw" : 2 15 | }] 16 | } 17 | }' http://$1/MetaService/meta_manager 18 | echo -e "\n" 19 | 20 | #查询权限 21 | curl -d '{ 22 | "op_type" : "QUERY_USERPRIVILEG" 23 | }' http://$1/MetaService/query 24 | echo -e "\n" 25 | -------------------------------------------------------------------------------- /src/tools/script/backup/add_field.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #创建table 7 | echo -e "修改列名\n" 8 | curl -d '{ 9 | "op_type":"OP_ADD_FIELD", 10 | "table_info": { 11 | "table_name": "wordinfo_new", 12 | "database": "TEST", 13 | "namespace_name": "TEST", 14 | "fields": [ { 15 | "field_name" : "test", 16 | "mysql_type": 6 17 | }, 18 | { 19 | "field_name" : "test_word", 20 | "mysql_type": 13 21 | } 22 | ] 23 | } 24 | }' http://$1/MetaService/meta_manager 25 | echo -e "\n" 26 | 27 | #查询table 28 | curl -d '{ 29 | "op_type" : "QUERY_SCHEMA" 30 | }' http://$1/MetaService/query 31 | echo -e "\n" 32 | 33 | 34 | 35 | curl -d '{ 36 | "op_type":"OP_ADD_FIELD", 37 | "table_info": { 38 | "table_name": "quality_diary", 39 | "database": "TEST", 40 | "namespace_name": "TEST", 41 | "fields": [ { 42 | "field_name" : "image_front_score", 43 | "mysql_type": 13 44 | } 45 | ] 46 | } 47 | }' http://$1/MetaService/meta_manager 48 | -------------------------------------------------------------------------------- /src/tools/script/backup/add_instance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | #插入物理机房 6 | echo -e "增加实例\n" 7 | curl -d '{ 8 | "op_type": "OP_ADD_INSTANCE", 9 | "instance": { 10 | "address" : "127.0.0.1:8210", 11 | "capacity" : 107374182400, 12 | "used_size" : 0, 13 | "resource_tag" :"", 14 | "physical_room" :"default", 15 | "status": "FAULTY" 16 | } 17 | }' http://$1/MetaService/meta_manager 18 | echo -e "\n" 19 | 20 | -------------------------------------------------------------------------------- /src/tools/script/backup/batch_remove_region.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:在store删除无用的region 4 | 5 | sh remove_region.sh 127.0.0.1:8222 32862 6 | sh remove_region.sh 127.0.0.1:8222 32863 7 | -------------------------------------------------------------------------------- /src/tools/script/backup/create_database.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | 6 | #创建database 7 | echo -e "创建database\n" 8 | curl -d '{ 9 | "op_type":"OP_CREATE_DATABASE", 10 | "database_info": { 11 | "database":"TestDB", 12 | "namespace_name":"TEST_NAMESPACE", 13 | "quota": 524288 14 | } 15 | }' http://$1/MetaService/meta_manager 16 | echo -e "\n" 17 | -------------------------------------------------------------------------------- /src/tools/script/backup/download_conf.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | #/*************************************************************************** 3 | # * 4 | # * Copyright (c) 2016 Baidu.com, Inc. All Rights Reserved 5 | # * 6 | # **************************************************************************/ 7 | 8 | 9 | 10 | #/** 11 | # * @file conf.sh 12 | # * @date 2016/08/01 19:23:47 13 | # * @brief 14 | # * 15 | # **/ 16 | 17 | #配置 18 | home_dir="$HOME" 19 | #日志目录 20 | script_dir=$(pwd) 21 | log_dir="${script_dir}/log" 22 | #日志文件 23 | log_filename="${log_dir}/download_schedule_new.log" 24 | #检查任务是否完成的次数 25 | retry_task_check=1000 26 | #每次检查任务间隔时间 27 | sleep_per_task_check=30 28 | 29 | hadoop="$HOME/hadoop-client/hadoop/bin/hadoop" 30 | hdfs_path="/app/ecom/fcr/roi/ocpc/ocpc_data/yewuduan_v3" 31 | data_path="${script_dir}/data" 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | #/y vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 48 | -------------------------------------------------------------------------------- /src/tools/script/backup/drop_database.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | 6 | #删除database 7 | echo -e "drop database\n" 8 | curl -d '{ 9 | "op_type":"OP_DROP_DATABASE", 10 | "database_info": { 11 | "database":"TEST", 12 | "namespace_name":"TEST" 13 | } 14 | }' http://$1/MetaService/meta_manager 15 | echo -e "\n" 16 | 17 | #查询database 18 | curl -d '{ 19 | "op_type" : "QUERY_DATABASE" 20 | }' http://$1/MetaService/query 21 | echo -e "\n" 22 | 23 | -------------------------------------------------------------------------------- /src/tools/script/backup/drop_field.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #创建table 7 | echo -e "drop列名\n" 8 | curl -d '{ 9 | "op_type":"OP_DROP_FIELD", 10 | "table_info": { 11 | "table_name": "wordinfo_new", 12 | "database": "TEST", 13 | "namespace_name": "TEST", 14 | "fields": [ { 15 | "field_name" : "" 16 | }, 17 | { 18 | "field_name" : "" 19 | } 20 | ] 21 | } 22 | }' http://$1/MetaService/meta_manager 23 | echo -e "\n" 24 | 25 | #查询table 26 | curl -d '{ 27 | "op_type" : "QUERY_SCHEMA" 28 | }' http://$1/MetaService/query 29 | echo -e "\n" 30 | 31 | -------------------------------------------------------------------------------- /src/tools/script/backup/drop_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | #创建namespace 6 | echo -e "创建namespace\n" 7 | curl -d '{ 8 | "op_type":"OP_DROP_NAMESPACE", 9 | "namespace_info":{ 10 | "namespace_name": "TEST" 11 | } 12 | }' http://$1/MetaService/meta_manager 13 | echo -e "\n" 14 | 15 | #查询namespace 16 | curl -d '{ 17 | 
"op_type" : "QUERY_NAMESPACE" 18 | }' http://$1/MetaService/query 19 | echo -e "\n" 20 | 21 | -------------------------------------------------------------------------------- /src/tools/script/backup/drop_table.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | 4 | echo -e "\n" 5 | echo -e "drop table\n" 6 | curl -d '{ 7 | "op_type":"OP_DROP_TABLE", 8 | "table_info": { 9 | "table_name": "clue_history", 10 | "database": "TEST", 11 | "namespace_name": "TEST" 12 | } 13 | }' http://$1/MetaService/meta_manager 14 | echo -e "\n" 15 | 16 | curl -d '{ 17 | "op_type" : "QUERY_SCHEMA" 18 | }' http://$1/MetaService/query 19 | 20 | 21 | curl -d '{ 22 | "op_type":"OP_DROP_TABLE", 23 | "table_info": { 24 | "table_name": "ideacontent_test", 25 | "database": "TEST", 26 | "namespace_name": "TEST" 27 | } 28 | }' http://$1/MetaService/meta_manager 29 | -------------------------------------------------------------------------------- /src/tools/script/backup/meta_query_region_ids.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | 4 | echo -e "查看从instance角度和raft角度 store上存储的region的diff" 5 | curl -d '{ 6 | "op_type": "QUERY_REGION_IDS", 7 | "instance_address": "'$2'" 8 | }' http://$1/MetaService/query 9 | echo -e "\n" 10 | 11 | -------------------------------------------------------------------------------- /src/tools/script/backup/meta_query_region_peers_status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo -e "query region peers status\n" 3 | echo 'param: meta address' 4 | curl -d '{ 5 | "op_type" : "QUERY_REGION_PEER_STATUS" 6 | }' http://$1/MetaService/query 7 | echo -e "\n" 8 | 9 | 10 | -------------------------------------------------------------------------------- /src/tools/script/backup/modify_index_status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2020-09-18 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | echo -e "修改索引状态\n" 7 | echo -e "IHS_NORMAL : 普通索引\n" 8 | echo -e "IHS_DISABLE : 屏蔽索引\n" 9 | curl -d '{ 10 | "op_type":"OP_SET_INDEX_HINT_STATUS", 11 | "table_info": { 12 | "table_name": "'$4'", 13 | "database": "'$3'", 14 | "namespace_name": "'$2'", 15 | "indexs": [ { 16 | "index_name" : "'$5'", 17 | "hint_status" : "IHS_DISABLE" 18 | } 19 | ] 20 | 21 | } 22 | }' http://$1/MetaService/meta_manager 23 | echo -e "\n" 24 | -------------------------------------------------------------------------------- /src/tools/script/backup/query_diff_region_ids.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | 4 | echo -e "查看从instance角度和raft角度 store上存储的region的diff" 5 | curl -d '{ 6 | "op_type": "QUERY_DIFF_REGION_IDS", 7 | "instance_address": "'$2'" 8 | }' http://$1/MetaService/query 9 | echo -e "\n" 10 | 11 | -------------------------------------------------------------------------------- /src/tools/script/backup/query_faulty_instance.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: UTF-8 -*- 3 | 4 | import re 5 | import json 6 | import requests 7 | 8 | def query_faulty_instance(hosts): 9 | ''' 10 | 查询状态不正常的store 11 | ''' 12 | data = {'op_type': 'QUERY_INSTANCE_FLATTEN'} 13 | url = 'http://' + hosts + '/MetaService/query' 14 | baikal_res = requests.post(url, 
data=json.dumps(data)) 15 | normal_instance = [] 16 | faulty_instance = [] 17 | if baikal_res.status_code == 200: 18 | for item in baikal_res.json()['flatten_instances']: 19 | if item['status'] == 'NORMAL': 20 | normal_instance.append(item['address']) 21 | else: 22 | faulty_instance.append(item['address']) 23 | regex = re.compile('baikalStore') 24 | instance_list = [] 25 | for host in faulty_instance: 26 | ip, _ = host.split(':') 27 | url = 'http://api.matrix.baidu.com/api/v1/matrix/host/' + ip 28 | matrix_res = requests.get(url) 29 | if baikal_res.status_code == 200: 30 | for instance in matrix_res.json()['instances']: 31 | if re.search(regex, instance): 32 | instance_list.append(instance) 33 | print instance 34 | 35 | if __name__ == '__main__': 36 | query_faulty_instance('10.152.70.12:8110') 37 | -------------------------------------------------------------------------------- /src/tools/script/backup/rename_field.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #创建table 7 | echo -e "修改列名\n" 8 | curl -d '{ 9 | "op_type":"OP_RENAME_FIELD", 10 | "table_info": { 11 | "table_name": "wordinfo_new", 12 | "database": "TEST", 13 | "namespace_name": "TEST", 14 | "fields": [ { 15 | "field_name" : "wordid", 16 | "new_field_name": "id" 17 | }, 18 | { 19 | "field_name" : "showword", 20 | "new_field_name": "literal" 21 | } 22 | ] 23 | } 24 | }' http://$1/MetaService/meta_manager 25 | echo -e "\n" 26 | 27 | #查询table 28 | curl -d '{ 29 | "op_type" : "QUERY_SCHEMA" 30 | }' http://$1/MetaService/query 31 | echo -e "\n" 32 | 33 | -------------------------------------------------------------------------------- /src/tools/script/backup/rename_table.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #创建table 7 | echo -e "rename table\n" 8 | curl -d '{ 9 | "op_type":"OP_RENAME_TABLE", 10 | "table_info": { 11 | "table_name": "wordinfo", 12 | "new_table_name": "wordinfo_new", 13 | "database": "TEST", 14 | "namespace_name": "TEST" 15 | } 16 | }' http://$1/MetaService/meta_manager 17 | echo -e "\n" 18 | 19 | #查询table 20 | curl -d '{ 21 | "op_type" : "QUERY_SCHEMA" 22 | }' http://$1/MetaService/query 23 | -------------------------------------------------------------------------------- /src/tools/script/backup/restore_table.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #用来恢复误删的表 3 | echo -e "\n" 4 | curl -d '{ 5 | "op_type":"OP_RESTORE_TABLE", 6 | "table_info": { 7 | "table_name": "test", 8 | "database": "TEST", 9 | "namespace_name": "TEST" 10 | } 11 | }' http://$1/MetaService/meta_manager 12 | echo -e "\n" 13 | 14 | -------------------------------------------------------------------------------- /src/tools/script/backup/store_query_illegal_region.sh: -------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 | #测试场景: 不传region_id, 返回整个机房的region信息中leader是0.0.0.0的region 3 | 4 | echo -e "query_region\n" 5 | echo 'param: address' 6 | curl -d '{ 7 | }' http://$1/StoreService/query_illegal_region 8 | echo -e "\n" 9 | 10 | -------------------------------------------------------------------------------- /src/tools/script/backup/update_byte_size.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 
3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #创建table 7 | echo -e "update byte_size_per_record\n" 8 | curl -d '{ 9 | "op_type":"OP_UPDATE_BYTE_SIZE", 10 | "table_info": { 11 | "table_name": "hotmap", 12 | "database": "TEST", 13 | "namespace_name": "TEST", 14 | "byte_size_per_record": 50 15 | } 16 | }' http://$1/MetaService/meta_manager 17 | echo -e "\n" 18 | 19 | -------------------------------------------------------------------------------- /src/tools/script/backup/update_instance.sh: -------------------------------------------------------------------------------- 1 | #插入物理机房 2 | echo -e "update 实例, 有哪些字段就会更新哪些字段\n" 3 | curl -d '{ 4 | "op_type": "OP_UPDATE_INSTANCE", 5 | "instance": { 6 | "address" : "127.0.0.1:8011", 7 | "status": 1 8 | } 9 | }' http://$1/MetaService/meta_manager 10 | echo -e "\n" 11 | 12 | curl -d '{ 13 | "op_type": "QUERY_INSTANCE", 14 | "instance_address": "127.0.0.1:8011" 15 | }' http://$1/MetaService/query 16 | -------------------------------------------------------------------------------- /src/tools/script/batch_remove_region.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:在store删除无用的region 4 | 5 | sh remove_region.sh 127.0.0.1:8222 32862 6 | sh remove_region.sh 127.0.0.1:8222 32863 7 | -------------------------------------------------------------------------------- /src/tools/script/console_shell/create_meta_info.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo -e " create namespace\n" 4 | curl -d '{ 5 | "op_type":"OP_CREATE_NAMESPACE", 6 | "namespace_info":{ 7 | "namespace_name": "CLUSTER_STATUS", 8 | "quota": 5368709120 9 | } 10 | }' http://$1/MetaService/meta_manager 11 | echo -e "\n" 12 | 13 | #create database 5G 14 | echo -e "create database\n" 15 | curl -d '{ 16 | "op_type":"OP_CREATE_DATABASE", 17 | "database_info": { 18 | "database":"cluster_status", 19 | "namespace_name":"CLUSTER_STATUS", 20 | "quota": 5368709120 21 | } 22 | }' http://$1/MetaService/meta_manager 23 | echo -e "\n" 24 | 25 | echo -e "create user\n" 26 | curl -d '{ 27 | "op_type":"OP_CREATE_USER", 28 | "user_privilege" : { 29 | "username" : "******", 30 | "password" : "******", 31 | "namespace_name" : "CLUSTER_STATUS", 32 | "privilege_database" : [{ 33 | "database" : "cluster_status", 34 | "database_rw" : 2 35 | }], 36 | "bns":["preonline", "offline"], 37 | "ip":["127.0.0.1", "127.0.0.2"] 38 | } 39 | }' http://$1/MetaService/meta_manager 40 | echo -e "\n" 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /src/tools/script/console_shell/watch_meta_query.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | echo -e "query namespace\n" 5 | curl -d '{ 6 | "op_type" : "QUERY_NAMESPACE", 7 | "namespace_name" : "CLUSTER_STATUS" 8 | }' http://$1/MetaService/query 9 | echo -e "\n" 10 | 11 | echo -e "query database\n" 12 | curl -d '{ 13 | "op_type" : "QUERY_DATABASE", 14 | "database" : "cluster_status", 15 | "namespace_name":"CLUSTER_STATUS" 16 | }' http://$1/MetaService/query 17 | echo -e "\n" 18 | 19 | echo -e "query user\n" 20 | curl -d '{ 21 | "op_type" : "QUERY_USERPRIVILEG", 22 | "user_name" : "test" 23 | }' http://$1/MetaService/query 24 | echo -e "\n" 25 | 26 | echo -e 'query table\n' 27 | curl -d '{ 28 | "op_type" : "QUERY_SCHEMA", 29 | "namespace_name" : "CLUSTER_STATUS" 30 | }' http://$1/MetaService/query 31 | echo -e "\n" 
32 | 33 | echo -e 'query instance\n' 34 | curl -d '{ 35 | "op_type" : "QUERY_INSTANCE", 36 | "namespace_name" : "CLUSTER_STATUS" 37 | }' http://$1/MetaService/query 38 | echo -e "\n" 39 | -------------------------------------------------------------------------------- /src/tools/script/create_database.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | 6 | #创建database 7 | echo -e "create database\n" 8 | curl -s -d '{ 9 | "op_type":"OP_CREATE_DATABASE", 10 | "database_info": { 11 | "database":"TestDB", 12 | "namespace_name":"TEST_NAMESPACE", 13 | "quota": 524288 14 | } 15 | }' http://$1/MetaService/meta_manager 16 | -------------------------------------------------------------------------------- /src/tools/script/create_internal_table.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | curl -s -d '{ 3 | "op_type":"OP_CREATE_NAMESPACE", 4 | "namespace_info":{ 5 | "namespace_name": "INTERNAL", 6 | "quota": 1048576 7 | } 8 | }' http://$1/MetaService/meta_manager 9 | 10 | curl -s -d '{ 11 | "op_type":"OP_CREATE_DATABASE", 12 | "database_info": { 13 | "database":"baikaldb", 14 | "namespace_name":"INTERNAL", 15 | "quota": 524288 16 | } 17 | }' http://$1/MetaService/meta_manager 18 | 19 | curl -s -d '{ 20 | "op_type": "OP_CREATE_TABLE", 21 | "table_info": { 22 | "table_name": "__baikaldb_instance", 23 | "database": "baikaldb", 24 | "namespace_name": "INTERNAL", 25 | "fields": [ 26 | { 27 | "field_name" : "instance_id", 28 | "mysql_type" : "UINT64", 29 | "auto_increment" : true 30 | } 31 | ], 32 | "indexs": [ 33 | { 34 | "index_name" : "priamry_key", 35 | "index_type" : "I_PRIMARY", 36 | "field_names": ["instance_id"] 37 | } 38 | ] 39 | } 40 | }' http://$1/MetaService/meta_manager 41 | -------------------------------------------------------------------------------- /src/tools/script/create_meta_info.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo -e " create namespace\n" 4 | curl -d '{ 5 | "op_type":"OP_CREATE_NAMESPACE", 6 | "namespace_info":{ 7 | "namespace_name": "CLUSTER_STATUS", 8 | "quota": 5368709120 9 | } 10 | }' http://$1/MetaService/meta_manager 11 | echo -e "\n" 12 | 13 | #create database 5G 14 | echo -e "create database\n" 15 | curl -d '{ 16 | "op_type":"OP_CREATE_DATABASE", 17 | "database_info": { 18 | "database":"cluster_status", 19 | "namespace_name":"CLUSTER_STATUS", 20 | "quota": 5368709120 21 | } 22 | }' http://$1/MetaService/meta_manager 23 | echo -e "\n" 24 | 25 | echo -e "create user\n" 26 | curl -d '{ 27 | "op_type":"OP_CREATE_USER", 28 | "user_privilege" : { 29 | "username" : "******", 30 | "password" : "******", 31 | "namespace_name" : "CLUSTER_STATUS", 32 | "privilege_database" : [{ 33 | "database" : "cluster_status", 34 | "database_rw" : 2 35 | }], 36 | "bns":["preonline", "offline"], 37 | "ip":["127.0.0.1", "127.0.0.2"] 38 | } 39 | }' http://$1/MetaService/meta_manager 40 | echo -e "\n" 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /src/tools/script/create_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | #创建namespace 6 | echo -e "create namespace\n" 7 | curl -s -d '{ 8 | "op_type":"OP_CREATE_NAMESPACE", 9 | "namespace_info":{ 10 | "namespace_name": "TEST_NAMESPACE", 11 | 
"quota": 1048576 12 | } 13 | }' http://$1/MetaService/meta_manager 14 | 15 | #查询namespace 16 | curl -s -d '{ 17 | "op_type" : "QUERY_NAMESPACE" 18 | }' http://$1/MetaService/query 19 | 20 | -------------------------------------------------------------------------------- /src/tools/script/create_user.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | #创建用户 6 | echo -e "create user\n" 7 | curl -s -d '{ 8 | "op_type":"OP_CREATE_USER", 9 | "user_privilege" : { 10 | "username" : "root", 11 | "password" : "****", 12 | "namespace_name" : "TEST_NAMESPACE", 13 | "privilege_database" : [{ 14 | "database" : "TestDB", 15 | "database_rw" : "WRITE" 16 | }], 17 | "bns":["preonline", "offline"], 18 | "ip":["127.0.0.1", "127.0.0.2"] 19 | } 20 | }' http://$1/MetaService/meta_manager 21 | 22 | #查询权限 23 | curl -s -d '{ 24 | "op_type" : "QUERY_USERPRIVILEG" 25 | }' http://$1/MetaService/query 26 | echo -e "\n" 27 | -------------------------------------------------------------------------------- /src/tools/script/del_ddlwork.sh: -------------------------------------------------------------------------------- 1 | echo -e "查询查询实例\n" 2 | curl -d '{ 3 | "op_type" : "OP_DELETE_DDLWORK", 4 | "ddlwork_info" : { 5 | "table_id" : '$2' 6 | } 7 | }' http://$1/MetaService/meta_manager 8 | echo -e "\n" 9 | -------------------------------------------------------------------------------- /src/tools/script/download_conf.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | #/*************************************************************************** 3 | # * 4 | # * Copyright (c) 2016 Baidu.com, Inc. All Rights Reserved 5 | # * 6 | # **************************************************************************/ 7 | 8 | 9 | 10 | #/** 11 | # * @file conf.sh 12 | # * @author liguoqiang(com@baidu.com) 13 | # * @date 2016/08/01 19:23:47 14 | # * @brief 15 | # * 16 | # **/ 17 | 18 | #配置 19 | home_dir="$HOME" 20 | #日志目录 21 | script_dir=$(pwd) 22 | log_dir="${script_dir}/log" 23 | #日志文件 24 | log_filename="${log_dir}/download_schedule_new.log" 25 | #检查任务是否完成的次数 26 | retry_task_check=1000 27 | #每次检查任务间隔时间 28 | sleep_per_task_check=30 29 | 30 | hadoop="$HOME/hadoop-client/hadoop/bin/hadoop" 31 | hdfs_path="/app/ecom/fcr/roi/ocpc/ocpc_data/yewuduan_v3" 32 | data_path="${script_dir}/data" 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | #/y vim: set expandtab ts=4 sw=4 sts=4 tw=100: */ 49 | -------------------------------------------------------------------------------- /src/tools/script/drop_database.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | 6 | #删除database 7 | echo -e "drop database\n" 8 | curl -d '{ 9 | "op_type":"OP_DROP_DATABASE", 10 | "database_info": { 11 | "database":"TEST", 12 | "namespace_name":"TEST" 13 | } 14 | }' http://$1/MetaService/meta_manager 15 | echo -e "\n" 16 | 17 | #查询database 18 | curl -d '{ 19 | "op_type" : "QUERY_DATABASE" 20 | }' http://$1/MetaService/query 21 | echo -e "\n" 22 | 23 | -------------------------------------------------------------------------------- /src/tools/script/drop_field.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #创建table 7 | echo -e "drop列名\n" 8 | 
curl -d '{ 9 | "op_type":"OP_DROP_FIELD", 10 | "table_info": { 11 | "table_name": "wordinfo_new", 12 | "database": "TEST", 13 | "namespace_name": "TEST", 14 | "fields": [ { 15 | "field_name" : "" 16 | }, 17 | { 18 | "field_name" : "" 19 | } 20 | ] 21 | } 22 | }' http://$1/MetaService/meta_manager 23 | echo -e "\n" 24 | 25 | #查询table 26 | curl -d '{ 27 | "op_type" : "QUERY_SCHEMA" 28 | }' http://$1/MetaService/query 29 | echo -e "\n" 30 | 31 | -------------------------------------------------------------------------------- /src/tools/script/drop_index.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | databasename=$2 4 | table_name=$3 5 | index_name=$4 6 | 7 | curl -d '{ 8 | "op_type" : "OP_DROP_INDEX", 9 | "table_info" : { 10 | "database" : "'$databasename'", 11 | "table_name" : "'$table_name'", 12 | "namespace_name" : "FENGCHAO", 13 | "indexs" : [ 14 | {"index_name" : "'$index_name'"} 15 | ] 16 | } 17 | }' http://$1/MetaService/meta_manager 18 | 19 | echo -e '\n' 20 | -------------------------------------------------------------------------------- /src/tools/script/drop_instance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | #删除实例 6 | echo -e "drop 实例, 有哪些字段就会更新哪些字段\n" 7 | echo 'param: meta_server_address store_address' 8 | drop_instance=$2 9 | curl -d '{ 10 | "op_type": "OP_DROP_INSTANCE", 11 | "instance": { 12 | "address" : "'$2'", 13 | "physical_room": "njjs" 14 | } 15 | }' http://$1/MetaService/meta_manager 16 | echo -e "\n" 17 | 18 | -------------------------------------------------------------------------------- /src/tools/script/drop_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | #创建namespace 6 | echo -e "创建namespace\n" 7 | curl -d '{ 8 | "op_type":"OP_DROP_NAMESPACE", 9 | "namespace_info":{ 10 | "namespace_name": "TEST" 11 | } 12 | }' http://$1/MetaService/meta_manager 13 | echo -e "\n" 14 | 15 | #查询namespace 16 | curl -d '{ 17 | "op_type" : "QUERY_NAMESPACE" 18 | }' http://$1/MetaService/query 19 | echo -e "\n" 20 | 21 | -------------------------------------------------------------------------------- /src/tools/script/drop_region.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #删除region 7 | echo -e "drop region\n" 8 | curl -d '{ 9 | "op_type":"OP_DROP_REGION", 10 | "drop_region_ids":[2109836,2321095] 11 | }' http://$1/MetaService/meta_manager 12 | echo -e "\n" 13 | 14 | -------------------------------------------------------------------------------- /src/tools/script/drop_table.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | 4 | echo -e "\n" 5 | echo -e "drop table\n" 6 | curl -d '{ 7 | "op_type":"OP_DROP_TABLE", 8 | "table_info": { 9 | "table_name": "clue_history", 10 | "database": "TEST", 11 | "namespace_name": "TEST" 12 | } 13 | }' http://$1/MetaService/meta_manager 14 | echo -e "\n" 15 | 16 | curl -d '{ 17 | "op_type" : "QUERY_SCHEMA" 18 | }' http://$1/MetaService/query 19 | 20 | 21 | curl -d '{ 22 | "op_type":"OP_DROP_TABLE", 23 | "table_info": { 24 | "table_name": "ideacontent_test", 25 | "database": "TEST", 26 | "namespace_name": "TEST" 27 | } 28 | }'
http://$1/MetaService/meta_manager 29 | -------------------------------------------------------------------------------- /src/tools/script/gen_done_file.sh: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/tools/script/get_applied_index.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | curl -d '{ 5 | "region_id" : '$2' 6 | }' http://$1/StoreService/get_applied_index 7 | 8 | echo -e '\n' 9 | -------------------------------------------------------------------------------- /src/tools/script/init_meta_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #验证场景:完成的meta_server流程 3 | 4 | echo -e "add logic idc: bj nj hz\n" 5 | curl -s -d '{ 6 | "op_type": "OP_ADD_LOGICAL", 7 | "logical_rooms": { 8 | "logical_rooms" : ["bj", "nj", "gz"] 9 | } 10 | }' http://$1/MetaService/meta_manager 11 | 12 | 13 | #插入物理机房 14 | echo -e "add physical idc: bj nj gz\n" 15 | curl -s -d '{ 16 | "op_type": "OP_ADD_PHYSICAL", 17 | "physical_rooms": { 18 | "logical_room" : "bj", 19 | "physical_rooms" : ["bj"] 20 | } 21 | }' http://$1/MetaService/meta_manager 22 | 23 | curl -s -d '{ 24 | "op_type": "OP_ADD_PHYSICAL", 25 | "physical_rooms": { 26 | "logical_room" : "nj", 27 | "physical_rooms" : ["nj"] 28 | } 29 | }' http://$1/MetaService/meta_manager 30 | 31 | curl -s -d '{ 32 | "op_type": "OP_ADD_PHYSICAL", 33 | "physical_rooms": { 34 | "logical_room" : "gz", 35 | "physical_rooms" : ["gz"] 36 | } 37 | }' http://$1/MetaService/meta_manager 38 | 39 | #查询逻辑机房 40 | echo -e "query logical idc\n" 41 | curl -s -d '{ 42 | "op_type" : "QUERY_LOGICAL" 43 | }' http://$1/MetaService/query 44 | 45 | #查询物理机房 46 | echo -e "\nquery physical idc\n" 47 | curl -s -d '{ 48 | "op_type" : "QUERY_PHYSICAL" 49 | }' http://$1/MetaService/query 50 | 51 | ##查询测试实例 52 | echo -e "\nquery instance\n" 53 | curl -s -d '{ 54 | "op_type" : "QUERY_INSTANCE" 55 | }' http://$1/MetaService/query 56 | 57 | -------------------------------------------------------------------------------- /src/tools/script/meta_query.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-12-15 3 | #作用:目前metaserver提供了7类查询接口,分别用来查询逻辑机房、物理机房、实例信息、权限信息# namespace datable table信息 4 | 5 | echo -e '\n' 6 | echo -e "查询当前系统中某个或全部逻辑机房下有哪些物理机房" 7 | curl -d '{ 8 | "op_type" : "QUERY_LOGICAL", 9 | "logical_room" : "bj" 10 | }' http://$1/MetaService/query 11 | 12 | echo -e '\n' 13 | echo -e "查询某个或全部物理机房信息,包括所属的逻辑机房和物理机房下机器的信息" 14 | curl -d '{ 15 | "op_type" : "QUERY_PHYSICAL", 16 | "physical_room" : "bjyz" 17 | }' http://$1/MetaService/query 18 | 19 | echo -e '\n' 20 | echo -e "查询某个或者全部store信息,如果查询全部,则不显示region的分布信息" 21 | curl -d '{ 22 | "op_type" : "QUERY_INSTANCE", 23 | "instance_address" : "127.0.0.1:8010" 24 | }' http://$1/MetaService/query 25 | 26 | echo -e '\n' 27 | echo -e "查询某个或者全部的用户权限信息" 28 | curl -d '{ 29 | "op_type" : "QUERY_USERPRIVILEG", 30 | "user_name" : "test" 31 | }' http://$1/MetaService/query 32 | 33 | echo -e '\n' 34 | echo -e "查询某个或者全部的namespace" 35 | curl -d '{ 36 | "op_type" : "QUERY_NAMESPACE", 37 | "namespace_name" : "Test" 38 | }' http://$1/MetaService/query 39 | 40 | echo -e '\n' 41 | echo -e "查询某个或者全部的database" 42 | curl -d '{ 43 | "op_type" : "QUERY_DATABASE", 44 | "database" : "TEST" 45 | }' http://$1/MetaService/query 46 | 47 | echo -e '\n' 48 | echo -e 
"查询某个或者全部的table" 49 | curl -d '{ 50 | "op_type" : "QUERY_SCHEMA", 51 | "table_name" : "ideacontent" 52 | }' http://$1/MetaService/query 53 | 54 | echo -e '\n' 55 | echo -e "查询某个或者全部的region" 56 | curl -d '{ 57 | "op_type" : "QUERY_REGION", 58 | "region_id" : 1 59 | }' http://$1/MetaService/query 60 | -------------------------------------------------------------------------------- /src/tools/script/meta_query_region_ids.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | 4 | echo -e "查看从instance角度和raft角度 store上存储的region的diff" 5 | curl -d '{ 6 | "op_type": "QUERY_REGION_IDS", 7 | "instance_address": "'$2'" 8 | }' http://$1/MetaService/query 9 | echo -e "\n" 10 | 11 | -------------------------------------------------------------------------------- /src/tools/script/meta_query_region_peers_status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo -e "query region peers status\n" 3 | echo 'param: meta address' 4 | curl -d '{ 5 | "op_type" : "QUERY_REGION_PEER_STATUS" 6 | }' http://$1/MetaService/query 7 | echo -e "\n" 8 | -------------------------------------------------------------------------------- /src/tools/script/meta_region_recovery.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo -e '\n' 4 | echo -e "bad region recovery" 5 | curl -d '{ 6 | "op_type" : "OP_RECOVERY_ALL_REGION", 7 | "resource_tags" : [""] 8 | }' http://$1/MetaService/meta_manager |tee result_`date +%Y_%m_%d_%k_%M_%S` 9 | echo -e '\n' 10 | -------------------------------------------------------------------------------- /src/tools/script/modify_ddlwork.sh: -------------------------------------------------------------------------------- 1 | echo "二级索引" 2 | curl -d '{ 3 | "op_type" : "OP_DELETE_DDLWORK", 4 | "ddlwork_info" : { 5 | "table_id" : '$2' 6 | } 7 | }' http://$1/MetaService/meta_manager 8 | echo -e "\n" 9 | 10 | echo "暂停全局二级索引任务" 11 | curl -d '{ 12 | "op_type" : "OP_SUSPEND_DDL_WORK", 13 | "global_ddl_request" : { 14 | "table_id" : '$2' 15 | } 16 | }' http://$1/MetaService/meta_manager 17 | echo -e "\n" 18 | 19 | echo "重启全局二级索引任务" 20 | curl -d '{ 21 | "op_type" : "OP_RESTART_DDL_WORK", 22 | "global_ddl_request" : { 23 | "table_id" : '$2' 24 | } 25 | }' http://$1/MetaService/meta_manager 26 | echo -e "\n" 27 | 28 | 29 | -------------------------------------------------------------------------------- /src/tools/script/modify_index_status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2020-09-18 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | echo -e "修改索引状态\n" 7 | echo -e "IHS_NORMAL : 普通索引\n" 8 | echo -e "IHS_DISABLE : 屏蔽索引\n" 9 | curl -d '{ 10 | "op_type":"OP_SET_INDEX_HINT_STATUS", 11 | "table_info": { 12 | "table_name": "TEST", 13 | "database": "TEST", 14 | "namespace_name": "TEST", 15 | "indexs": [ { 16 | "index_name" : "TEST", 17 | "hint_status" : "IHS_DISABLE" 18 | } 19 | ] 20 | 21 | } 22 | }' http://$1/MetaService/meta_manager 23 | echo -e "\n" -------------------------------------------------------------------------------- /src/tools/script/modify_resource_tag.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #创建table 7 | echo -e "创建table\n" 8 | curl -d '{ 9 | "op_type":"OP_MODIFY_RESOURCE_TAG", 10 | "table_info": { 
11 | "table_name": "heartbeat", 12 | "database": "TEST", 13 | "namespace_name": "TEST", 14 | "resource_tag" : "qa" 15 | } 16 | }' http://$1/MetaService/meta_manager 17 | echo -e "\n" 18 | 19 | -------------------------------------------------------------------------------- /src/tools/script/op_close_load_balance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | 4 | echo -e "打开meta server的负载均衡策略,但是切主之后会自动关闭" 5 | curl -d '{ 6 | "op_type": "OP_CLOSE_LOAD_BALANCE", 7 | "resource_tags": ["cip-yz"] 8 | }' http://$1/MetaService/meta_manager 9 | echo -e "\n" 10 | 11 | -------------------------------------------------------------------------------- /src/tools/script/op_open_load_balance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | 4 | echo -e "打开meta server的负载均衡策略,但是切主之后会自动关闭" 5 | curl -d '{ 6 | "op_type": "OP_OPEN_LOAD_BALANCE", 7 | "resource_tags": ["cip-yz"] 8 | }' http://$1/MetaService/meta_manager 9 | echo -e "\n" 10 | 11 | -------------------------------------------------------------------------------- /src/tools/script/op_unsafe_decision.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | 4 | echo -e "打开log_index是0 删除region 和 region_id所在的table id不存在删除region开关" 5 | curl -d '{ 6 | "op_type": "OP_OPEN_UNSAFE_DECISION" 7 | }' http://$1/MetaService/meta_manager 8 | echo -e "\n" 9 | 10 | -------------------------------------------------------------------------------- /src/tools/script/query_ddlwork.sh: -------------------------------------------------------------------------------- 1 | curl -d '{ 2 | "op_type" : "QUERY_DDLWORK", 3 | "table_id" : '$2' 4 | }' http://$1/MetaService/query 5 | echo -e "\n" 6 | -------------------------------------------------------------------------------- /src/tools/script/query_diff_region_ids.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | 4 | echo -e "查看从instance角度和raft角度 store上存储的region的diff" 5 | curl -d '{ 6 | "op_type": "QUERY_DIFF_REGION_IDS", 7 | "instance_address": "'$2'" 8 | }' http://$1/MetaService/query 9 | echo -e "\n" 10 | 11 | -------------------------------------------------------------------------------- /src/tools/script/query_faulty_instance.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: UTF-8 -*- 3 | 4 | import re 5 | import json 6 | import requests 7 | 8 | def query_faulty_instance(hosts): 9 | ''' 10 | 查询状态不正常的store 11 | ''' 12 | data = {'op_type': 'QUERY_INSTANCE_FLATTEN'} 13 | url = 'http://' + hosts + '/MetaService/query' 14 | baikal_res = requests.post(url, data=json.dumps(data)) 15 | normal_instance = [] 16 | faulty_instance = [] 17 | if baikal_res.status_code == 200: 18 | for item in baikal_res.json()['flatten_instances']: 19 | if item['status'] == 'NORMAL': 20 | normal_instance.append(item['address']) 21 | else: 22 | faulty_instance.append(item['address']) 23 | regex = re.compile('baikalStore') 24 | instance_list = [] 25 | for host in faulty_instance: 26 | ip, _ = host.split(':') 27 | url = 'http://api.matrix.baidu.com/api/v1/matrix/host/' + ip 28 | matrix_res = requests.get(url) 29 | if baikal_res.status_code == 200: 30 | for instance in matrix_res.json()['instances']: 31 | if re.search(regex, instance): 32 | instance_list.append(instance) 33 | 
print instance 34 | 35 | if __name__ == '__main__': 36 | query_faulty_instance('10.152.70.12:8110') 37 | -------------------------------------------------------------------------------- /src/tools/script/remove_privilege.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | 4 | #删除权限 5 | curl -d '{ 6 | "op_type":"OP_DROP_PRIVILEGE", 7 | "user_privilege" : { 8 | "username" : "******", 9 | "password" : "******", 10 | "namespace_name" : "TEST", 11 | "privilege_table" : [{ 12 | "database" : "TEST", 13 | "table_name" : "wordinfo" 14 | }] 15 | } 16 | }' http://$1/MetaService/meta_manager 17 | echo -e "\n" 18 | 19 | #查询权限 20 | curl -d '{ 21 | "op_type" : "QUERY_USERPRIVILEG" 22 | }' http://$1/MetaService/query 23 | echo -e "\n" 24 | -------------------------------------------------------------------------------- /src/tools/script/remove_region.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:在store删除无用的region 4 | 5 | echo -e "remove region to store" 6 | echo 'param: address region_id' 7 | region_id=$2 8 | curl -d '{ 9 | "region_id": '$region_id', 10 | "force": true 11 | }' http://$1/StoreService/remove_region 12 | echo -e "\n" 13 | 14 | 15 | -------------------------------------------------------------------------------- /src/tools/script/rename_field.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #创建table 7 | echo -e "修改列名\n" 8 | curl -d '{ 9 | "op_type":"OP_RENAME_FIELD", 10 | "table_info": { 11 | "table_name": "wordinfo_new", 12 | "database": "TEST", 13 | "namespace_name": "TEST", 14 | "fields": [ { 15 | "field_name" : "wordid", 16 | "new_field_name": "id" 17 | }, 18 | { 19 | "field_name" : "showword", 20 | "new_field_name": "literal" 21 | } 22 | ] 23 | } 24 | }' http://$1/MetaService/meta_manager 25 | echo -e "\n" 26 | 27 | #查询table 28 | curl -d '{ 29 | "op_type" : "QUERY_SCHEMA" 30 | }' http://$1/MetaService/query 31 | echo -e "\n" 32 | 33 | -------------------------------------------------------------------------------- /src/tools/script/rename_table.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #创建table 7 | echo -e "rename table\n" 8 | curl -d '{ 9 | "op_type":"OP_RENAME_TABLE", 10 | "table_info": { 11 | "table_name": "wordinfo", 12 | "new_table_name": "wordinfo_new", 13 | "database": "TEST", 14 | "namespace_name": "TEST" 15 | } 16 | }' http://$1/MetaService/meta_manager 17 | echo -e "\n" 18 | 19 | #查询table 20 | curl -d '{ 21 | "op_type" : "QUERY_SCHEMA" 22 | }' http://$1/MetaService/query 23 | -------------------------------------------------------------------------------- /src/tools/script/restore_region.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | echo -e "restore region through lower and upper region_id" 7 | curl -d '{ 8 | "op_type":"OP_RESTORE_REGION", 9 | "restore_region": { 10 | "restore_region_id":'$2' 11 | } 12 | }' http://$1/MetaService/meta_manager 13 | echo -e "\n" 14 | 15 | 16 | -------------------------------------------------------------------------------- /src/tools/script/restore_table.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #用来恢复误删的表 3 | echo -e "\n" 4 | curl -d '{ 5 | "op_type":"OP_RESTORE_TABLE", 6 | "table_info": { 7 | "table_name": "test", 8 | "database": "TEST", 9 | "namespace_name": "TEST" 10 | } 11 | }' http://$1/MetaService/meta_manager 12 | echo -e "\n" 13 | 14 | -------------------------------------------------------------------------------- /src/tools/script/rollback_txn.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | table_id=$2 4 | 5 | curl -d '{ 6 | "region_ids" : [], 7 | "clear_all_txns" : true, 8 | "table_id" : '$table_id', 9 | "txn_timeout" : 100 10 | }' http://$1/StoreService/query_region 11 | 12 | echo -e '\n' 13 | -------------------------------------------------------------------------------- /src/tools/script/send_no_op.sh: -------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 | #测试场景: region_raft_control 3 | 4 | echo -e "send no_op\n" 5 | echo 'param: address region_id region_version' 6 | region_id=$2 7 | region_version=$3 8 | curl -d '{ 9 | "op_type" : "OP_NONE", 10 | "region_id" : '$region_id', 11 | "region_version" : '$region_version' 12 | }' http://$1/StoreService/query 13 | 14 | 15 | -------------------------------------------------------------------------------- /src/tools/script/set_instance_dead.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:在store删除无用的region 4 | 5 | echo -e "set instance dead, 迁移走所有的region" 6 | echo -e "使用前提是该store不再上报心跳" 7 | echo 'param: meta_server_address, store_address' 8 | store=$2 9 | curl -d '{ 10 | "op_type": "OP_SET_INSTANCE_MIGRATE", 11 | "instance": { 12 | "address" : "'$store'" 13 | } 14 | }' http://$1/MetaService/meta_manager 15 | echo -e "\n" 16 | 17 | 18 | -------------------------------------------------------------------------------- /src/tools/script/show_processlist.sh: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/tools/script/split_region.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | echo -e "split region, 分配一个region_id\n" 7 | curl -d '{ 8 | "op_type":"OP_SPLIT_REGION", 9 | "region_split": { 10 | "region_id": 1, 11 | "split_key": "not used" 12 | } 13 | }' http://$1/MetaService/meta_manager 14 | echo -e "\n" 15 | 16 | 17 | -------------------------------------------------------------------------------- /src/tools/script/sql/add_privilege.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | # handle add_privilege DBname WRITE 6 | 7 | #增加权限 8 | curl -d '{ 9 | "op_type":"OP_ADD_PRIVILEGE", 10 | "user_privilege" : { 11 | "username" : "baikal_user", 12 | "password" : "59ACp2Bl#", 13 | "namespace_name" : "FENGCHAO", 14 | "privilege_database" : [{ 15 | "database" : "BAIKALDB_ONES_CENTER", 16 | "database_rw" : 2 17 | }] 18 | } 19 | }' http://$1/MetaService/meta_manager 20 | echo -e "\n" 21 | 22 | #查询权限 23 | curl -d '{ 24 | "op_type" : "QUERY_USERPRIVILEG" 25 | }' http://$1/MetaService/query 26 | echo -e "\n" 27 | 
-------------------------------------------------------------------------------- /src/tools/script/sql/create_database.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | 6 | #创建database 7 | echo -e "创建database\n" 8 | curl -d '{ 9 | "op_type":"OP_CREATE_DATABASE", 10 | "database_info": { 11 | "database":"Tinder_Vector", 12 | "namespace_name":"FENGCHAO", 13 | "quota": 524288 14 | } 15 | }' http://$1/MetaService/meta_manager 16 | echo -e "\n" 17 | -------------------------------------------------------------------------------- /src/tools/script/sql/create_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | #创建namespace 6 | echo -e "创建namespace\n" 7 | curl -d '{ 8 | "op_type":"OP_CREATE_NAMESPACE", 9 | "namespace_info":{ 10 | "namespace_name": "MALL", 11 | "quota": 1048576 12 | } 13 | }' http://$1/MetaService/meta_manager 14 | echo -e "\n" 15 | 16 | #查询namespace 17 | curl -d '{ 18 | "op_type" : "QUERY_NAMESPACE" 19 | }' http://$1/MetaService/query 20 | echo -e "\n" 21 | -------------------------------------------------------------------------------- /src/tools/script/sql/create_user.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | # handle add_user namespace username password json 6 | 7 | #创建用户 8 | curl -d '{ 9 | "op_type":"OP_CREATE_USER", 10 | "user_privilege" : { 11 | "username" : "baikal_user", 12 | "password" : "59ACp2Bl#", 13 | "namespace_name" : "FENGCHAO", 14 | "privilege_database" : [{ 15 | "database" : "BAIKALDB_ONES_CENTER", 16 | "database_rw" : 2 17 | }], 18 | "bns":["preonline", "offline"], 19 | "ip":["127.0.0.1", "127.0.0.2"] 20 | } 21 | }' http://$1/MetaService/meta_manager 22 | echo -e "\n" 23 | 24 | #查询权限 25 | curl -d '{ 26 | "op_type" : "QUERY_USERPRIVILEG" 27 | }' http://$1/MetaService/query 28 | echo -e "\n" 29 | -------------------------------------------------------------------------------- /src/tools/script/sql/drop_instance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | # handle drop_instance physical_room address 6 | 7 | #删除实例 8 | echo -e "drop 实例, 有哪些字段就会更新哪些字段\n" 9 | echo 'param: meta_server_address store_address' 10 | drop_instance=$2 11 | curl -d '{ 12 | "op_type": "OP_DROP_INSTANCE", 13 | "instance": { 14 | "address" : "'$2'", 15 | "physical_room": "njjs" 16 | } 17 | }' http://$1/MetaService/meta_manager 18 | echo -e "\n" 19 | -------------------------------------------------------------------------------- /src/tools/script/sql/drop_region.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | # handle drop_region 1 2 3 6 | 7 | echo -e "\n" 8 | #删除region 9 | echo -e "drop region\n" 10 | curl -d '{ 11 | "op_type":"OP_DROP_REGION", 12 | "drop_region_ids":['$2'] 13 | }' http://$1/MetaService/meta_manager 14 | echo -e "\n" 15 | 16 | -------------------------------------------------------------------------------- /src/tools/script/sql/link_binlog.sh: -------------------------------------------------------------------------------- 1 | # handle link_binlog srcNamespace srcDB srcTable binlogNamespace binlogDB binlogTable 
2 | 3 | curl -d '{ 4 | "op_type":"OP_LINK_BINLOG", 5 | "table_info": { 6 | "table_name": "'$4'", 7 | "database": "'$3'", 8 | "namespace_name": "'$2'" 9 | }, 10 | "binlog_info": { 11 | "table_name": "'$6'", 12 | "database": "'$5'", 13 | "namespace_name": "'$2'" 14 | } 15 | }' http://$1/MetaService/meta_manager 16 | -------------------------------------------------------------------------------- /src/tools/script/sql/meta_region_recovery.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # handle restore region recoveryType resourceTag 4 | 5 | echo -e '\n' 6 | echo -e "bad region recovery" 7 | curl -d '{ 8 | "op_type" : "OP_RECOVERY_ALL_REGION", 9 | "resource_tags" : ["qa","qadisk"] 10 | }' http://$1/MetaService/meta_manager |tee result_`date +%Y_%m_%d_%k_%M_%S` 11 | echo -e '\n' 12 | -------------------------------------------------------------------------------- /src/tools/script/sql/modify_ddlwork.sh: -------------------------------------------------------------------------------- 1 | echo "二级索引" 2 | 3 | # handle delete_ddl TableID global/local 4 | # handle suspend_ddl TableID 5 | # handle restart_ddl TableID 6 | 7 | curl -d '{ 8 | "op_type" : "OP_DELETE_DDLWORK", 9 | "ddlwork_info" : { 10 | "table_id" : '$2' 11 | } 12 | }' http://$1/MetaService/meta_manager 13 | echo -e "\n" 14 | 15 | echo "暂停二级索引任务" 16 | curl -d '{ 17 | "op_type" : "OP_SUSPEND_DDL_WORK", 18 | "index_ddl_request" : { 19 | "table_id" : '$2' 20 | } 21 | }' http://$1/MetaService/meta_manager 22 | echo -e "\n" 23 | 24 | echo "重启二级索引任务" 25 | curl -d '{ 26 | "op_type" : "OP_RESTART_DDL_WORK", 27 | "index_ddl_request" : { 28 | "table_id" : '$2' 29 | } 30 | }' http://$1/MetaService/meta_manager 31 | echo -e "\n" 32 | 33 | 34 | -------------------------------------------------------------------------------- /src/tools/script/sql/modify_resource_tag.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | # handle table_resource_tag TableName NewResourceTag 6 | 7 | echo -e "\n" 8 | #创建table 9 | echo -e "创建table\n" 10 | curl -d '{ 11 | "op_type":"OP_MODIFY_RESOURCE_TAG", 12 | "table_info": { 13 | "table_name": "heartbeat", 14 | "database": "TEST", 15 | "namespace_name": "TEST", 16 | "resource_tag" : "qa" 17 | } 18 | }' http://$1/MetaService/meta_manager 19 | echo -e "\n" 20 | 21 | -------------------------------------------------------------------------------- /src/tools/script/sql/op_close_load_balance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # sh op_close_load_balance.sh 10.1.1.1:8000 resource_tag 3 | 4 | # handle load_balance ResourceTag open/close 5 | # handle migrate ResourceTag open/close 6 | 7 | # OP_CLOSE_LOAD_BALANCE 负载均衡 8 | # OP_CLOSE_MIGRATE 故障迁移 9 | curl -d '{ 10 | "op_type": "OP_CLOSE_LOAD_BALANCE", 11 | "resource_tags": ["'$2'"] 12 | }' http://$1/MetaService/meta_manager 13 | echo -e "\n" 14 | 15 | curl -d '{ 16 | "op_type": "OP_CLOSE_MIGRATE", 17 | "resource_tags": ["'$2'"] 18 | }' http://$1/MetaService/meta_manager 19 | echo -e "\n" 20 | 21 | -------------------------------------------------------------------------------- /src/tools/script/sql/op_open_load_balance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | 4 | # handle load_balance ResourceTag open/close 5 | # handle migrate ResourceTag open/close 6 | 7 | # 
OP_OPEN_LOAD_BALANCE 负载均衡 8 | # OP_OPEN_MIGRATE 故障迁移 9 | echo -e "打开meta server的负载均衡策略,但是切主之后会自动关闭" 10 | curl -d '{ 11 | "op_type": "OP_OPEN_MIGRATE", 12 | "resource_tags": ["'$2'"] 13 | }' http://$1/MetaService/meta_manager 14 | echo -e "\n" 15 | 16 | curl -d '{ 17 | "op_type": "OP_OPEN_LOAD_BALANCE", 18 | "resource_tags": ["'$2'"] 19 | }' http://$1/MetaService/meta_manager 20 | echo -e "\n" 21 | 22 | -------------------------------------------------------------------------------- /src/tools/script/sql/query_ddlwork.sh: -------------------------------------------------------------------------------- 1 | 2 | # show ddlwork TableID (region) 3 | 4 | curl -d '{ 5 | "op_type" : "QUERY_DDLWORK", 6 | "table_id" : '$2' 7 | }' http://$1/MetaService/query 8 | echo -e "\n" 9 | -------------------------------------------------------------------------------- /src/tools/script/sql/remove_privilege.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | 4 | # handle rm_privilege DB [table]; 5 | 6 | #删除权限 7 | curl -d '{ 8 | "op_type":"OP_DROP_PRIVILEGE", 9 | "user_privilege" : { 10 | "username" : "******", 11 | "password" : "******", 12 | "namespace_name" : "TEST", 13 | "privilege_table" : [{ 14 | "database" : "TEST", 15 | "table_name" : "wordinfo" 16 | }] 17 | } 18 | }' http://$1/MetaService/meta_manager 19 | echo -e "\n" 20 | 21 | #查询权限 22 | curl -d '{ 23 | "op_type" : "QUERY_USERPRIVILEG" 24 | }' http://$1/MetaService/query 25 | echo -e "\n" 26 | -------------------------------------------------------------------------------- /src/tools/script/sql/remove_region.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:在store删除无用的region 4 | 5 | # handle store_rm_region storeAddress regionID (no_delay) (force) 6 | 7 | echo -e "remove region to store" 8 | echo 'param: address region_id' 9 | region_id=$2 10 | curl -d '{ 11 | "region_id": '$region_id', 12 | "need_delay_drop":true, 13 | "force": true 14 | }' http://$1/StoreService/remove_region 15 | echo -e "\n" 16 | 17 | 18 | -------------------------------------------------------------------------------- /src/tools/script/sql/set_instance_dead.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:在store删除无用的region 4 | 5 | # handle migrate_instance InstanceAddress 6 | 7 | echo -e "set instance dead, 迁移走所有的region" 8 | echo -e "使用前提是该store不再上报心跳" 9 | echo 'param: meta_server_address, store_address' 10 | store=$2 11 | curl -d '{ 12 | "op_type": "OP_SET_INSTANCE_MIGRATE", 13 | "instance": { 14 | "address" : "'$store'" 15 | } 16 | }' http://$1/MetaService/meta_manager 17 | echo -e "\n" 18 | -------------------------------------------------------------------------------- /src/tools/script/sql/split_region.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | # handle split_region regionID splitKey 6 | 7 | echo -e "\n" 8 | echo -e "split region, 分配一个region_id\n" 9 | curl -d '{ 10 | "op_type":"OP_SPLIT_REGION", 11 | "region_split": { 12 | "region_id": 1, 13 | "split_key": "not used" 14 | } 15 | }' http://$1/MetaService/meta_manager 16 | echo -e "\n" 17 | 18 | 19 | -------------------------------------------------------------------------------- /src/tools/script/sql/store_add_peer.sh: 
-------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 | #测试场景: region_raft_control 3 | 4 | # handle store_add_peer leaderAddress tableID regionID newPeerAddress 5 | 6 | echo -e "sender to leader to add_peer\n, 1 init region 2 add_peer" 7 | echo 'param: address region_id' 8 | region_id=$2 9 | curl -d '{ 10 | "region_id" : '$region_id', 11 | "old_peers" : ["127.0.0.1:8222", "127.0.0.1:8222", "127.0.0.1:8222"], 12 | "new_peers" : ["127.0.0.1:8222", "127.0.0.1:8222", "127.0.0.1:8222", "127.0.0.1:8222"] 13 | }' http://$1/StoreService/add_peer 14 | 15 | 16 | -------------------------------------------------------------------------------- /src/tools/script/sql/store_compact_region.sh: -------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 | #测试场景: 主动调用做compaction, 不传region_id, 代表整个store做compact 3 | 4 | # handle store_compact_region type(data, meta, raft_log) RegionID 5 | 6 | echo -e "compact_region\n" 7 | echo 'param: address' 8 | curl -d '{ 9 | "compact_raft_log": false 10 | }' http://$1/StoreService/compact_region 11 | echo -e "\n" 12 | 13 | -------------------------------------------------------------------------------- /src/tools/script/sql/store_query_region.sh: -------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 | #测试场景: 不传region_id, 返回整个机房的region信息 3 | 4 | # show store region regionID 5 | 6 | echo -e "query_region\n" 7 | echo 'param: address' 8 | curl -d '{ 9 | "region_ids":[9525031,9526477] 10 | }' http://$1/StoreService/query_region 11 | echo -e "\n" 12 | 13 | 14 | -------------------------------------------------------------------------------- /src/tools/script/sql/store_query_txn.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 查看region所属的txn状态 3 | 4 | # show store_txn storeAddress regionID region_version 5 | 6 | echo -e "query region txns state" 7 | echo 'param: address region_id' 8 | region_id=$2 9 | curl -d '{ 10 | "op_type" : "OP_TXN_QUERY_STATE", 11 | "region_id": '$region_id', 12 | "region_version": 1, 13 | "force": true 14 | }' http://$1/StoreService/query 15 | echo -e "\n" 16 | -------------------------------------------------------------------------------- /src/tools/script/sql/store_remove_peer.sh: -------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 | #测试场景: region_raft_control 3 | 4 | # handle store_rm_peer leaderAddress tableID regionID rmPeerAddress 5 | 6 | #只有在系统崩溃的情况下可用,正常情况下不起作用 7 | echo -e "RemovePeer\n" 8 | echo 'param: address' 9 | curl -d '{ 10 | "op_type" : "SetPeer", 11 | "region_id" : 2260710, 12 | "old_peers" : ["127.0.0.1:8219", " 127.0.0.1:8219", "127.0.0.1:8219"], 13 | "new_peers" : ["127.0.0.1:8219", " 127.0.0.1:8219"] 14 | }' http://$1/StoreService/region_raft_control 15 | echo -e "\n" 16 | 17 | 18 | -------------------------------------------------------------------------------- /src/tools/script/sql/store_set_peer.sh: -------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 | #测试场景: region_raft_control 3 | 4 | # handle store_add_peer leaderAddress tableID regionID address 5 | # handle store_rm_peer leaderAddress tableID regionID address 6 | 7 | #只有在系统崩溃的情况下可用,正常情况下不起作用 8 | echo -e "Force set peer\n" 9 | echo 'param: address region_id new_peer' 10 | region_id=$2 11 | new_peer=$1 12 | curl -d '{ 13 | "op_type" : "SetPeer", 14 | 
"region_id" : '$region_id', 15 | "new_peers" : ["'$new_peer'"], 16 | "force" : true 17 | }' http://$1/StoreService/region_raft_control 18 | 19 | 20 | -------------------------------------------------------------------------------- /src/tools/script/sql/update_binlog.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2020-6-22 3 | 4 | # handle link_binlog srcNamespace srcDB srcTable binlogNamespace binlogDB binlogTable 5 | 6 | echo -e "\n" 7 | curl -d '{ 8 | "op_type":"OP_UNLINK_BINLOG", 9 | "table_info": { 10 | "table_name": "tuuu", 11 | "database": "Feed", 12 | "namespace_name": "FENGCHAO" 13 | }, 14 | "binlog_info": { 15 | "table_name": "tt", 16 | "database": "Feed", 17 | "namespace_name": "FENGCHAO" 18 | } 19 | }' http://$1/MetaService/meta_manager 20 | echo -e "\n" 21 | -------------------------------------------------------------------------------- /src/tools/script/sql/update_dists.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | # handle update_dists TableName 6 | # '{ 7 | # "replica_num": 3, 8 | # "main_logical_room" : "bj", 9 | # "dists": [ 10 | # { 11 | # "logical_room": "bj", 12 | # "count" : 3 13 | # }] 14 | #}' 15 | 16 | echo -e "\n" 17 | #创建table 18 | echo -e "update replica dists\n" 19 | curl -d '{ 20 | "op_type":"OP_UPDATE_DISTS", 21 | "table_info": { 22 | "table_name": "'$2'", 23 | "database": "FC_Word", 24 | "namespace_name": "FENGCHAO", 25 | "replica_num": 3, 26 | "main_logical_room" : "bd", 27 | "dists": [ 28 | { 29 | "logical_room": "bd", 30 | "count" : 2 31 | }, 32 | { 33 | "logical_room": "bj", 34 | "count" : 1 35 | } 36 | ] 37 | } 38 | }' http://$1/MetaService/meta_manager 39 | echo -e "\n" 40 | 41 | -------------------------------------------------------------------------------- /src/tools/script/sql/update_instance_param.sh: -------------------------------------------------------------------------------- 1 | 2 | # handle instance_param resource_tag_or_address key value (is_meta, 默认false) 3 | 4 | #插入物理机房 5 | echo -e "update instance param\n" 6 | curl -d '{ 7 | "op_type": "OP_UPDATE_INSTANCE_PARAM", 8 | "instance_params" : [{ 9 | "resource_tag_or_address" : "qa", 10 | "params" : [ 11 | { 12 | "key" : "use_token_bucket", 13 | "value" : "1" 14 | }, 15 | { 16 | "key" : "get_token_weight", 17 | "value" : "5" 18 | }, 19 | { 20 | "key" : "token_bucket_burst_window_ms", 21 | "value" : "10" 22 | }, 23 | { 24 | "key" : "token_num_acquired_each_time", 25 | "value" : "0" 26 | }, 27 | { 28 | "key" : "max_tokens_per_second", 29 | "value" : "100000" 30 | }, 31 | { 32 | "key" : "sql_extended_burst_percent", 33 | "value" : "80" 34 | } 35 | ] 36 | }] 37 | }' http://$1/MetaService/meta_manager 38 | echo -e "\n" 39 | 40 | curl -d '{ 41 | "op_type": "QUERY_INSTANCE_PARAM", 42 | "instance_address": "127.0.0.1:8011" 43 | }' http://$1/MetaService/query 44 | -------------------------------------------------------------------------------- /src/tools/script/sql/update_main_logical_room.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # handle main_logical_room Table MainLogicalRoom 4 | 5 | echo -e "更新表主机房" 6 | curl -d '{ 7 | "op_type": "OP_UPDATE_MAIN_LOGICAL_ROOM", 8 | "table_info": { 9 | "table_name": "t1", 10 | "database": "TEST", 11 | "namespace_name": "TEST", 12 | "main_logical_room": "bj" 13 | } 14 | }' http://$1/MetaService/meta_manager 15 | echo -e "\n" 
16 | -------------------------------------------------------------------------------- /src/tools/script/sql/update_resource_tag.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | # handle table_resource_tag TableName NewResourceTag 6 | 7 | echo -e "\n" 8 | #创建table 9 | echo -e "创建table\n" 10 | curl -d '{ 11 | "op_type":"OP_MODIFY_RESOURCE_TAG", 12 | "table_info": { 13 | "table_name": "'$3'", 14 | "database": "'$2'", 15 | "namespace_name": "DMP", 16 | "resource_tag" : "DMPxinghe-yq" 17 | } 18 | }' http://$1/MetaService/meta_manager 19 | echo -e "\n" 20 | 21 | -------------------------------------------------------------------------------- /src/tools/script/sql/update_schema_conf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 打开、关闭计算存储分离 3 | # 使用方法: sh update_separate_switch.sh 10.1.1.1:8111 NAMESPACE DATABASE TABLE conf false/true 4 | 5 | echo -e "\n" 6 | curl -d '{ 7 | "op_type":"OP_UPDATE_SCHEMA_CONF", 8 | "table_info": { 9 | "table_name": "'$4'", 10 | "database": "'$3'", 11 | "namespace_name": "'$2'", 12 | "schema_conf":{ 13 | "'$5'": '$6' 14 | } 15 | } 16 | }' http://$1/MetaService/meta_manager 17 | echo -e "\n" 18 | 19 | -------------------------------------------------------------------------------- /src/tools/script/sql/update_split_lines.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 更新split_lines 3 | # 使用方法: sh update_split_lines.sh 10.1.1.1:8111 NAMESPACE DATABASE TABLE 1000000 4 | 5 | # handle split_lines Table SplitLines 6 | 7 | echo -e "\n" 8 | #创建table 9 | curl -d '{ 10 | "op_type":"OP_UPDATE_SPLIT_LINES", 11 | "table_info": { 12 | "table_name": "'$4'", 13 | "database": "'$3'", 14 | "namespace_name": "'$2'", 15 | "region_split_lines": '$5' 16 | } 17 | }' http://$1/MetaService/meta_manager 18 | echo -e "\n" 19 | 20 | -------------------------------------------------------------------------------- /src/tools/script/sql/update_ttl_duration.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 更新update_ttl_duration 3 | # 使用方法: sh update_ttl_duration.sh 10.1.1.1:8111 NAMESPACE DATABASE TABLE 1000000 4 | 5 | # handle ttl_duration Table ttl_duration 6 | 7 | echo -e "\n" 8 | #创建table 9 | curl -d '{ 10 | "op_type":"OP_UPDATE_TTL_DURATION", 11 | "table_info": { 12 | "table_name": "'$4'", 13 | "database": "'$3'", 14 | "namespace_name": "'$2'", 15 | "ttl_duration": '$5' 16 | } 17 | }' http://$1/MetaService/meta_manager 18 | echo -e "\n" 19 | 20 | -------------------------------------------------------------------------------- /src/tools/script/store_add_peer.sh: -------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 | #测试场景: region_raft_control 3 | 4 | echo -e "sender to leader to add_peer\n, 1 init region 2 add_peer" 5 | echo 'param: address region_id' 6 | region_id=$2 7 | curl -d '{ 8 | "region_id" : '$region_id', 9 | "old_peers" : ["127.0.0.1:8222", "127.0.0.1:8222", "127.0.0.1:8222"], 10 | "new_peers" : ["127.0.0.1:8222", "127.0.0.1:8222", "127.0.0.1:8222", "127.0.0.1:8222"] 11 | }' http://$1/StoreService/add_peer 12 | 13 | 14 | -------------------------------------------------------------------------------- /src/tools/script/store_compact_region.sh: -------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 
| #测试场景: 主动调用做compaction, 不传region_id, 代表整个store做compact 3 | 4 | echo -e "compact_region\n" 5 | echo 'param: address' 6 | curl -d '{ 7 | "region_id":[200504], 8 | "compact_raft_log": false 9 | }' http://$1/StoreService/compact_region 10 | echo -e "\n" 11 | 12 | 13 | -------------------------------------------------------------------------------- /src/tools/script/store_query_illegal_region.sh: -------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 | #测试场景: 不传region_id, 返回整个机房的region信息中leader是0.0.0.0的region 3 | 4 | echo -e "query_region\n" 5 | echo 'param: address' 6 | curl -d '{ 7 | }' http://$1/StoreService/query_illegal_region 8 | echo -e "\n" 9 | 10 | 11 | -------------------------------------------------------------------------------- /src/tools/script/store_query_region.sh: -------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 | #测试场景: 不传region_id, 返回整个机房的region信息 3 | 4 | echo -e "query_region\n" 5 | echo 'param: address' 6 | curl -d '{ 7 | }' http://$1/StoreService/query_region 8 | echo -e "\n" 9 | 10 | 11 | -------------------------------------------------------------------------------- /src/tools/script/store_query_txn.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 查看region所属的txn状态 3 | echo -e "query region txns state" 4 | echo 'param: address region_id' 5 | region_id=$2 6 | curl -d '{ 7 | "op_type" : "OP_TXN_QUERY_STATE", 8 | "region_id": '$region_id', 9 | "region_version": 1, 10 | "force": true 11 | }' http://$1/StoreService/query 12 | echo -e "\n" 13 | -------------------------------------------------------------------------------- /src/tools/script/store_remove_peer.sh: -------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 | #测试场景: region_raft_control 3 | 4 | #只有在系统崩溃的情况下可用,正常情况下不起作用 5 | echo -e "RemovePeer\n" 6 | echo 'param: address' 7 | curl -d '{ 8 | "op_type" : "SetPeer", 9 | "region_id" : 2260710, 10 | "old_peers" : ["127.0.0.1:8219", " 127.0.0.1:8219", "127.0.0.1:8219"], 11 | "new_peers" : ["127.0.0.1:8219", " 127.0.0.1:8219"] 12 | }' http://$1/StoreService/region_raft_control 13 | echo -e "\n" 14 | 15 | 16 | -------------------------------------------------------------------------------- /src/tools/script/store_restore_region.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | 4 | echo 'param: address region_id' 5 | curl -d '{ 6 | "force": true 7 | }' http://$1/StoreService/restore_region 8 | echo -e "\n" 9 | 10 | 11 | -------------------------------------------------------------------------------- /src/tools/script/store_rm_txn.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 查看region所属的txn状态 3 | 4 | # show store_txn storeAddress regionID region_version 5 | 6 | echo -e "query region txns state" 7 | echo 'param: address region_id' 8 | region_id=$2 9 | curl -d '{ 10 | "op_type" : "OP_TXN_COMPLETE", 11 | "region_id": '$region_id', 12 | "region_version": 0, 13 | "rollback_txn_ids": ['$3'], 14 | "force": true 15 | }' http://$1/StoreService/query 16 | echo -e "\n" 17 | -------------------------------------------------------------------------------- /src/tools/script/store_set_peer.sh: -------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 | #测试场景: region_raft_control 3 
| 4 | #只有在系统崩溃的情况下可用,正常情况下不起作用 5 | echo -e "Force set peer\n" 6 | echo 'param: address region_id new_peer' 7 | region_id=$2 8 | new_peer=$1 9 | curl -d '{ 10 | "op_type" : "SetPeer", 11 | "region_id" : '$region_id', 12 | "new_peers" : ["'$new_peer'"], 13 | "force" : true 14 | }' http://$1/StoreService/region_raft_control 15 | 16 | 17 | -------------------------------------------------------------------------------- /src/tools/script/store_snapshot_region.sh: -------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 | #测试场景: 主动调用做snapshot, 不传region_id, 代表整个store做snapshot 3 | 4 | echo -e "snapshot_region\n" 5 | echo 'param: address' 6 | curl -d '{ 7 | }' http://$1/StoreService/snapshot_region 8 | echo -e "\n" 9 | 10 | 11 | -------------------------------------------------------------------------------- /src/tools/script/store_split_region.sh: -------------------------------------------------------------------------------- 1 | #Created on 2017-12-23 2 | #测试场景: 主动调用做snapshot, 不传region_id, 代表整个store做snapshot 3 | 4 | echo -e "snapshot_region\n" 5 | echo 'param: address' 6 | curl -d '{ 7 | "region_ids":[585] 8 | }' http://$1/StoreService/manual_split_region 9 | echo -e "\n" 10 | 11 | 12 | -------------------------------------------------------------------------------- /src/tools/script/update_backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | #bool need_merge 5 | #bool storage_compute_separate 6 | #bool select_index_by_cost 7 | #optional float filter_ratio 8 | 9 | echo -e "\n" 10 | curl -d '{ 11 | "op_type":"OP_UPDATE_SCHEMA_CONF", 12 | "table_info": { 13 | "table_name": "tb_online_html", 14 | "database": "jz_b_render", 15 | "namespace_name": "FENGCHAO", 16 | "schema_conf":{ 17 | "backup_table": "BT_AUTO" 18 | } 19 | } 20 | }' http://$1/MetaService/meta_manager 21 | echo -e "\n" 22 | 23 | -------------------------------------------------------------------------------- /src/tools/script/update_binlog.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2020-6-22 3 | 4 | echo -e "\n" 5 | curl -d '{ 6 | "op_type":"OP_UNLINK_BINLOG", 7 | "table_info": { 8 | "table_name": "tuuu", 9 | "database": "Feed", 10 | "namespace_name": "FENGCHAO" 11 | }, 12 | "binlog_info": { 13 | "table_name": "tt", 14 | "database": "Feed", 15 | "namespace_name": "FENGCHAO" 16 | } 17 | }' http://$1/MetaService/meta_manager 18 | echo -e "\n" 19 | -------------------------------------------------------------------------------- /src/tools/script/update_byte_size.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #创建table 7 | echo -e "update byte_size_per_record\n" 8 | curl -d '{ 9 | "op_type":"OP_UPDATE_BYTE_SIZE", 10 | "table_info": { 11 | "table_name": "hotmap", 12 | "database": "TEST", 13 | "namespace_name": "TEST", 14 | "byte_size_per_record": 50 15 | } 16 | }' http://$1/MetaService/meta_manager 17 | echo -e "\n" 18 | 19 | -------------------------------------------------------------------------------- /src/tools/script/update_dists.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #创建table 7 | echo -e "update replica dists\n" 8 | curl -d '{ 9 | 
"op_type":"OP_UPDATE_DISTS", 10 | "table_info": { 11 | "table_name": "'$2'", 12 | "database": "testdb", 13 | "namespace_name": "TEST", 14 | "replica_num": 3, 15 | "dists": [ 16 | { 17 | "logical_room": "bj", 18 | "count" : 0 19 | }, 20 | { 21 | "logical_room": "nj", 22 | "count" : 3 23 | }, 24 | { 25 | "logical_room": "gz", 26 | "count" : 0 27 | } 28 | ] 29 | } 30 | }' http://$1/MetaService/meta_manager 31 | echo -e "\n" 32 | 33 | -------------------------------------------------------------------------------- /src/tools/script/update_field.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2020-12-17 3 | #测试场景:完成的modify field流程 4 | 5 | echo -e "\n" 6 | #comment字段用protobuf的bytes类型表示,请求时需要进行base64编码 7 | #可以在线编码https://www.base64encode.org/,"noskip" base64编码后为: "bm9za2lw" 8 | echo -e "修改列信息\n" 9 | curl -d '{ 10 | "op_type":"OP_MODIFY_FIELD", 11 | "table_info": { 12 | "table_name": "t", 13 | "database": "TestDB", 14 | "namespace_name": "TEST", 15 | "fields": [ { 16 | "field_name" : "id", 17 | "comment" : "bm9za2lw" 18 | }] 19 | } 20 | }' http://$1/MetaService/meta_manager 21 | echo -e "\n" 22 | 23 | #查询table 24 | curl -d '{ 25 | "op_type" : "QUERY_SCHEMA" 26 | }' http://$1/MetaService/query 27 | echo -e "\n" 28 | 29 | -------------------------------------------------------------------------------- /src/tools/script/update_instance.sh: -------------------------------------------------------------------------------- 1 | #插入物理机房 2 | echo -e "update 实例, 有哪些字段就会更新哪些字段\n" 3 | curl -d '{ 4 | "op_type": "OP_UPDATE_INSTANCE", 5 | "instance": { 6 | "address" : "127.0.0.1:8011", 7 | "status": 1 8 | } 9 | }' http://$1/MetaService/meta_manager 10 | echo -e "\n" 11 | 12 | curl -d '{ 13 | "op_type": "QUERY_INSTANCE", 14 | "instance_address": "127.0.0.1:8011" 15 | }' http://$1/MetaService/query 16 | -------------------------------------------------------------------------------- /src/tools/script/update_instance_param.sh: -------------------------------------------------------------------------------- 1 | #插入物理机房 2 | echo -e "update instance param\n" 3 | curl -d '{ 4 | "op_type": "OP_UPDATE_INSTANCE_PARAM", 5 | "instance_params" : [{ 6 | "resource_tag_or_address" : "qa", 7 | "params" : [ 8 | { 9 | "key" : "use_token_bucket", 10 | "value" : "1" 11 | }, 12 | { 13 | "key" : "get_token_weight", 14 | "value" : "5" 15 | }, 16 | { 17 | "key" : "token_bucket_burst_window_ms", 18 | "value" : "10" 19 | }, 20 | { 21 | "key" : "token_num_acquired_each_time", 22 | "value" : "0" 23 | }, 24 | { 25 | "key" : "max_tokens_per_second", 26 | "value" : "100000" 27 | }, 28 | { 29 | "key" : "sql_extended_burst_percent", 30 | "value" : "80" 31 | } 32 | ] 33 | }] 34 | }' http://$1/MetaService/meta_manager 35 | echo -e "\n" 36 | 37 | curl -d '{ 38 | "op_type": "QUERY_INSTANCE_PARAM", 39 | "instance_address": "127.0.0.1:8011" 40 | }' http://$1/MetaService/query 41 | -------------------------------------------------------------------------------- /src/tools/script/update_merge_switch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | curl -d '{ 7 | "op_type":"OP_UPDATE_SCHEMA_CONF", 8 | "table_info": { 9 | "table_name": "region_merge_not_sep", 10 | "database": "TEST", 11 | "namespace_name": "TEST", 12 | "schema_conf":{ 13 | "need_merge": false 14 | } 15 | } 16 | }' http://$1/MetaService/meta_manager 17 | echo -e "\n" 18 | 19 | 
-------------------------------------------------------------------------------- /src/tools/script/update_resource_tag.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #创建table 7 | echo -e "创建table\n" 8 | curl -d '{ 9 | "op_type":"OP_MODIFY_RESOURCE_TAG", 10 | "table_info": { 11 | "table_name": "'$2'", 12 | "database": "TEST", 13 | "namespace_name": "TEST", 14 | "resource_tag" : "qa" 15 | } 16 | }' http://$1/MetaService/meta_manager 17 | echo -e "\n" 18 | 19 | -------------------------------------------------------------------------------- /src/tools/script/update_schema_conf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | #bool need_merge 5 | #bool storage_compute_separate 6 | #bool select_index_by_cost 7 | #optional float filter_ratio 8 | 9 | echo -e "\n" 10 | curl -d '{ 11 | "op_type":"OP_UPDATE_SCHEMA_CONF", 12 | "table_info": { 13 | "table_name": "t_sort", 14 | "database": "TEST", 15 | "namespace_name": "TEST", 16 | "schema_conf":{ 17 | "select_index_by_cost": true 18 | } 19 | } 20 | }' http://$1/MetaService/meta_manager 21 | echo -e "\n" 22 | 23 | -------------------------------------------------------------------------------- /src/tools/script/update_separate_switch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | curl -d '{ 7 | "op_type":"OP_UPDATE_SCHEMA_CONF", 8 | "table_info": { 9 | "table_name": "region_merge_not_sep", 10 | "database": "TEST", 11 | "namespace_name": "TEST", 12 | "schema_conf":{ 13 | "storage_compute_separate": true 14 | } 15 | } 16 | }' http://$1/MetaService/meta_manager 17 | echo -e "\n" 18 | 19 | -------------------------------------------------------------------------------- /src/tools/script/update_split_lines.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #Created on 2017-11-22 3 | #测试场景:完成的meta_server流程 4 | 5 | echo -e "\n" 6 | #创建table 7 | curl -d '{ 8 | "op_type":"OP_UPDATE_SPLIT_LINES", 9 | "table_info": { 10 | "table_name": "ideainfo", 11 | "database": "TEST", 12 | "namespace_name": "TEST", 13 | "region_split_lines": 250000 14 | } 15 | }' http://$1/MetaService/meta_manager 16 | echo -e "\n" 17 | 18 | -------------------------------------------------------------------------------- /src/tools/script/update_ttl_duration.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 更新update_ttl_duration 3 | # 使用方法: sh update_ttl_duration.sh 10.1.1.1:8111 NAMESPACE DATABASE TABLE 1000000 4 | 5 | echo -e "\n" 6 | #创建table 7 | curl -d '{ 8 | "op_type":"OP_UPDATE_TTL_DURATION", 9 | "table_info": { 10 | "table_name": "'$4'", 11 | "database": "'$3'", 12 | "namespace_name": "'$2'", 13 | "ttl_duration": '$5' 14 | } 15 | }' http://$1/MetaService/meta_manager 16 | echo -e "\n" 17 | 18 | -------------------------------------------------------------------------------- /src/tools/script/watch_meta_query.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | echo -e "query namespace\n" 5 | curl -d '{ 6 | "op_type" : "QUERY_NAMESPACE", 7 | "namespace_name" : "CLUSTER_STATUS" 8 | }' http://$1/MetaService/query 9 | echo -e "\n" 10 | 11 | echo -e "query 
database\n" 12 | curl -d '{ 13 | "op_type" : "QUERY_DATABASE", 14 | "database" : "cluster_status", 15 | "namespace_name":"CLUSTER_STATUS" 16 | }' http://$1/MetaService/query 17 | echo -e "\n" 18 | 19 | echo -e "query user\n" 20 | curl -d '{ 21 | "op_type" : "QUERY_USERPRIVILEG", 22 | "user_name" : "test" 23 | }' http://$1/MetaService/query 24 | echo -e "\n" 25 | 26 | echo -e 'query table\n' 27 | curl -d '{ 28 | "op_type" : "QUERY_SCHEMA", 29 | "namespace_name" : "CLUSTER_STATUS" 30 | }' http://$1/MetaService/query 31 | echo -e "\n" 32 | 33 | echo -e 'query instance\n' 34 | curl -d '{ 35 | "op_type" : "QUERY_INSTANCE", 36 | "namespace_name" : "CLUSTER_STATUS" 37 | }' http://$1/MetaService/query 38 | echo -e "\n" 39 | -------------------------------------------------------------------------------- /sysbench/baikaldb_deploy_scripts/baikalMetaConf/gflags.conf: -------------------------------------------------------------------------------- 1 | -defer_close_second=300 2 | -db_path=./rocks_db 3 | -snapshot_interval_s=600 4 | -election_timeout_ms=1000 5 | -log_uri=myraftlog://my_raft_log?id= 6 | -meta_replica_number=1 7 | -meta_server_bns=metaIP:metaport 8 | -meta_raft_peers=metaIP:metaport 9 | -store_request_timeout=240000 10 | -store_connect_timeout=5000 11 | -bthread_concurrency=100 12 | -meta_port=metaport 13 | -------------------------------------------------------------------------------- /sysbench/baikaldb_deploy_scripts/baikalStoreConf/gflags.conf: -------------------------------------------------------------------------------- 1 | -db_path=db_path 2 | -defer_close_second=300 3 | -byte_size_per_record=1 4 | -capacity=100 *1024 * 1024 * 1024LL 5 | -snapshot_interval_s=600 6 | -election_timeout_ms=10000 7 | -raft_max_election_delay_ms=5000 8 | -raft_election_heartbeat_factor=3 9 | -log_uri=myraftlog://my_raft_log?id= 10 | -stable_uri=local://stable_uri 11 | -snapshot_uri=local://snapshot_uri 12 | -save_applied_index_path=save_applied_index_path/save_applied_index 13 | -quit_gracefully_file=quit_gracefully_file/gracefully_quit.txt 14 | -meta_server_bns=metaIP:metaport 15 | -wordrank_conf=./config/drpc_client.xml 16 | -bthread_concurrency=500 17 | -store_port=storeport 18 | -rocks_transaction_lock_timeout_ms=200000 19 | -transaction_clear_interval_ms=10000 20 | -transaction_clear_delay_ms=300000 21 | -------------------------------------------------------------------------------- /sysbench/baikaldb_deploy_scripts/baikaldbConf/gflags.conf: -------------------------------------------------------------------------------- 1 | -defer_close_second=3600 2 | -meta_server_bns=metaIP:metaport 3 | -bthread_concurrency=50 4 | -task_group_runqueue_capacity=16384 5 | -max_body_size=268435456 6 | -query_quota_per_user=20000 7 | -baikal_port=baikalport 8 | -connect_idle_timeout_s=1800 9 | -fetch_instance_id=true 10 | -wait_after_prepare_us=0 11 | -------------------------------------------------------------------------------- /sysbench/baikaldb_deploy_scripts/create_internal_table.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | curl -d '{ 3 | "op_type": "OP_CREATE_TABLE", 4 | "table_info": { 5 | "table_name": "__baikaldb_instance", 6 | "database": "baikaldb", 7 | "namespace_name": "INTERNAL", 8 | "fields": [ 9 | { 10 | "field_name" : "instance_id", 11 | "mysql_type" : "UINT64", 12 | "auto_increment" : true 13 | } 14 | ], 15 | "indexs": [ 16 | { 17 | "index_name" : "priamry_key", 18 | "index_type" : "I_PRIMARY", 19 | "field_names": ["instance_id"] 20 | } 
21 | ] 22 | } 23 | }' http://$1/MetaService/meta_manager 24 | echo -e "\n" 25 | -------------------------------------------------------------------------------- /sysbench/deploy_baikaldb.md: -------------------------------------------------------------------------------- 1 | ## 安装baikalDB环境 2 | * 编译baikalDB+baikalMeta+baikalStore bin文件 3 | * 创建文件目录 4 | * bin 5 | * conf 6 | * log 7 | * 更新baikalDB+baikalMeta+baikalStore配置 8 | * baikalMeta conf更新 9 | * -meta_server_bns=metaIP:metaport(部署机器IP:空闲端口) 10 | * -db_path=./rocks_db(默认在部署目录同级目录,可根据需要改变目录) 11 | * -meta_raft_peers=metaIP:metaport(部署机器IP:空闲端口,部署一个实例时与meta_server_bns一致即可) 12 | * -meta_port=metaport(meta的端口) 13 | * 其余参数可根据需要更新 14 | * baikalStore conf更新 15 | * -db_path=db_path(指定一个db文件存放地址) 16 | * -stable_uri=local://stable_uri(指定stable_uri地址) 17 | * -snapshot_uri=local://snapshot_uri(指定snapshot_uri地址) 18 | * -save_applied_index_path=save_applied_index_path/save_applied_index(指定save_applied_index_path地址) 19 | * -quit_gracefully_file=quit_gracefully_file/gracefully_quit.txt(指定quit_gracefully_file地址) 20 | * -meta_server_bns=metaIP:metaport 21 | * -store_port=storeport 22 | * 其余参数可根据需要更新 23 | * baikaldb conf更新 24 | * -meta_server_bns=metaIP:metaport 25 | * -baikal_port=baikalport 26 | * 其余参数可根据需要更新 27 | 28 | ## 启动baikalDB环境 29 | * 启动baikalMeta 30 | * 启动三个baikalStore 31 | * 创建数据库、表 32 | * 执行sh init.sh创建database与机房过滤等,$1为metaIP:metaport 33 | * 执行sh create_internal_table.sh 创建内部表,$1为metaIP:metaport 34 | * 启动baikaldb 35 | 36 | ## 测试baikalDB环境 37 | * 找到一个安装mysql环境的机器,mysql -hbaikaldbIP -pbaikaldbport -uroot -proot进行测试连接 38 | * baikaldb的使用方式与mysql完全一致 39 | 40 | -------------------------------------------------------------------------------- /sysbench/lua/Makefile.am: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016 Alexey Kopytov 2 | # 3 | # This program is free software; you can redistribute it and/or modify 4 | # it under the terms of the GNU General Public License as published by 5 | # the Free Software Foundation; either version 2 of the License, or 6 | # (at your option) any later version. 7 | # 8 | # This program is distributed in the hope that it will be useful, 9 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | # GNU General Public License for more details. 12 | # 13 | # You should have received a copy of the GNU General Public License 14 | # along with this program; if not, write to the Free Software 15 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 16 | 17 | SUBDIRS = internal 18 | 19 | dist_pkgdata_SCRIPTS = bulk_insert.lua \ 20 | oltp_delete.lua \ 21 | oltp_insert.lua \ 22 | oltp_read_only.lua \ 23 | oltp_read_write.lua \ 24 | oltp_point_select.lua \ 25 | oltp_update_index.lua \ 26 | oltp_update_non_index.lua \ 27 | oltp_write_only.lua\ 28 | select_random_points.lua \ 29 | select_random_ranges.lua 30 | 31 | dist_pkgdata_DATA = oltp_common.lua 32 | -------------------------------------------------------------------------------- /sysbench/lua/delete.lua: -------------------------------------------------------------------------------- 1 | pathtest = string.match(test, "(.*/)") or "" 2 | 3 | dofile(pathtest .. "common.lua") 4 | 5 | function thread_init(thread_id) 6 | set_vars() 7 | end 8 | 9 | function event(thread_id) 10 | local table_name 11 | table_name = "sbtest".. sb_rand_uniform(1, oltp_tables_count) 12 | rs = db_query("DELETE FROM " .. table_name .. 
" WHERE id=" .. sb_rand(1, oltp_table_size)) 13 | end 14 | -------------------------------------------------------------------------------- /sysbench/lua/empty-test.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sysbench 2 | 3 | -- you can run this script like this: 4 | -- $ ./empty-test.lua --cpu-max-prime=20000 --threads=8 --histogram --report-interval=1 run 5 | 6 | sysbench.cmdline.options = { 7 | -- the default values for built-in options are currently ignored, see 8 | -- https://github.com/akopytov/sysbench/issues/151 9 | ["cpu-max-prime"] = {"CPU maximum prime", 10000}, 10 | ["threads"] = {"Number of threads", 1}, 11 | ["histogram"] = {"Show histogram", "off"}, 12 | ["report-interval"] = {"Report interval", 1} 13 | } 14 | 15 | function event() 16 | end 17 | 18 | function sysbench.hooks.report_cumulative(stat) 19 | local seconds = stat.time_interval 20 | print(string.format([[ 21 | { 22 | "errors": %4.0f, 23 | "events": %4.0f, 24 | "latency_avg": %4.10f, 25 | "latency_max": %4.10f, 26 | "latency_min": %4.10f, 27 | "latency_pct": %4.10f, 28 | "latency_sum": %4.10f, 29 | "other": %4.0f, 30 | "reads": %4.0f, 31 | "reconnects": %4.0f, 32 | "threads_running": %4.0f, 33 | "time_interval": %4.10f, 34 | "time_total": %4.10f, 35 | "writes": %4.0f 36 | } 37 | ]], 38 | stat.errors, 39 | stat.events, 40 | stat.latency_avg, 41 | stat.latency_max, 42 | stat.latency_min, 43 | stat.latency_pct, 44 | stat.latency_sum, 45 | stat.other, 46 | stat.reads, 47 | stat.reconnects, 48 | stat.threads_running, 49 | stat.time_interval, 50 | stat.time_total, 51 | stat.writes)) 52 | end 53 | -------------------------------------------------------------------------------- /sysbench/lua/internal/Makefile.am: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-2018 Alexey Kopytov 2 | # 3 | # This program is free software; you can redistribute it and/or modify 4 | # it under the terms of the GNU General Public License as published by 5 | # the Free Software Foundation; either version 2 of the License, or 6 | # (at your option) any later version. 7 | # 8 | # This program is distributed in the hope that it will be useful, 9 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | # GNU General Public License for more details. 
12 | # 13 | # You should have received a copy of the GNU General Public License 14 | # along with this program; if not, write to the Free Software 15 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 16 | 17 | BUILT_SOURCES = sysbench.lua.h sysbench.rand.lua.h sysbench.sql.lua.h \ 18 | sysbench.cmdline.lua.h \ 19 | sysbench.histogram.lua.h 20 | 21 | CLEANFILES = $(BUILT_SOURCES) 22 | 23 | EXTRA_DIST = $(BUILT_SOURCES:.h=) 24 | 25 | SUFFIXES = .lua .lua.h 26 | 27 | .lua.lua.h: 28 | @echo "Creating $@ from $<" 29 | @var=$$(echo $< | sed 's/\./_/g') && \ 30 | ( echo "unsigned char $${var}[] =" && \ 31 | sed -e 's/\\/\\\\/g' \ 32 | -e 's/"/\\"/g' \ 33 | -e 's/^/ "/g' \ 34 | -e 's/$$/\\n"/g' $< && \ 35 | echo ";" && \ 36 | echo "size_t $${var}_len = sizeof($${var}) - 1;" ) > $@ 37 | -------------------------------------------------------------------------------- /sysbench/lua/oltp_delete.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sysbench 2 | -- Copyright (C) 2006-2017 Alexey Kopytov 3 | 4 | -- This program is free software; you can redistribute it and/or modify 5 | -- it under the terms of the GNU General Public License as published by 6 | -- the Free Software Foundation; either version 2 of the License, or 7 | -- (at your option) any later version. 8 | 9 | -- This program is distributed in the hope that it will be useful, 10 | -- but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | -- GNU General Public License for more details. 13 | 14 | -- You should have received a copy of the GNU General Public License 15 | -- along with this program; if not, write to the Free Software 16 | -- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 17 | 18 | -- ---------------------------------------------------------------------- 19 | -- Delete-Only OLTP benchmark 20 | -- ---------------------------------------------------------------------- 21 | 22 | require("oltp_common") 23 | 24 | function prepare_statements() 25 | prepare_for_each_table("deletes") 26 | end 27 | 28 | function event() 29 | local tnum = sysbench.rand.uniform(1, sysbench.opt.tables) 30 | local id = sysbench.rand.default(1, sysbench.opt.table_size) 31 | 32 | param[tnum].deletes[1]:set(id) 33 | stmt[tnum].deletes:execute() 34 | end 35 | -------------------------------------------------------------------------------- /sysbench/lua/oltp_point_select.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sysbench 2 | -- Copyright (C) 2006-2017 Alexey Kopytov 3 | 4 | -- This program is free software; you can redistribute it and/or modify 5 | -- it under the terms of the GNU General Public License as published by 6 | -- the Free Software Foundation; either version 2 of the License, or 7 | -- (at your option) any later version. 8 | 9 | -- This program is distributed in the hope that it will be useful, 10 | -- but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | -- GNU General Public License for more details. 
13 | 14 | -- You should have received a copy of the GNU General Public License 15 | -- along with this program; if not, write to the Free Software 16 | -- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 17 | 18 | -- ---------------------------------------------------------------------- 19 | -- OLTP Point Select benchmark 20 | -- ---------------------------------------------------------------------- 21 | 22 | require("oltp_common") 23 | 24 | function prepare_statements() 25 | -- use 1 query per event, rather than sysbench.opt.point_selects which 26 | -- defaults to 10 in other OLTP scripts 27 | sysbench.opt.point_selects=1 28 | 29 | prepare_point_selects() 30 | end 31 | 32 | function event() 33 | execute_point_selects() 34 | end 35 | -------------------------------------------------------------------------------- /sysbench/lua/oltp_update_index.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sysbench 2 | -- Copyright (C) 2006-2017 Alexey Kopytov 3 | 4 | -- This program is free software; you can redistribute it and/or modify 5 | -- it under the terms of the GNU General Public License as published by 6 | -- the Free Software Foundation; either version 2 of the License, or 7 | -- (at your option) any later version. 8 | 9 | -- This program is distributed in the hope that it will be useful, 10 | -- but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | -- GNU General Public License for more details. 13 | 14 | -- You should have received a copy of the GNU General Public License 15 | -- along with this program; if not, write to the Free Software 16 | -- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 17 | 18 | -- ---------------------------------------------------------------------- 19 | -- Update-Index OLTP benchmark 20 | -- ---------------------------------------------------------------------- 21 | 22 | require("oltp_common") 23 | 24 | function prepare_statements() 25 | prepare_index_updates() 26 | end 27 | 28 | function event() 29 | execute_index_updates(con) 30 | end 31 | -------------------------------------------------------------------------------- /sysbench/lua/oltp_update_non_index.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sysbench 2 | -- Copyright (C) 2006-2017 Alexey Kopytov 3 | 4 | -- This program is free software; you can redistribute it and/or modify 5 | -- it under the terms of the GNU General Public License as published by 6 | -- the Free Software Foundation; either version 2 of the License, or 7 | -- (at your option) any later version. 8 | 9 | -- This program is distributed in the hope that it will be useful, 10 | -- but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | -- GNU General Public License for more details. 
13 | 14 | -- You should have received a copy of the GNU General Public License 15 | -- along with this program; if not, write to the Free Software 16 | -- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 17 | 18 | -- ---------------------------------------------------------------------- 19 | -- Update-Non-Index OLTP benchmark 20 | -- ---------------------------------------------------------------------- 21 | 22 | require("oltp_common") 23 | 24 | function prepare_statements() 25 | prepare_non_index_updates() 26 | end 27 | 28 | function event() 29 | execute_non_index_updates() 30 | end 31 | -------------------------------------------------------------------------------- /sysbench/lua/oltp_write_only.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sysbench 2 | -- Copyright (C) 2006-2017 Alexey Kopytov 3 | 4 | -- This program is free software; you can redistribute it and/or modify 5 | -- it under the terms of the GNU General Public License as published by 6 | -- the Free Software Foundation; either version 2 of the License, or 7 | -- (at your option) any later version. 8 | 9 | -- This program is distributed in the hope that it will be useful, 10 | -- but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | -- GNU General Public License for more details. 13 | 14 | -- You should have received a copy of the GNU General Public License 15 | -- along with this program; if not, write to the Free Software 16 | -- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 17 | 18 | -- ---------------------------------------------------------------------- 19 | -- Write-Only OLTP benchmark 20 | -- ---------------------------------------------------------------------- 21 | 22 | require("oltp_common") 23 | 24 | function prepare_statements() 25 | if not sysbench.opt.skip_trx then 26 | prepare_begin() 27 | prepare_commit() 28 | end 29 | 30 | prepare_index_updates() 31 | prepare_non_index_updates() 32 | prepare_delete_inserts() 33 | end 34 | 35 | function event() 36 | if not sysbench.opt.skip_trx then 37 | begin() 38 | end 39 | 40 | execute_index_updates() 41 | execute_non_index_updates() 42 | execute_delete_inserts() 43 | 44 | if not sysbench.opt.skip_trx then 45 | commit() 46 | end 47 | end 48 | -------------------------------------------------------------------------------- /sysbench/scripts/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #set -x 4 | workpath=`pwd` 5 | cd $workpath 6 | source ./config.conf 7 | 8 | #baikaldb or mysql 9 | 10 | if [[ $1 = "baikaldb" ]]; 11 | then 12 | common_file="lua/oltp_common.lua" 13 | storage_engine="rocksdb" 14 | else 15 | common_file="lua/oltp_common.lua" 16 | storage_engine="innodb" 17 | fi 18 | 19 | cd .. 
20 | echo `pwd` 21 | 22 | # cleanup 23 | ./sysbench --auto-inc=off --create_secondary=true --db-ps-mode=auto \ 24 | --mysql-host=$host --mysql-port=$port --mysql-user=$user --mysql-password=$passwd \ 25 | --mysql-storage-engine=$storage_engine --table_size=$table_size --tables=$tables $common_file cleanup 26 | -------------------------------------------------------------------------------- /sysbench/scripts/config.conf: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | workpath=`pwd` 3 | 4 | #baikaldb conf 5 | host='127.0.0.1' 6 | port=8888 7 | user='root' 8 | passwd='****' 9 | 10 | 11 | #common conf 12 | tables=32 13 | table_size=1000000 14 | events=0 15 | time=600 16 | threads=1024 17 | requests=1000000 18 | interval=10 19 | 20 | -------------------------------------------------------------------------------- /sysbench/scripts/delete.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #set -x 4 | workpath=`pwd` 5 | cd $workpath 6 | source ./config.conf 7 | 8 | #baikaldb or mysql 9 | 10 | if [[ $1 = "baikaldb" ]]; 11 | then 12 | common_file="oltp_delete.lua" 13 | storage_engine="rocksdb" 14 | else 15 | common_file="oltp_delete.lua" 16 | storage_engine="innodb" 17 | fi 18 | 19 | cd ../lua 20 | echo `pwd` 21 | # run read-only 22 | 23 | ../sysbench --auto-inc=off --create_secondary=true --db-ps-mode=auto \ 24 | --mysql-host=$host --mysql-port=$port --mysql-user=$user --mysql-password=$passwd \ 25 | --mysql-storage-engine=$storage_engine --table_size=$table_size --tables=$tables \ 26 | --report-interval=$interval --events=$events --time=$time --threads=$threads --rand-type=uniform $common_file run 27 | -------------------------------------------------------------------------------- /sysbench/scripts/insert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #set -x 4 | workpath=`pwd` 5 | cd $workpath 6 | source ./config.conf 7 | 8 | #baikaldb or mysql 9 | 10 | if [[ $1 = "baikaldb" ]]; 11 | then 12 | common_file="oltp_insert.lua" 13 | storage_engine="rocksdb" 14 | else 15 | common_file="oltp_insert.lua" 16 | storage_engine="innodb" 17 | fi 18 | 19 | cd ../lua 20 | echo `pwd` 21 | # run read-only 22 | 23 | ../sysbench --auto-inc=off --create_secondary=true --db-ps-mode=auto \ 24 | --mysql-host=$host --mysql-port=$port --mysql-user=$user --mysql-password=$passwd \ 25 | --mysql-storage-engine=$storage_engine --table_size=$table_size --tables=$tables \ 26 | --percentile=95 --skip_trx=on --mysql-ignore-errors=1022 \ 27 | --report-interval=$interval --events=$events --time=$time --threads=$threads --rand-type=uniform $common_file run 28 | -------------------------------------------------------------------------------- /sysbench/scripts/insert_noprepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #set -x 4 | workpath=`pwd` 5 | cd $workpath 6 | source ./config.conf 7 | 8 | #baikaldb or mysql 9 | 10 | if [[ $1 = "baikaldb" ]]; 11 | then 12 | common_file="oltp_insert.lua" 13 | storage_engine="rocksdb" 14 | else 15 | common_file="oltp_insert.lua" 16 | storage_engine="innodb" 17 | fi 18 | 19 | cd ../lua 20 | echo `pwd` 21 | # run read-only 22 | 23 | ../sysbench --auto-inc=off --create_secondary=true --db-ps-mode=disable \ 24 | --mysql-host=$host --mysql-port=$port --mysql-user=$user --mysql-password=$passwd \ 25 | --mysql-storage-engine=$storage_engine --table_size=$table_size 
--tables=$tables \ 26 | --percentile=95 --skip_trx=on --mysql-ignore-errors=1022 \ 27 | --report-interval=$interval --events=$events --time=$time --threads=$threads --rand-type=uniform $common_file run 28 | -------------------------------------------------------------------------------- /sysbench/scripts/prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #set -x 4 | workpath=`pwd` 5 | cd $workpath 6 | source ./config.conf 7 | 8 | #baikaldb or mysql 9 | 10 | if [[ $1 = "baikaldb" ]]; 11 | then 12 | common_file="lua/oltp_common.lua" 13 | storage_engine="rocksdb" 14 | else 15 | common_file="lua/oltp_common.lua" 16 | storage_engine="innodb" 17 | fi 18 | 19 | cd .. 20 | echo `pwd` 21 | # prepare 22 | 23 | ./sysbench --auto-inc=off --create_secondary=false --db-ps-mode=auto \ 24 | --mysql-host=$host --mysql-port=$port --mysql-user=$user --mysql-password=$passwd \ 25 | --mysql-storage-engine=$storage_engine --table_size=$table_size --tables=$tables $common_file prepare 26 | -------------------------------------------------------------------------------- /sysbench/scripts/read-only.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #set -x 4 | workpath=`pwd` 5 | cd $workpath 6 | source ./config.conf 7 | 8 | #baikaldb or mysql 9 | 10 | if [[ $1 = "baikaldb" ]]; 11 | then 12 | common_file="oltp_point_select.lua" 13 | storage_engine="rocksdb" 14 | else 15 | common_file="oltp_point_select.lua" 16 | storage_engine="innodb" 17 | fi 18 | 19 | cd ../lua 20 | echo `pwd` 21 | # run read-only 22 | 23 | ../sysbench --auto-inc=off --skip_trx=on --create_secondary=true --db-ps-mode=auto \ 24 | --mysql-host=$host --mysql-port=$port --mysql-user=$user --mysql-password=$passwd \ 25 | --mysql-storage-engine=$storage_engine --table_size=$table_size --tables=$tables \ 26 | --report-interval=$interval --events=$events --time=$time --threads=$threads --rand-type=uniform $common_file run 27 | -------------------------------------------------------------------------------- /sysbench/scripts/read-only_noprepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #set -x 4 | workpath=`pwd` 5 | cd $workpath 6 | source ./config.conf 7 | 8 | #baikaldb or mysql 9 | 10 | if [[ $1 = "baikaldb" ]]; 11 | then 12 | common_file="oltp_point_select.lua" 13 | storage_engine="rocksdb" 14 | else 15 | common_file="oltp_point_select.lua" 16 | storage_engine="innodb" 17 | fi 18 | 19 | cd ../lua 20 | echo `pwd` 21 | # run read-only 22 | 23 | ../sysbench --auto-inc=off --skip_trx=on --create_secondary=true --db-ps-mode=disable \ 24 | --mysql-host=$host --mysql-port=$port --mysql-user=$user --mysql-password=$passwd \ 25 | --mysql-storage-engine=$storage_engine --table_size=$table_size --tables=$tables \ 26 | --report-interval=$interval --events=$events --time=$time --threads=$threads --rand-type=uniform $common_file run 27 | -------------------------------------------------------------------------------- /sysbench/scripts/read-write.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #set -x 4 | workpath=`pwd` 5 | cd $workpath 6 | source ./config.conf 7 | 8 | #baikaldb or mysql 9 | 10 | if [[ $1 = "baikaldb" ]]; 11 | then 12 | common_file="oltp_read_write.lua" 13 | storage_engine="rocksdb" 14 | else 15 | common_file="oltp_read_write.lua" 16 | storage_engine="innodb" 17 | fi 18 | 19 | cd ../lua 20 | echo `pwd` 
21 | # run read-only 22 | 23 | ../sysbench --auto-inc=off --create_secondary=true --db-ps-mode=auto \ 24 | --mysql-host=$host --mysql-port=$port --mysql-user=$user --mysql-password=$passwd \ 25 | --mysql-storage-engine=$storage_engine --table_size=$table_size --tables=$tables \ 26 | --report-interval=$interval --events=$events --skip_trx=on --mysql-ignore-errors=1022 \ 27 | --time=$time --threads=$threads --rand-type=uniform $common_file run 28 | -------------------------------------------------------------------------------- /sysbench/scripts/read-write_noprepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #set -x 4 | workpath=`pwd` 5 | cd $workpath 6 | source ./config.conf 7 | 8 | #baikaldb or mysql 9 | 10 | if [[ $1 = "baikaldb" ]]; 11 | then 12 | common_file="oltp_read_write.lua" 13 | storage_engine="rocksdb" 14 | else 15 | common_file="oltp_read_write.lua" 16 | storage_engine="innodb" 17 | fi 18 | 19 | cd ../lua 20 | echo `pwd` 21 | # run read-only 22 | 23 | ../sysbench --auto-inc=off --create_secondary=true --db-ps-mode=disable \ 24 | --mysql-host=$host --mysql-port=$port --mysql-user=$user --mysql-password=$passwd \ 25 | --mysql-storage-engine=$storage_engine --table_size=$table_size --tables=$tables \ 26 | --report-interval=$interval --events=$events --skip_trx=on --mysql-ignore-errors=1022 \ 27 | --time=$time --threads=$threads --rand-type=uniform $common_file run 28 | -------------------------------------------------------------------------------- /sysbench/scripts/select.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #set -x 4 | workpath=`pwd` 5 | cd $workpath 6 | source ./config.conf 7 | 8 | #baikaldb or mysql 9 | 10 | if [[ $1 = "baikaldb" ]]; 11 | then 12 | common_file="select_random_ranges.lua" 13 | storage_engine="rocksdb" 14 | else 15 | common_file="select_random_ranges.lua" 16 | storage_engine="innodb" 17 | fi 18 | 19 | cd ../lua 20 | echo `pwd` 21 | # run read-only 22 | 23 | ../sysbench --auto-inc=off --create_secondary=true --db-ps-mode=auto \ 24 | --mysql-host=$host --mysql-port=$port --mysql-user=$user --mysql-password=$passwd \ 25 | --mysql-storage-engine=$storage_engine --table_size=$table_size --tables=$tables \ 26 | --report-interval=$interval --percentile=95 \ 27 | --events=$events --time=$time --threads=$threads --rand-type=uniform $common_file run 28 | -------------------------------------------------------------------------------- /test/conf/data_gbk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baidu/BaikalDB/c04e446e4404ab142c50b042a91567062b4398c8/test/conf/data_gbk -------------------------------------------------------------------------------- /test/conf/data_utf8: -------------------------------------------------------------------------------- 1 | select '{\"商务通\"}'; | SELECT '{"商务通"}' 2 | select '{\"商务\"}'; | SELECT '{"商务"}' 3 | -------------------------------------------------------------------------------- /test/conf/punctuation.dic: -------------------------------------------------------------------------------- 1 | ` 2 | ^ 3 | ~ 4 | < 5 | = 6 | > 7 | | 8 | _ 9 | - 10 | , 11 | ; 12 | : 13 | ! 14 | ? 15 | / 16 | . 
17 | ' 18 | " 19 | ( 20 | ) 21 | [ 22 | ] 23 | { 24 | } 25 | @ 26 | $ 27 | * 28 | \ 29 | & 30 | # 31 | % 32 | + 33 | -------------------------------------------------------------------------------- /test/conf/q2b_gbk.dic: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baidu/BaikalDB/c04e446e4404ab142c50b042a91567062b4398c8/test/conf/q2b_gbk.dic -------------------------------------------------------------------------------- /test/conf/q2b_utf8.dic: -------------------------------------------------------------------------------- 1 |   2 | 、 , 3 | 。 . 4 | — - 5 | ~ ~ 6 | ‖ | 7 | … . 8 | ‘ ' 9 | ’ ' 10 | “ " 11 | ” " 12 | 〔 ( 13 | 〕 ) 14 | 〈 < 15 | 〉 > 16 | 「 ' 17 | 」 ' 18 | 『 " 19 | 』 " 20 | 〖 [ 21 | 〗 ] 22 | 【 [ 23 | 】 ] 24 | ∶ : 25 | $ $ 26 | ! ! 27 | " " 28 | # # 29 | % % 30 | & & 31 | ' ' 32 | ( ( 33 | ) ) 34 | * * 35 | + + 36 | , , 37 | - - 38 | . . 39 | / / 40 | 0 0 41 | 1 1 42 | 2 2 43 | 3 3 44 | 4 4 45 | 5 5 46 | 6 6 47 | 7 7 48 | 8 8 49 | 9 9 50 | : : 51 | ; ; 52 | < < 53 | = = 54 | > > 55 | ? ? 56 | @ @ 57 | A a 58 | B b 59 | C c 60 | D d 61 | E e 62 | F f 63 | G g 64 | H h 65 | I i 66 | J j 67 | K k 68 | L l 69 | M m 70 | N n 71 | O o 72 | P p 73 | Q q 74 | R r 75 | S s 76 | T t 77 | U u 78 | V v 79 | W w 80 | X x 81 | Y y 82 | Z z 83 | [ [ 84 | \ \ 85 | ] ] 86 | ^ ^ 87 | _ _ 88 | ` ` 89 | a a 90 | b b 91 | c c 92 | d d 93 | e e 94 | f f 95 | g g 96 | h h 97 | i i 98 | j j 99 | k k 100 | l l 101 | m m 102 | n n 103 | o o 104 | p p 105 | q q 106 | r r 107 | s s 108 | t t 109 | u u 110 | v v 111 | w w 112 | x x 113 | y y 114 | z z 115 | { { 116 | | | 117 | } } 118 |  ̄ ~ 119 | 〝 " 120 | 〞 " 121 | ﹐ , 122 | ﹑ , 123 | ﹒ . 124 | ﹔ ; 125 | ﹕ : 126 | ﹖ ? 127 | ﹗ ! 128 | ﹙ ( 129 | ﹚ ) 130 | ﹛ { 131 | ﹜ { 132 | ﹝ [ 133 | ﹞ ] 134 | ﹟ # 135 | ﹠ & 136 | ﹡ * 137 | ﹢ + 138 | ﹣ - 139 | ﹤ < 140 | ﹥ > 141 | ﹦ = 142 | ﹨ \ 143 | ﹩ $ 144 | ﹪ % 145 | ﹫ @ 146 | -------------------------------------------------------------------------------- /test/fun/exec.conf: -------------------------------------------------------------------------------- 1 | # 定义测试执行顺序,不包括创建表 2 | # 执行case 是否验证结果 3 | create_table.sql 0 4 | fun_abnormal_branch.sql 2 5 | # FC_WORD 6 | origin_plan_unit_unitsetting_idea_insert.sql 2 7 | fun_select_plan_unit.sql 1 8 | # CRUD 9 | fun_insert.sql 1 10 | fun_insert_select.sql 1 11 | fun_select.sql 1 12 | fun_update.sql 1 13 | fun_delete.sql 1 14 | # Origin 15 | fun_o_createtable.sql 0 16 | fun_o_datain.sql 1 17 | fun_o_query.sql 1 18 | fun_o_write.sql 1 19 | # INNER 20 | fun_funx.sql 1 21 | fun_transaction.sql 1 22 | fun_ddl.sql 2 23 | # python 24 | global_index_consistent_check_cstore.py 2 25 | -------------------------------------------------------------------------------- /test/fun/exec_cstore.conf: -------------------------------------------------------------------------------- 1 | # 定义测试执行顺序,不包括创建表 2 | # 执行case 是否验证结果 3 | create_table_cstore.sql 0 4 | fun_abnormal_branch.sql 2 5 | # FC_WORD 6 | origin_plan_unit_unitsetting_idea_insert.sql 2 7 | fun_select_plan_unit.sql 1 8 | # CRUD 9 | fun_insert.sql 1 10 | fun_insert_select.sql 1 11 | fun_select.sql 1 12 | fun_update.sql 1 13 | fun_delete.sql 1 14 | # Origin 15 | fun_o_createtable.sql 0 16 | fun_o_datain.sql 1 17 | fun_o_query.sql 1 18 | fun_o_write.sql 1 19 | # INNER 20 | fun_funx.sql 1 21 | fun_transaction.sql 1 22 | fun_ddl.sql 2 23 | # python 24 | global_index_consistent_check_cstore.py 2 25 | 
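(Note on the two exec*.conf files above, not part of the repository: the Chinese header comments say each file defines the test execution order, excluding table creation, and that each line is "<case file> <whether to verify results>". Below is a minimal sketch of how such a file could be consumed — the actual test driver is not included in this dump, and the meaning of flag value 2 is not specified.)

# hypothetical reader for exec.conf; the real harness is not shown here
grep -v '^#' exec.conf | while read case_file verify_flag; do
    [ -n "$case_file" ] || continue          # skip blank lines
    echo "run $case_file (verify_flag=$verify_flag)"
done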
-------------------------------------------------------------------------------- /test/fun/fun_abnormal_branch.sql: -------------------------------------------------------------------------------- 1 | # unix_timestamp 2 | select unix_timestamp(1); 3 | select unix_timestamp(); 4 | select unix_timestamp(null); 5 | # 随机数 6 | SELECT RAND(); 7 | select RAND(100); 8 | # 现在时间 9 | select now(); 10 | 11 | # sql_parser error 12 | select * Frmo Baikaltest.planinfo; 13 | select select * Frmo Baikaltest.planinfo; 14 | selectt * from Baikaltest.planinfo; 15 | insert ioto Baikaltest.t_student2 values(,,,,); 16 | replace ioto Baikaltest.t_student2 values(,,,,); 17 | delete Form Baikaltest.planinfo; 18 | # 不支持正则,现在报语法错误 19 | SELECT name1 FROM Baikaltest.t_student2 WHERE name1 REGEXP '^[aeiou]|ok$'; 20 | select 'beijing' REGEXP 'jing'; 21 | select 'beijing' REGEXP 'xi'; 22 | select 2<=>3; 23 | select null<=>null; 24 | 25 | # null 26 | select name1 from Baikaltest.t_student2 group by NULL; 27 | select name1,count(*) from Baikaltest.t_student2 group by NULL; 28 | 29 | -------------------------------------------------------------------------------- /test/fun/fun_ddl.sql: -------------------------------------------------------------------------------- 1 | # 添加列 2 | alter table Baikaltest.DDLtable add column status int(4) default 0 comment "状态:0-上线;1-下线"; 3 | sleep 10 4 | desc Baikaltest.DDLtable; 5 | 6 | # 修改列名 7 | alter table Baikaltest.DDLtable RENAME COLUMN status TO status2; 8 | sleep 10 9 | desc Baikaltest.DDLtable; 10 | 11 | # 删除列 12 | alter table Baikaltest.DDLtable drop column status2; 13 | sleep 10 14 | desc Baikaltest.DDLtable; 15 | 16 | 17 | # 添加索引 18 | alter table Baikaltest.DDLtable add unique index index_name1(name1); 19 | sleep 10 20 | desc Baikaltest.DDLtable; 21 | 22 | # 删除索引 23 | alter table BBaikaltest.DDLtable2 drop index class1_key ; 24 | sleep 10 25 | desc Baikaltest.DDLtable2; 26 | 27 | 28 | # 修改表名 29 | alter table Baikaltest.DDLtable rename to Baikaltest.DDLtable22; 30 | sleep 10 31 | use Baikaltest; 32 | show tables; 33 | # 表名再改回来 34 | alter table Baikaltest.DDLtable22 rename to Baikaltest.DDLtable; 35 | sleep 10 36 | use Baikaltest; 37 | show tables; 38 | 39 | # drop table 40 | drop table Baikaltest.DDLtable; 41 | sleep 10 42 | use Baikaltest; 43 | show tables; 44 | 45 | # restore table 46 | restore table Baikaltest.DDLtable; 47 | sleep 10 48 | use Baikaltest; 49 | show tables; 50 | 51 | alter table Baikaltest.DDLtable add unique index global global_index_name2(name2); 52 | alter table Baikaltest.DDLtable drop index global_index_name2 ; 53 | alter table Baikaltest.DDLtable add index global global_index_class1(class1); 54 | alter table Baikaltest.DDLtable drop index global_index_class1 ; 55 | sleep 10 56 | desc Baikaltest.DDLtable; 57 | use Baikaltest; 58 | show tables; 59 | 60 | # 权限异常分支验证 61 | -------------------------------------------------------------------------------- /test/fun/fun_o_write.sql: -------------------------------------------------------------------------------- 1 | update Baikaltest.test set uint32_val=123 where date_val='1990-11-29' and int64_val>=3914247183946417551; 2 | -------------------------------------------------------------------------------- /test/fun/fun_sub_select.sql: -------------------------------------------------------------------------------- 1 | ### 子查询 功能测试 ### 2 | # 初始化数据 3 | delete from `Baikaltest`.`subselect` ; 4 | insert into `Baikaltest`.`subselect`(id,name1,name2,age1,age2,class1,class2,address1,address2,height1,height2) 
values(1,'zhangsan1','zhangsan11',10,11,100,101,'zhangsanaddress1','zhangsanaddress11',1000,1001); 5 | insert into `Baikaltest`.`subselect`(id,name1,name2,age1,age2,class1,class2,address1,address2,height1,height2) values(2,'lisi2','lisi22',20,21,200,201,'lisiaddress2','lisiaddress22',2000,2001); 6 | insert into `Baikaltest`.`subselect`(id,name1,name2,age1,age2,class1,class2,address1,address2,height1,height2) values(3,'wangwu3','wangwu33',30,31,300,301,'wangwuaddress3','wangwuaddress33',3000,3001); 7 | insert into `Baikaltest`.`subselect`(id,name1,name2,age1,age2,class1,class2,address1,address2,height1,height2) values(4,'zhaoliu4','zhaoliu44',40,41,400,401,'zhaoliuaddress4','zhaoliuaddress44',4000,4001); 8 | select * from `Baikaltest`.`subselect`; 9 | 10 | 11 | # join 子查询语句 12 | SELECT t1.id, t2.name1 FROM `Baikaltest`.`subselect` as t1 join `Baikaltest`.`subselect` as t2 on t1.id=t2.id; 13 | SELECT t1.id, t2.name1 FROM `Baikaltest`.`subselect` as t1 join `Baikaltest`.`subselect` as t2 on t1.id=t2.id where t2.name1 = 'zhangsan1'; 14 | SELECT t1.id, t2.name1 FROM `Baikaltest`.`subselect` as t1 left join `Baikaltest`.`subselect` as t2 on t1.id=t2.id where t2.name1 = 'zhangsan1'; 15 | SELECT t1.id, t2.name1 FROM `Baikaltest`.`subselect` as t1 right join `Baikaltest`.`subselect` as t2 on t1.id=t2.id where t2.name1 = 'zhangsan1'; -------------------------------------------------------------------------------- /test/fun/fun_transaction.sql: -------------------------------------------------------------------------------- 1 | insert into Baikaltest.student(id,name1,name2,age1,age2,class1,class2,address,height) values(1, 'baikal1','baidu1',1, 1, 1, 1,'baidu1',1); 2 | insert into Baikaltest.student(id,name1,name2,age1,age2,class1,class2,address,height) values(2, 'baikal2','baidu2',2, 2, 2, 2,'baidu2',2); 3 | select * from Baikaltest.student; 4 | begin;insert into Baikaltest.student(id,name1,name2,age1,age2,class1,class2,address,height) values(3, 'baikal3','baidu3',3, 3, 3, 3,'baidu3',3);update student set id=4,name1='a',name2='b',class1=10,class2=11 where class1=2;update student set id=4,name1='a',name2='b',class1=10,class2=11 where class1=10;commit; 5 | select * from Baikaltest.student; 6 | begin;commit; 7 | begin;begin;commit; 8 | begin;rollback; 9 | begin;rollback;rollback; 10 | rollback;rollback; 11 | begin;commit;commit; 12 | commit;commit; 13 | set autocommit=0;insert into Baikaltest.student(id,name1,name2,age1,age2,class1,class2,address,height) values(5, 'baikal5','baidu5',5, 5, 5, 5,'baidu5',5);update Baikaltest.student set id=6,name1='a',name2='b',class1=10,class2=1 where class1=10;update Baikaltest.student set name1='a',name2='b',class1=3,class2=1 where class1=10;commit; 14 | select * from Baikaltest.student; 15 | begin;update Baikaltest.student set name1='a',name2='b',class1=3,class2=1 where class1=10;commit; 16 | select * from Baikaltest.student; 17 | set autocommit=1;update Baikaltest.student set name1='a',name2='b',class1=3,class2=1 where class1=10; 18 | select * from Baikaltest.student; 19 | select * from Baikaltest.student where class1=10; 20 | -------------------------------------------------------------------------------- /test/fun/long_transaction.sql: -------------------------------------------------------------------------------- 1 | delete from Baikaltest.student333; 2 | begin;insert into Baikaltest.student333(id,name1,name2,age1,age2,class1,class2,address,height) values(1, 'baikal','baidu',1, 2, 1, 1,'baidu',1); 3 | insert into 
Baikaltest.student333(id,name1,name2,age1,age2,class1,class2,address,height) values(1, 'baikaltest','baidutest',100, 200, 100, 100,'baidutest',100); 4 | -------------------------------------------------------------------------------- /test/test_binlog_storage.cpp: -------------------------------------------------------------------------------- 1 | #include <iostream> 2 | #include <cstdint> 3 | #include <gflags/gflags.h> 4 | 5 | #include "meta_server_interact.hpp" 6 | 7 | namespace baikaldb { 8 | DECLARE_string(meta_server_bns); 9 | 10 | int64_t get_tso() { 11 | pb::TsoRequest request; 12 | pb::TsoResponse response; 13 | request.set_op_type(pb::OP_GEN_TSO); 14 | request.set_count(1); 15 | MetaServerInteract msi; 16 | msi.init_internal(FLAGS_meta_server_bns); 17 | //send the request and receive the response 18 | if (msi.send_request("tso_service", request, response) == 0) { 19 | //handle the response 20 | if (response.errcode() != pb::SUCCESS) { 21 | std::cout << "failed line:" << __LINE__ << std::endl; 22 | return -1; 23 | } 24 | 25 | } else { 26 | std::cout << "failed line:" << __LINE__ << std::endl; 27 | return -1; 28 | } 29 | 30 | int64_t tso_physical = response.start_timestamp().physical(); 31 | int64_t tso_logical = response.start_timestamp().logical(); 32 | std::cout << "physical: " << tso_physical << " logical:" << tso_logical << std::endl; 33 | //TSO layout: physical timestamp in the high bits, 18-bit logical counter in the low bits 34 | return (tso_physical << 18) + tso_logical; 35 | } 36 | 37 | } //namespace baikaldb 38 | 39 | int main(int argc, char **argv) { 40 | google::ParseCommandLineFlags(&argc, &argv, true); 41 | int64_t tso = baikaldb::get_tso(); 42 | std::cout << "tso:" << tso << std::endl; 43 | 44 | return 0; 45 | } -------------------------------------------------------------------------------- /test/test_convert_charset_gbk.cpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baidu/BaikalDB/c04e446e4404ab142c50b042a91567062b4398c8/test/test_convert_charset_gbk.cpp -------------------------------------------------------------------------------- /test/test_predicate.cpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baidu/BaikalDB/c04e446e4404ab142c50b042a91567062b4398c8/test/test_predicate.cpp -------------------------------------------------------------------------------- /test/test_reverse_common.cpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baidu/BaikalDB/c04e446e4404ab142c50b042a91567062b4398c8/test/test_reverse_common.cpp -------------------------------------------------------------------------------- /third-party/com_github_RoaringBitmap_CRoaring/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | cc_library( 4 | name = "croaring", 5 | srcs = [ 6 | "src/array_util.c", 7 | "src/bitset_util.c", 8 | "src/containers/array.c", 9 | "src/containers/bitset.c", 10 | "src/containers/containers.c", 11 | "src/containers/convert.c", 12 | "src/containers/mixed_intersection.c", 13 | "src/containers/mixed_union.c", 14 | "src/containers/mixed_equal.c", 15 | "src/containers/mixed_subset.c", 16 | "src/containers/mixed_negation.c", 17 | "src/containers/mixed_xor.c", 18 | "src/containers/mixed_andnot.c", 19 | "src/containers/run.c", 20 | "src/roaring.c", 21 | "src/roaring_priority_queue.c", 22 | "src/roaring_array.c", 23 | ], 24 | hdrs = glob([ 25 | "include/roaring/**/*.h", 26 | "include/roaring/*.h", 27 | 
"cpp/*.hh", 28 | ]), 29 | includes = [ 30 | ".", 31 | "include", 32 | "cpp", 33 | ], 34 | defines = [ 35 | "OS_LINUX", 36 | ], 37 | copts = [ 38 | "-fno-omit-frame-pointer", 39 | "-momit-leaf-frame-pointer", 40 | "-msse", 41 | "-msse4.2", 42 | "-Werror", 43 | "-mpclmul", 44 | "-O2", 45 | "-std=c99", 46 | ], 47 | visibility = ["//visibility:public"], 48 | ) 49 | 50 | -------------------------------------------------------------------------------- /third-party/com_github_facebookresearch_faiss/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | cc_library( 4 | name = "faiss", 5 | srcs = glob([ 6 | "faiss/*.cpp", 7 | "faiss/impl/*.cpp", 8 | "faiss/impl/**/*.cpp", 9 | "faiss/utils/*.cpp", 10 | "faiss/utils/**/*.cpp", 11 | "faiss/invlists/*.cpp", 12 | ]), 13 | hdrs = glob([ 14 | "faiss/*.h", 15 | "faiss/impl/*.h", 16 | "faiss/impl/**/*.h", 17 | "faiss/utils/*.h", 18 | "faiss/utils/**/*.h", 19 | "faiss/invlists/*.h", 20 | ]), 21 | includes = [ 22 | ".", 23 | "include", 24 | "faiss", 25 | ], 26 | defines = [ 27 | "OS_LINUX", 28 | "FINTEGER=int" 29 | ], 30 | copts = [ 31 | "-Wno-unused-parameter", 32 | "-Wno-sign-compare", 33 | "-Wno-unused-variable", 34 | "-Wno-unused-function", 35 | "-Wno-type-limits", 36 | "-Wno-implicit-fallthrough", 37 | "-Wno-maybe-uninitialized", 38 | "-pipe", 39 | "-fPIC", 40 | "-fopenmp", 41 | "-mavx2", 42 | "-mfma", 43 | "-mf16c", 44 | "-mpopcnt", 45 | "-O2", 46 | ], 47 | linkopts = [ 48 | "-fopenmp", 49 | "-ldl", 50 | "-lpthread", 51 | ], 52 | deps = [ 53 | "//external:openblas", 54 | ], 55 | visibility = ["//visibility:public"], 56 | ) 57 | 58 | -------------------------------------------------------------------------------- /third-party/com_github_xianyi_OpenBLAS/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | genrule( 3 | name = "build", 4 | srcs = glob(["**/*"]), 5 | outs = ["include/cblas.h"] + 6 | ["lib/libopenblas.a"], 7 | cmd = "\n".join([ 8 | "blas=\"external/com_github_xianyi_OpenBLAS\"", 9 | "outs=($(OUTS))", 10 | "cp -r $${blas} build ", 11 | "(cd build " + 12 | "&& make --silent -j4 && make install PREFIX=.)", 13 | "cp build/include/*.h $$(dirname $(location include/cblas.h))", 14 | "cp build/lib/libopenblas.a $(location lib/libopenblas.a)", 15 | ]), 16 | ) 17 | 18 | cc_library( 19 | name = "openblas", 20 | srcs = ["lib/libopenblas.a"], 21 | hdrs = glob(["include/*.h"]), 22 | linkopts = ["-pthread"], 23 | linkstatic = True, 24 | visibility = ["//visibility:public"], 25 | ) 26 | 27 | -------------------------------------------------------------------------------- /third-party/gperftools.BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) # Apache v2 2 | # @see: https://github.com/bazelbuild/bazel/issues/818 3 | # @see: https://github.com/envoyproxy/envoy/issues/1069 4 | genrule( 5 | name = "build", 6 | srcs = glob(["**/*"]), 7 | outs = [ 8 | "src/gperftools/tcmalloc.h", 9 | ".libs/libtcmalloc_and_profiler.a", 10 | ], 11 | cmd = "\n".join([ 12 | "gperf=\"external/com_github_gperftools_gperftools\"", 13 | "outs=($(OUTS))", 14 | "cp -r $${gperf} build", 15 | "(cd build" + 16 | " && ./autogen.sh >/dev/null 2>&1" + 17 | " && ./configure -q --enable-frame-pointers --disable-libunwind" + 18 | " && make --silent libtcmalloc_and_profiler.la)", 19 | "cp build/src/gperftools/tcmalloc.h $(location src/gperftools/tcmalloc.h)", 20 | "cp build/.libs/libtcmalloc_and_profiler.a 
$(location .libs/libtcmalloc_and_profiler.a)", 21 | ]), 22 | ) 23 | 24 | cc_library( 25 | name = "tcmalloc_and_profiler", 26 | srcs = [".libs/libtcmalloc_and_profiler.a"], 27 | hdrs = glob(["src/gperftools/*.h"]) + 28 | ["src/gperftools/tcmalloc.h"], 29 | strip_include_prefix = "src", 30 | linkopts = ["-pthread",], 31 | linkstatic = True, 32 | visibility = ["//visibility:public"], 33 | ) 34 | 35 | -------------------------------------------------------------------------------- /third-party/gtest.BUILD: -------------------------------------------------------------------------------- 1 | cc_library( 2 | name = "main", 3 | srcs = glob( 4 | ["src/*.cc"], 5 | exclude = ["src/gtest-all.cc"] 6 | ), 7 | hdrs = glob([ 8 | "include/**/*.h", 9 | "src/*.h" 10 | ]), 11 | copts = ["-Iexternal/gtest/include"], 12 | linkopts = ["-pthread"], 13 | visibility = ["//visibility:public"], 14 | ) 15 | -------------------------------------------------------------------------------- /third-party/lz4.BUILD: -------------------------------------------------------------------------------- 1 | cc_library( 2 | name = "lz4", 3 | # We include the lz4.c as a header as lz4hc.c actually does #include "lz4.c". 4 | hdrs = glob(["lib/*.h"]) + ["lib/lz4.c"], 5 | srcs = glob(["lib/*.c"]), 6 | visibility = ["//visibility:public"], 7 | strip_include_prefix = "lib", 8 | ) 9 | -------------------------------------------------------------------------------- /third-party/rapidjson.BUILD: -------------------------------------------------------------------------------- 1 | cc_library( 2 | name = "rapidjson", 3 | hdrs = glob(["include/rapidjson/**/*.h"]), 4 | includes = ["include"], 5 | visibility = ["//visibility:public"], 6 | ) -------------------------------------------------------------------------------- /third-party/snappy.BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | # Modified from https://github.com/mzhaom/trunk/blob/master/third_party/snappy/BUILD 4 | # https://github.com/smyte/smyte-db/blob/master/third_party/snappy.BUILD 5 | 6 | genrule( 7 | name = "snappy_stubs_public_h", 8 | srcs = [ 9 | "snappy-stubs-public.h.in", 10 | ], 11 | outs = [ 12 | "snappy-stubs-public.h", 13 | ], 14 | cmd = "sed 's/@ac_cv_have_stdint_h@/1/g' $(<) | " + 15 | "sed 's/@ac_cv_have_stddef_h@/1/g' | " + 16 | "sed 's/@ac_cv_have_sys_uio_h@/1/g' | " + 17 | "sed 's/@SNAPPY_MAJOR@/1/g' | " + 18 | "sed 's/@SNAPPY_MINOR@/1/g' | " + 19 | "sed 's/@SNAPPY_PATCHLEVEL@/3/g' >$(@)", 20 | ) 21 | 22 | cc_library( 23 | name = "snappy", 24 | srcs = [ 25 | "snappy-c.cc", 26 | "snappy-internal.h", 27 | "snappy-sinksource.cc", 28 | "snappy-stubs-internal.cc", 29 | "snappy-stubs-internal.h", 30 | "snappy.cc", 31 | ], 32 | hdrs = [ 33 | ":snappy_stubs_public_h", 34 | "snappy.h", 35 | "snappy-c.h", 36 | "snappy-sinksource.h", 37 | ], 38 | includes = [ 39 | "." 
40 | ], 41 | copts = [ 42 | "-DHAVE_CONFIG_H", 43 | "-Wno-sign-compare", 44 | ], 45 | deps = [ 46 | "//external:snappy_config", 47 | ], 48 | visibility = ["//visibility:public"], 49 | ) -------------------------------------------------------------------------------- /third-party/snappy_config/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | # Modified from https://github.com/smyte/smyte-db/blob/master/third_party/snappy/BUILD 3 | 4 | cc_library( 5 | name = "config", 6 | hdrs = [ 7 | "config.h", 8 | ], 9 | includes = [ 10 | ".", 11 | ], 12 | visibility = ["//visibility:public"], 13 | ) -------------------------------------------------------------------------------- /third-party/zlib.BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) # BSD/MIT-like license (for zlib) 2 | 3 | # Modified from https://github.com/tensorflow/tensorflow/blob/master/zlib.BUILD 4 | # https://github.com/smyte/smyte-db/edit/master/third_party/zlib.BUILD 5 | 6 | cc_library( 7 | name = "zlib", 8 | srcs = [ 9 | "adler32.c", 10 | "compress.c", 11 | "crc32.c", 12 | "crc32.h", 13 | "deflate.c", 14 | "deflate.h", 15 | "gzclose.c", 16 | "gzguts.h", 17 | "gzlib.c", 18 | "gzread.c", 19 | "gzwrite.c", 20 | "infback.c", 21 | "inffast.c", 22 | "inffast.h", 23 | "inffixed.h", 24 | "inflate.c", 25 | "inflate.h", 26 | "inftrees.c", 27 | "inftrees.h", 28 | "trees.c", 29 | "trees.h", 30 | "uncompr.c", 31 | "zconf.h", 32 | "zutil.c", 33 | "zutil.h", 34 | ], 35 | hdrs = [ 36 | "zlib.h", 37 | ], 38 | includes = [ 39 | ".", 40 | ], 41 | copts = [ 42 | "-D_LARGEFILE64_SOURCE=1", 43 | ], 44 | visibility = ["//visibility:public"], 45 | ) -------------------------------------------------------------------------------- /watt_proto/base_subscribe.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto2"; 2 | 3 | option cc_generic_services = true; 4 | 5 | package fengmai; 6 | 7 | message BaseSubscribeSubtask { 8 | optional int64 region_id = 1; 9 | optional bytes start_key = 2; 10 | optional bytes end_key = 3; 11 | }; 12 | 13 | message BaseSubscribeSubtasks { 14 | repeated BaseSubscribeSubtask subtasks = 1; 15 | }; 16 | 17 | enum BaseErrCode { 18 | TASK_SUCC = 0; 19 | TASK_FAIL = 1; 20 | TASK_EXIST = 2; 21 | }; 22 | 23 | message BaseTaskResponse { 24 | required BaseErrCode errcode = 1; 25 | optional string errmsg = 2; 26 | optional string subtasks_pb = 3; 27 | }; 28 | 29 | message BaseTaskRequest { 30 | required string product = 1; 31 | required string stream = 2; 32 | required string profile = 3; 33 | required int64 partition_id = 4; 34 | required string database = 5; 35 | required string table = 6; 36 | required string version = 7; 37 | required string subtasks_pb = 8; 38 | }; 39 | 40 | service BaseTaskService { 41 | rpc commit_base_task (BaseTaskRequest) returns (BaseTaskResponse); 42 | }; --------------------------------------------------------------------------------