├── .gitattributes ├── .gitignore ├── CMakeLists.txt ├── Commons_Clause_License ├── LICENSE ├── NOTICE ├── README.md ├── doc └── Interface.md ├── resource ├── .DS_Store ├── ._.DS_Store ├── cluster_mgr.cnf └── script │ ├── .DS_Store │ ├── ._.DS_Store │ ├── restart_cluster_mgr.sh │ ├── start_cluster_mgr.sh │ └── stop_cluster_mgr.sh ├── src ├── .DS_Store ├── ._.DS_Store ├── ._main.cc ├── .gitignore ├── backup_storage │ ├── CMakeLists.txt │ ├── backup_storage.cc │ └── backup_storage.h ├── cluster_auto │ ├── CMakeLists.txt │ ├── refresh_shard.cc │ └── refresh_shard.h ├── cluster_collection │ ├── .DS_Store │ ├── CMakeLists.txt │ ├── machine_alive.cc │ ├── machine_alive.h │ ├── prometheus_control.cc │ └── prometheus_control.h ├── cluster_debug │ ├── CMakeLists.txt │ ├── cluster_debug.cc │ └── cluster_debug.h ├── cluster_expand │ ├── CMakeLists.txt │ ├── expand_mission.cc │ ├── expand_mission.h │ ├── table_pick.cc │ └── table_pick.h ├── cluster_mission │ ├── .DS_Store │ ├── CMakeLists.txt │ ├── add_computer.cc │ ├── add_computer.h │ ├── add_node.cc │ ├── add_node.h │ ├── add_shard.cc │ ├── add_shard.h │ ├── cluster_comm.cc │ ├── cluster_comm.h │ ├── cluster_mission.cc │ ├── cluster_mission.h │ ├── create_cluster.cc │ ├── create_cluster.h │ ├── delete_cluster.cc │ ├── delete_cluster.h │ ├── delete_computer.cc │ ├── delete_computer.h │ ├── delete_node.cc │ ├── delete_node.h │ ├── delete_shard.cc │ ├── delete_shard.h │ ├── machine_mission.cc │ └── machine_mission.h ├── cluster_operator │ ├── CMakeLists.txt │ ├── computeInstallMission.cc │ ├── computeInstallMission.h │ ├── mysqlInstallRemote.cc │ ├── mysqlInstallRemote.h │ ├── postgresInstallRemote.cc │ ├── postgresInstallRemote.h │ ├── shardInstallMission.cc │ └── shardInstallMission.h ├── coldbackup │ ├── CMakeLists.txt │ ├── coldbackup.cc │ └── coldbackup.h ├── example_mission │ ├── CMakeLists.txt │ ├── example_mission.cc │ └── example_mission.h ├── http_server │ ├── .DS_Store │ ├── CMakeLists.txt │ ├── http_server.cc │ ├── http_server.h │ ├── node_channel.cc │ ├── node_channel.h │ └── proto │ │ ├── clustermng.pb.cc │ │ ├── clustermng.pb.h │ │ └── clustermng.proto ├── kl_mentain │ ├── .DS_Store │ ├── CMakeLists.txt │ ├── async_mysql.cc │ ├── async_mysql.h │ ├── config.cc │ ├── config.h │ ├── func_timer.cc │ ├── func_timer.h │ ├── global.h │ ├── kl_cluster.cc │ ├── kl_cluster.h │ ├── log.cc │ ├── log.h │ ├── machine_info.cc │ ├── machine_info.h │ ├── os.cc │ ├── os.h │ ├── query_list.h │ ├── shard.cc │ ├── shard.h │ ├── sys.cc │ ├── sys.h │ ├── thread_manager.cc │ ├── thread_manager.h │ ├── thread_pool.h │ ├── txn.cc │ └── txn.h ├── machine_mission │ ├── CMakeLists.txt │ ├── machine_mission.cc │ └── machine_mission.h ├── main.cc ├── other_mission │ ├── CMakeLists.txt │ ├── other_mission.cc │ └── other_mission.h ├── raft_ha │ ├── .DS_Store │ ├── CMakeLists.txt │ ├── generate_id.cc │ ├── generate_id.h │ ├── proto │ │ ├── raft_msg.pb.cc │ │ ├── raft_msg.pb.h │ │ └── raft_msg.proto │ ├── raft_ha.cc │ ├── raft_ha.h │ ├── raft_mission.cc │ └── raft_mission.h ├── rebuild_node │ ├── CMakeLists.txt │ ├── rebuild_node.cc │ └── rebuild_node.h ├── request_framework │ ├── .DS_Store │ ├── CMakeLists.txt │ ├── handleRequestThread.cc │ ├── handleRequestThread.h │ ├── missionRequest.cc │ ├── missionRequest.h │ ├── remoteTask.cc │ ├── remoteTask.h │ ├── requestBase.cc │ ├── requestBase.h │ ├── requestValueDefine.h │ ├── syncBrpc.cc │ └── syncBrpc.h ├── restore_cluster │ ├── CMakeLists.txt │ ├── restoreClusterMission.cc │ ├── restoreClusterMission.h │ ├── 
restoreComputeTask.cc │ ├── restoreComputeTask.h │ ├── restoreShardTask.cc │ └── restoreShardTask.h ├── sync_mission │ ├── .DS_Store │ ├── CMakeLists.txt │ ├── sync_mission.cc │ └── sync_mission.h ├── sys_config.h.in └── util_func │ ├── .DS_Store │ ├── CMakeLists.txt │ ├── error_code.cc │ ├── error_code.h │ ├── kl_mutex.h │ ├── meta_info.cc │ ├── meta_info.h │ ├── object_ptr.h │ ├── test_object_func.cc │ └── test_oject_ptr.cc └── test ├── hellentest ├── kunlun_test1.1.2 │ ├── include │ │ ├── check_by_get_state.inc │ │ ├── check_cn_write_read.inc │ │ ├── check_create_cluster_state.inc │ │ ├── check_delete_cluster_state.inc │ │ ├── check_delete_cluster_state.inc.bak │ │ ├── check_rbr_shard_state.inc │ │ ├── check_shard_state.inc │ │ ├── get_backup_status.inc │ │ ├── get_cluster_name.inc │ │ ├── get_restore_status.inc │ │ └── wait_http_request_finish.inc │ ├── r │ │ ├── add_comps.result │ │ ├── add_comps_existcluster.result │ │ ├── add_nodes.result │ │ ├── add_nodes_existcluster.result │ │ ├── add_shards.result │ │ ├── add_shards_existcluster.result │ │ ├── cluster_backup_restore.result │ │ ├── cluster_backup_restore_trans.result │ │ ├── creat_machine.result │ │ ├── del_comps_existcluster.result │ │ ├── del_nodes_existcluster.result │ │ ├── del_rbrcluster.result │ │ ├── del_shard_existcluster.result │ │ ├── expand_cluster.result │ │ ├── expand_cluster_trans.result │ │ ├── expand_cluster_trans_abnormal1.result │ │ ├── expand_cluster_trans_abnormal10.result │ │ ├── expand_cluster_trans_abnormal11.result │ │ ├── expand_cluster_trans_abnormal12.result │ │ ├── expand_cluster_trans_abnormal13.result │ │ ├── expand_cluster_trans_abnormal14.result │ │ ├── expand_cluster_trans_abnormal2.result │ │ ├── expand_cluster_trans_abnormal3.result │ │ ├── expand_cluster_trans_abnormal4.result │ │ ├── expand_cluster_trans_abnormal5.result │ │ ├── expand_cluster_trans_abnormal6.result │ │ ├── expand_cluster_trans_abnormal8.result │ │ ├── expand_cluster_trans_abnormal9.result │ │ ├── expand_cluster_trans_diffdb.result │ │ ├── expand_cluster_trans_samedb.result │ │ ├── get_status_rbr.result │ │ ├── install_delete_rbrcluster.result │ │ ├── install_rbrcluster.result │ │ ├── remote_control_killmysql.result │ │ └── remote_control_stopmysql.result │ └── t │ │ ├── add_comps.test │ │ ├── add_comps_existcluster.test │ │ ├── add_nodes.test │ │ ├── add_nodes_existcluster.test │ │ ├── add_shards.test │ │ ├── add_shards_existcluster.test │ │ ├── cluster_backup_restore.test │ │ ├── cluster_backup_restore_add_shard_trans3.test │ │ ├── cluster_backup_restore_add_shard_trans31.test │ │ ├── cluster_backup_restore_trans.test │ │ ├── cluster_table_repartition.test │ │ ├── creat_machine.test │ │ ├── del_comps_existcluster.test │ │ ├── del_debugpoint1.test │ │ ├── del_debugpoint2.test │ │ ├── del_debugpoint3.test │ │ ├── del_debugpoint4.test │ │ ├── del_debugpoint5.test │ │ ├── del_debugpoint6.test │ │ ├── del_nodes_existcluster.test │ │ ├── del_rbrcluster.test │ │ ├── del_shard_existcluster.test │ │ ├── expand_cluster.test │ │ ├── expand_cluster_trans.test │ │ ├── expand_cluster_trans_abnormal1.test │ │ ├── expand_cluster_trans_abnormal10.test │ │ ├── expand_cluster_trans_abnormal11.test │ │ ├── expand_cluster_trans_abnormal12.test │ │ ├── expand_cluster_trans_abnormal13.test │ │ ├── expand_cluster_trans_abnormal14.test │ │ ├── expand_cluster_trans_abnormal2.test │ │ ├── expand_cluster_trans_abnormal3.test │ │ ├── expand_cluster_trans_abnormal4.test │ │ ├── expand_cluster_trans_abnormal5.test │ │ ├── 
expand_cluster_trans_abnormal6.test │ │ ├── expand_cluster_trans_abnormal8.test │ │ ├── expand_cluster_trans_abnormal9.test │ │ ├── expand_cluster_trans_diffdb.test │ │ ├── expand_cluster_trans_samedb.test │ │ ├── get_status_rbr.test │ │ ├── install_delete_rbrcluster.test │ │ ├── install_rbrcluster.test │ │ ├── install_rbrcluster_proxysql.test │ │ ├── install_rbrcluster_share.test │ │ ├── install_rbrclustercpu.test │ │ ├── logical_backup.test │ │ ├── logicalbackup_restore_add_expand_seq.test │ │ ├── logicalbackup_restore_trans.test │ │ ├── manual_switch.test │ │ ├── masterslave_readonly.test │ │ ├── masterslave_stopslave.test │ │ ├── mirror_table.test │ │ ├── mirror_table_seq.test │ │ ├── remote_control_killcluster.test │ │ ├── remote_control_killmysql.test │ │ ├── remote_control_killpg.test │ │ ├── remote_control_stopmysql.test │ │ ├── set_noswitch.test │ │ ├── stop_slave_iothreadall.test │ │ ├── table_repartition.test │ │ └── update_instance_cgroup.test ├── kunlun_test1.2.1 │ ├── include │ │ ├── check_by_get_state.inc │ │ ├── check_cn_write_read.inc │ │ ├── check_create_cluster_state.inc │ │ ├── check_delete_cluster_state.inc │ │ ├── check_delete_cluster_state.inc.bak │ │ ├── check_rbr_shard_state.inc │ │ ├── check_shard_state.inc │ │ ├── get_backup_status.inc │ │ ├── get_cluster_name.inc │ │ ├── get_restore_status.inc │ │ └── wait_http_request_finish.inc │ └── t │ │ ├── add_comps_existcluster.test │ │ ├── add_nodes_existcluster.test │ │ ├── add_shards_existcluster.test │ │ ├── cluster_backup_restore_trans.test │ │ ├── cluster_backup_restore_trans_encrypt.test │ │ ├── cluster_table_repartition.test │ │ ├── create_rcr.test │ │ ├── del_comps_existcluster.test │ │ ├── del_nodes_existcluster.test │ │ ├── del_rbrcluster.test │ │ ├── del_rcr.test │ │ ├── del_shard_existcluster.test │ │ ├── expand_cluster_trans.test │ │ ├── install_rbrcluster.test │ │ ├── install_rbrcluster_degrade.test │ │ ├── logicalbackup_restore_trans.test │ │ ├── manualsw_rcr.test │ │ ├── manualsw_rcr2.test │ │ ├── mirror_table.test │ │ ├── modify_rcr1.test │ │ ├── modify_rcr11.test │ │ ├── modify_rcr2.test │ │ ├── modify_rcr22.test │ │ ├── modify_rcr3.test │ │ ├── modify_rcr33.test │ │ ├── rebuild_node.test │ │ ├── remote_control_killcluster.test │ │ ├── remote_control_killmysql.test │ │ ├── remote_control_killpg.test │ │ ├── remote_control_stopmysql.test │ │ └── smokeTest.py └── testcase_reference_tidb │ └── testcase │ ├── constraint_test5.test │ ├── constraint_test6.test │ ├── constraint_test7.test │ ├── datatype_test12.test │ ├── dual_function13.test │ ├── index_test8.test │ ├── index_test9.test │ ├── mode_management_test1.test │ ├── mvcc_test10.test │ ├── partition_tabele14.test │ ├── pl_sql15.test │ ├── sql92_test11.test │ ├── table_structure16.test │ ├── table_structure17.test │ ├── tableobject_management_test2.test │ ├── usergrant_management_test4.test │ └── viewobject_management_test3.test ├── include └── wait_http_request_finish.inc ├── r └── action_1st.result ├── suite ├── cluster_manager │ ├── include │ │ ├── README │ │ ├── add_computer_assign_computer.inc │ │ ├── add_computer_without_assign.inc │ │ ├── add_node_assign_storage.inc │ │ ├── add_node_without_assign.inc │ │ ├── add_shard_assign_storage.inc │ │ ├── add_shard_without_assign.inc │ │ ├── check_computer_state.inc │ │ ├── check_create_cluster_state.inc │ │ ├── check_delete_cluster_state.inc │ │ ├── check_delete_computer_state.inc │ │ ├── check_delete_node_state.inc │ │ ├── check_delete_shard_state.inc │ │ ├── check_rbr_shard_state.inc │ │ ├── 
create_cluster_assign_computer.inc │ │ ├── create_cluster_assign_storage.inc │ │ ├── create_cluster_assign_storage_computer.inc │ │ ├── create_cluster_without_assign.inc │ │ ├── delete_cluster_input_params.inc │ │ ├── delete_computer_input_params.inc │ │ ├── delete_node_input_params.inc │ │ ├── delete_shard_input_params.inc │ │ ├── get_http_request_result.inc │ │ ├── pre_install_cluster.inc │ │ └── rebuild_node_input_params.inc │ ├── r │ │ └── README │ └── t │ │ ├── README │ │ ├── create_cluster_assign_storage_computer.test │ │ ├── create_cluster_without_assign.test │ │ ├── delete_cluster_by_cluster_id.test │ │ ├── loop_create_and_delete_comps.test │ │ └── loop_create_and_delete_shards.test └── consfailover │ ├── README │ └── loop_multi_process_transfer.py └── t └── action_1st.test /.gitattributes: -------------------------------------------------------------------------------- 1 | libbrpc.a filter=lfs diff=lfs merge=lfs -text 2 | libprotoc.a filter=lfs diff=lfs merge=lfs -text 3 | libprotobuf.a filter=lfs diff=lfs merge=lfs -text 4 | *.a filter=lfs diff=lfs merge=lfs -text 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | bld 2 | bld/* 3 | bld* 4 | build 5 | build/* 6 | build* 7 | *.so 8 | .clang_complete 9 | install/* 10 | *.o 11 | *.a 12 | .vscode/* 13 | CsdbBuild.sh 14 | GPATH 15 | GRTAGS 16 | GTAGS 17 | -------------------------------------------------------------------------------- /Commons_Clause_License: -------------------------------------------------------------------------------- 1 | “Commons Clause” License Condition v1.0 2 | 3 | The Software is provided to you by the Licensor under the License, as defined below, subject to the following condition. 4 | 5 | Without limiting other conditions in the License, the grant of rights under the License will not include, and the License does not grant to you, the right to Sell the Software. 6 | 7 | For purposes of the foregoing, “Sell” means practicing any or all of the rights granted to you under the License to provide to third parties, for a fee or other consideration (including without limitation fees for hosting or consulting/ support services related to the Software), a product or service whose value derives, entirely or substantially, from the functionality of the Software. Any license notice or attribution required by the License must also include this Commons Clause License Condition notice. 8 | 9 | Software: kunlun distributed DBMS cluster manager (cluster_mgr) 10 | 11 | License: Apache 2.0 12 | 13 | Licensor: zettadb.com 泽拓科技(深圳)有限责任公司 ZettaDB inc. 14 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | This source code is licensed under both the Apache 2.0 License as detailed in the 2 | LICENSE file, and combined with Common Clause Condition 1.0 as detailed in the 3 | Commons_Clause_License file, by ZettaDB inc, and/or equivalently the 4 | 泽拓科技(深圳)有限责任公司 incorporated in People's Republic of China. 5 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | The cluster manager process maintains correct working status for storage shards and meta data shard of one or more Kunlun DDCs. 
4 | You can build this software from source or download it from www.zettadb.com. 5 | 6 | # Building from source 7 | 8 | Use cmake to build. Typically one should create a build directory in the source root directory, call it 'bld', then in 'bld' do 9 | cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_DEBUG=0 -DCMAKE_INSTALL_PREFIX= && make -j8 10 | then run the command below, possibly as root, depending on where this software is being installed: 11 | make install 12 | 13 | # Usage 14 | 15 | Give cluster_mgr a metadata cluster's connection parameters and a list of cluster IDs for it to work on, plus a config file with properly set parameters. Edit a copy of the 'cluster_mgr.cnf' file, set its parameters according to the comments in it, and then start the cluster_mgr program with the correct parameters. 16 | 17 | Start only one instance/process for each metadata shard, otherwise errors could occur under extreme conditions. 18 | For this software to work correctly, you must keep the assumptions below true: 19 | 0. The metadata shard node provided in the config file (meta_svr_ip and meta_svr_port) is really a current, effective node of the metadata shard, and it contains the latest metadata nodes. 20 | 1. A shard's mysqld processes are all up and running; it is the responsibility of the local Linux system's service manager or cron to keep them up and running. 21 | 2. The metadata shard always has nearly the latest meta/storage shard topology info, or will be updated very soon. Inconsistency is allowed, but the time window should be small, otherwise leftover prepared transactions may not be discovered and ended. So if you add/remove a node to/from a shard, you must update the metadata in the metadata shard ASAP. 22 | 23 | -------------------------------------------------------------------------------- /doc/Interface.md: -------------------------------------------------------------------------------- 1 | ### 1 Initiate a cluster expansion job 2 | #### Request parameters 3 | ``` 4 | { 5 | "version": "1.0", 6 | "job_id":"", 7 | "job_type": "expand_cluster", 8 | "timestamp" : "1435749309", 9 | "user_name": "kunlun_test", 10 | "paras": { 11 | "cluster_id": "1", 12 | "dst_shard_id": "5", 13 | "src_shard_id": "2", 14 | "table_list": [ 15 | "sbtest.public.sbtest1", 16 | "sbtest.public.sbtest2" 17 | ] 18 | } 19 | } 20 | ``` 21 | #### Response parameters 22 | ``` 23 | { 24 | "version": "1.0", 25 | "error_code":"", 26 | "error_info":"", 27 | "extra_info":"", 28 | "timestamp" : "1435749309", 29 | "status":"", 30 | "job_id":"", 31 | "attachment":{} 32 | } 33 | ``` 34 | ### 2 Query the status of an expansion job 35 | #### Request parameters 36 | ``` 37 | { 38 | "version": "1.0", 39 | "job_id": "1", 40 | "job_type":"", 41 | "timestamp" : "1435749309", 42 | "paras": {} 43 | } 44 | ``` 45 | #### Response parameters 46 | ``` 47 | { 48 | "version": "1.0", 49 | "error_code":"", 50 | "error_info":"", 51 | "extra_info":"", 52 | "timestamp" : "1435749309", 53 | "status":"", 54 | "job_id":"", 55 | "attachment":{} 56 | } 57 | ``` 58 | -------------------------------------------------------------------------------- /resource/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zettadb/cluster_mgr/74b6554d8cee720736f251e29fc3c7a7bc12fc36/resource/.DS_Store -------------------------------------------------------------------------------- /resource/._.DS_Store: -------------------------------------------------------------------------------- 1 | Mac OS X  2Fx @ATTRxx -------------------------------------------------------------------------------- /resource/script/.DS_Store: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/zettadb/cluster_mgr/74b6554d8cee720736f251e29fc3c7a7bc12fc36/resource/script/.DS_Store -------------------------------------------------------------------------------- /resource/script/._.DS_Store: -------------------------------------------------------------------------------- 1 | Mac OS X  2Fx @ATTRxx -------------------------------------------------------------------------------- /resource/script/restart_cluster_mgr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ./stop_cluster_mgr.sh 3 | ./start_cluster_mgr.sh 4 | -------------------------------------------------------------------------------- /resource/script/start_cluster_mgr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cnfpath=`realpath ../conf/cluster_mgr.cnf` 4 | 5 | num=`ps -ef | grep ${cnfpath} | grep -v grep | grep -v vim | wc -l` 6 | [ $num -gt 0 ] && { 7 | echo `pwd`"/cluster_mgr is running, so quit to start again" 8 | exit 0 9 | } 10 | 11 | ./cluster_mgr ${cnfpath} >../log/std.log 2>&1 12 | 13 | ps -ef | grep ${cnfpath} | grep -v grep 14 | -------------------------------------------------------------------------------- /resource/script/stop_cluster_mgr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #killall -9 cluster_mgr; 4 | BINPATH=`pwd` 5 | SBINPATH=`eval echo "${BINPATH}"` 6 | FPATH=${SBINPATH%/*} 7 | 8 | ps -ef | grep cluster_mgr | grep ${FPATH} | grep -v grep | grep -v vim | awk '{print $2}' | xargs kill -9 9 | -------------------------------------------------------------------------------- /src/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zettadb/cluster_mgr/74b6554d8cee720736f251e29fc3c7a7bc12fc36/src/.DS_Store -------------------------------------------------------------------------------- /src/._.DS_Store: -------------------------------------------------------------------------------- 1 | Mac OS X  2Fx @ATTRxx -------------------------------------------------------------------------------- /src/._main.cc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zettadb/cluster_mgr/74b6554d8cee720736f251e29fc3c7a7bc12fc36/src/._main.cc -------------------------------------------------------------------------------- /src/.gitignore: -------------------------------------------------------------------------------- 1 | bld 2 | bld/* 3 | bld* 4 | .clang_complete 5 | -------------------------------------------------------------------------------- /src/backup_storage/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(backup_storage OBJECT backup_storage.cc) 2 | target_include_directories(backup_storage INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 3 | target_include_directories(backup_storage PUBLIC "${PROJECT_SOURCE_DIR}/src") 4 | target_include_directories(backup_storage PUBLIC "${VENDOR_OUTPUT_PATH}/include") 5 | target_include_directories(backup_storage PUBLIC "${PROJECT_BINARY_DIR}") 6 | target_include_directories(backup_storage PUBLIC "${ZETTALIB_INCLUDE_PATH}") 7 | -------------------------------------------------------------------------------- /src/backup_storage/backup_storage.h: 
-------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2021 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 6 | */ 7 | #ifndef _BACKUP_STORAGE_H_ 8 | #define _BACKUP_STORAGE_H_ 9 | #include "http_server/node_channel.h" 10 | #include "request_framework/missionRequest.h" 11 | 12 | namespace kunlun { 13 | 14 | class BackupStorage : public ::MissionRequest { 15 | typedef MissionRequest super; 16 | 17 | private: 18 | ClusterRequestTypes request_type_; 19 | std::string job_id; 20 | std::string job_status; 21 | std::string job_error_code; 22 | std::string job_error_info; 23 | 24 | public: 25 | explicit BackupStorage(Json::Value *doc) : super(doc){}; 26 | ~BackupStorage(){}; 27 | 28 | void CreateBackupStorage(); 29 | void UpdateBackupStorage(); 30 | void DeleteBackupStorage(); 31 | bool update_operation_record(); 32 | virtual bool ArrangeRemoteTask() override final; 33 | virtual bool SetUpMisson() override { return true; } 34 | virtual bool TearDownMission() override { return true; } 35 | virtual bool FillRequestBodyStImpl() override { return true; } 36 | virtual void ReportStatus() override {} 37 | }; 38 | } // namespace kunlun 39 | #endif /*_BACKUP_STORAGE_H_*/ 40 | -------------------------------------------------------------------------------- /src/cluster_auto/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(cluster_auto OBJECT 2 | refresh_shard.cc ) 3 | target_include_directories(cluster_auto INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 4 | target_include_directories(cluster_auto PUBLIC "${PROJECT_SOURCE_DIR}/src") 5 | target_include_directories(cluster_auto PUBLIC "${VENDOR_OUTPUT_PATH}/include") 6 | target_include_directories(cluster_auto PUBLIC "${PROJECT_BINARY_DIR}") 7 | target_include_directories(cluster_auto PUBLIC "${ZETTALIB_INCLUDE_PATH}") -------------------------------------------------------------------------------- /src/cluster_auto/refresh_shard.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2021 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 
6 | */ 7 | 8 | #ifndef _CLUSTER_MGR_REFRESH_SHARD_ 9 | #define _CLUSTER_MGR_REFRESH_SHARD_ 10 | 11 | #include 12 | #include 13 | #include "kl_mentain/async_mysql.h" 14 | #include "zettalib/errorcup.h" 15 | #include "zettalib/zthread.h" 16 | #include "kl_mentain/shard.h" 17 | #include "kl_mentain/thread_pool.h" 18 | #include "util_func/object_ptr.h" 19 | 20 | using namespace kunlun; 21 | 22 | namespace kunlun 23 | { 24 | 25 | class CRefreshShard : public ZThread, public ErrorCup { 26 | public: 27 | CRefreshShard() {} 28 | virtual ~CRefreshShard() { 29 | if(tpool_) 30 | delete tpool_; 31 | } 32 | 33 | int run(); 34 | 35 | void CheckAndDispatchAutoBackupMission(int state); 36 | void CheckAndDispatchAutoBackupComputeMission(int state); 37 | 38 | void CheckShardBackupState(ObjectPtr shard); 39 | bool BackupByRpc(const std::string& host); 40 | 41 | void CheckMetaShardBackupState(ObjectPtr shard); 42 | 43 | void LoopShardBackupState(int state); 44 | 45 | CThreadPool* GetThreadPool() { 46 | return tpool_; 47 | } 48 | 49 | public: 50 | static void BackupCallBack(Json::Value& root, void *arg); 51 | 52 | private: 53 | CThreadPool* tpool_; 54 | }; 55 | 56 | } 57 | #endif -------------------------------------------------------------------------------- /src/cluster_collection/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zettadb/cluster_mgr/74b6554d8cee720736f251e29fc3c7a7bc12fc36/src/cluster_collection/.DS_Store -------------------------------------------------------------------------------- /src/cluster_collection/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(cluster_collection OBJECT 2 | machine_alive.cc 3 | prometheus_control.cc) 4 | target_include_directories(cluster_collection INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 5 | target_include_directories(cluster_collection PUBLIC "${PROJECT_SOURCE_DIR}/src") 6 | target_include_directories(cluster_collection PUBLIC "${VENDOR_OUTPUT_PATH}/include") 7 | target_include_directories(cluster_collection PUBLIC "${PROJECT_BINARY_DIR}") 8 | target_include_directories(cluster_collection PUBLIC "${ZETTALIB_INCLUDE_PATH}") -------------------------------------------------------------------------------- /src/cluster_collection/machine_alive.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2022 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 
6 | */ 7 | 8 | #ifndef _CLUSTER_MGR_MACHINE_ALIVE_H_ 9 | #define _CLUSTER_MGR_MACHINE_ALIVE_H_ 10 | #include 11 | #include 12 | #include 13 | #include "zettalib/errorcup.h" 14 | #include "zettalib/zthread.h" 15 | #include "http_server/node_channel.h" 16 | #include "util_func/object_ptr.h" 17 | #include 18 | 19 | namespace kunlun 20 | { 21 | 22 | typedef std::tuple Machine_Type; 23 | 24 | class CMachineAlive : public ZThread, public ErrorCup { 25 | public: 26 | CMachineAlive(int scan_interval) : scan_interval_(scan_interval) {} 27 | virtual ~CMachineAlive() {} 28 | 29 | int run(); 30 | 31 | void GetCheckMachineIpLists(std::vector& hosts); 32 | bool SyncPingNodeMgrWithTimeout(const std::string& hostaddr); 33 | void ScanMachineJob(const Machine_Type& mach_host); 34 | 35 | private: 36 | int scan_interval_; 37 | std::map mach_states_; 38 | std::map > host_channels_; 39 | }; 40 | 41 | } 42 | 43 | #endif -------------------------------------------------------------------------------- /src/cluster_debug/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(cluster_debug OBJECT 2 | cluster_debug.cc) 3 | target_include_directories(cluster_debug INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 4 | target_include_directories(cluster_debug PUBLIC "${PROJECT_SOURCE_DIR}/src") 5 | target_include_directories(cluster_debug PUBLIC "${VENDOR_OUTPUT_PATH}/include") 6 | target_include_directories(cluster_debug PUBLIC "${PROJECT_BINARY_DIR}") 7 | target_include_directories(cluster_debug PUBLIC "${ZETTALIB_INCLUDE_PATH}") -------------------------------------------------------------------------------- /src/cluster_debug/cluster_debug.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2022 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 
6 | */ 7 | 8 | #ifndef _CLUSTER_MGR_CLUSTER_DEBUG_H_ 9 | #define _CLUSTER_MGR_CLUSTER_DEBUG_H_ 10 | 11 | #ifndef NDEBUG 12 | #include "request_framework/missionRequest.h" 13 | 14 | namespace kunlun { 15 | 16 | class ClusterDebug : public ::MissionRequest { 17 | typedef MissionRequest super; 18 | 19 | public: 20 | explicit ClusterDebug(Json::Value *doc) : super(doc) { 21 | } 22 | virtual ~ClusterDebug(){}; 23 | 24 | virtual bool ArrangeRemoteTask() override { return true;} 25 | virtual bool SetUpMisson() override { return true; } 26 | virtual bool TearDownMission() override { return true; } 27 | virtual bool FillRequestBodyStImpl() override { return true; } 28 | virtual void ReportStatus() override {} 29 | virtual bool SyncTaskImpl() override { 30 | ParseJson(); 31 | return true; 32 | } 33 | 34 | private: 35 | void ParseJson(); 36 | }; 37 | 38 | } 39 | 40 | int _cm_keyword(const char* keyword, int state); 41 | 42 | #define CM_DEBUG_EXECUTE_IF(keyword, a1) \ 43 | do { \ 44 | if(_cm_keyword((keyword), 1)) { \ 45 | a1 \ 46 | } \ 47 | } while(0) 48 | 49 | #else 50 | 51 | #define CM_DEBUG_EXECUTE_IF(keywork, a1) \ 52 | do { \ 53 | } while(0) 54 | 55 | #endif 56 | 57 | #endif -------------------------------------------------------------------------------- /src/cluster_expand/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(cluster_expand OBJECT expand_mission.cc table_pick.cc) 2 | target_include_directories(cluster_expand INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 3 | target_include_directories(cluster_expand PUBLIC "${PROJECT_SOURCE_DIR}/src") 4 | target_include_directories(cluster_expand PUBLIC "${VENDOR_OUTPUT_PATH}/include") 5 | target_include_directories(cluster_expand PUBLIC "${PROJECT_BINARY_DIR}") 6 | target_include_directories(cluster_expand PUBLIC "${ZETTALIB_INCLUDE_PATH}") 7 | -------------------------------------------------------------------------------- /src/cluster_mission/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zettadb/cluster_mgr/74b6554d8cee720736f251e29fc3c7a7bc12fc36/src/cluster_mission/.DS_Store -------------------------------------------------------------------------------- /src/cluster_mission/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(cluster_mission OBJECT 2 | machine_mission.cc 3 | cluster_comm.cc 4 | create_cluster.cc 5 | delete_cluster.cc 6 | add_computer.cc 7 | add_shard.cc 8 | delete_computer.cc 9 | delete_shard.cc 10 | add_node.cc 11 | delete_node.cc) 12 | target_include_directories(cluster_mission INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 13 | target_include_directories(cluster_mission PUBLIC "${PROJECT_SOURCE_DIR}/src") 14 | target_include_directories(cluster_mission PUBLIC "${VENDOR_OUTPUT_PATH}/include") 15 | target_include_directories(cluster_mission PUBLIC "${PROJECT_BINARY_DIR}") 16 | target_include_directories(cluster_mission PUBLIC "${ZETTALIB_INCLUDE_PATH}") 17 | -------------------------------------------------------------------------------- /src/cluster_mission/machine_mission.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2022 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 
6 | */ 7 | 8 | #ifndef _CLUSTER_MGR_MACHINE_MISSION_H_ 9 | #define _CLUSTER_MGR_MACHINE_MISSION_H_ 10 | #include "http_server/node_channel.h" 11 | #include "request_framework/missionRequest.h" 12 | 13 | namespace kunlun 14 | { 15 | 16 | class MachineRequest : public MissionRequest { 17 | typedef MissionRequest super; 18 | public: 19 | explicit MachineRequest(Json::Value *doc) : super(doc), 20 | err_code_(0) {} 21 | virtual ~MachineRequest() {} 22 | 23 | virtual bool ArrangeRemoteTask() override final { return true; } 24 | virtual bool SetUpMisson() override final; 25 | virtual void DealLocal() override final; 26 | virtual bool TearDownMission() override final { return true; } 27 | virtual bool FillRequestBodyStImpl() override final { return true; } 28 | virtual void ReportStatus() override final { return; } 29 | virtual bool SyncTaskImpl() override final { return true; } 30 | 31 | virtual int GetErrorCode() override final { 32 | return err_code_; 33 | } 34 | bool UpdateOperationRecord(); 35 | protected: 36 | bool CreateMachine(); 37 | bool CreateStorageMachine(Json::Value& doc); 38 | bool CreateComputerMachine(Json::Value& doc); 39 | bool CheckMachineState(); 40 | bool DeleteMachine(); 41 | bool UpdateMachine(); 42 | 43 | private: 44 | int err_code_; 45 | std::string job_id_; 46 | std::string job_status_; 47 | std::string hostaddr_; 48 | std::string machine_type_; 49 | }; 50 | 51 | } 52 | 53 | #endif -------------------------------------------------------------------------------- /src/cluster_operator/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(cluster_operator OBJECT mysqlInstallRemote.cc shardInstallMission.cc postgresInstallRemote.cc computeInstallMission.cc) 2 | target_include_directories(cluster_operator INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 3 | target_include_directories(cluster_operator PUBLIC "${PROJECT_SOURCE_DIR}/src") 4 | target_include_directories(cluster_operator PUBLIC "${VENDOR_OUTPUT_PATH}/include") 5 | target_include_directories(cluster_operator PUBLIC "${PROJECT_BINARY_DIR}") 6 | target_include_directories(cluster_operator PUBLIC "${ZETTALIB_INCLUDE_PATH}") 7 | -------------------------------------------------------------------------------- /src/coldbackup/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(coldbackup OBJECT coldbackup.cc) 2 | target_include_directories(coldbackup INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 3 | target_include_directories(coldbackup PUBLIC "${PROJECT_SOURCE_DIR}/src") 4 | target_include_directories(coldbackup PUBLIC "${VENDOR_OUTPUT_PATH}/include") 5 | target_include_directories(coldbackup PUBLIC "${PROJECT_BINARY_DIR}") 6 | target_include_directories(coldbackup PUBLIC "${ZETTALIB_INCLUDE_PATH}") 7 | -------------------------------------------------------------------------------- /src/example_mission/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(example_mission OBJECT example_mission.cc) 2 | target_include_directories(example_mission INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 3 | target_include_directories(example_mission PUBLIC "${PROJECT_SOURCE_DIR}/src") 4 | target_include_directories(example_mission PUBLIC "${VENDOR_OUTPUT_PATH}/include") 5 | target_include_directories(example_mission PUBLIC "${PROJECT_BINARY_DIR}") 6 | target_include_directories(example_mission PUBLIC "${ZETTALIB_INCLUDE_PATH}") 7 | 
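The example_mission module built above is an empty placeholder (example_mission.cc below only includes its own header). For orientation, here is a minimal, hypothetical sketch of what a new mission subclass could look like, mirroring the override set of the real mission classes declared elsewhere in this tree (backup_storage.h, cluster_debug.h); the class name ExampleMissionSketch is illustrative only and is not part of the repository.

```
/*
  Hypothetical sketch only -- not part of the repository. It mirrors the
  declarations of the real mission classes (e.g. BackupStorage, ClusterDebug)
  that derive from MissionRequest.
*/
#include "request_framework/missionRequest.h"

namespace kunlun {

class ExampleMissionSketch : public ::MissionRequest {
  typedef MissionRequest super;

public:
  // The request framework hands each mission its parsed JSON request body.
  explicit ExampleMissionSketch(Json::Value *doc) : super(doc) {}
  virtual ~ExampleMissionSketch() {}

  // Queue per-node RemoteTask objects here; a do-nothing mission returns true.
  virtual bool ArrangeRemoteTask() override { return true; }
  // Spelling matches the framework's virtual (SetUpMisson, not SetUpMission).
  virtual bool SetUpMisson() override { return true; }
  virtual bool TearDownMission() override { return true; }
  virtual bool FillRequestBodyStImpl() override { return true; }
  virtual void ReportStatus() override {}
};

} // namespace kunlun
```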
-------------------------------------------------------------------------------- /src/example_mission/example_mission.cc: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2021 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 6 | */ 7 | #include "example_mission/example_mission.h" 8 | -------------------------------------------------------------------------------- /src/http_server/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zettadb/cluster_mgr/74b6554d8cee720736f251e29fc3c7a7bc12fc36/src/http_server/.DS_Store -------------------------------------------------------------------------------- /src/http_server/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(http_server OBJECT http_server.cc node_channel.cc proto/clustermng.pb.cc) 2 | target_include_directories(http_server INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 3 | target_include_directories(http_server PUBLIC "${PROJECT_SOURCE_DIR}/src") 4 | target_include_directories(http_server PUBLIC "${VENDOR_OUTPUT_PATH}/include") 5 | target_include_directories(http_server PUBLIC "${PROJECT_BINARY_DIR}") 6 | target_include_directories(http_server PUBLIC "${ZETTALIB_INCLUDE_PATH}") 7 | -------------------------------------------------------------------------------- /src/http_server/proto/clustermng.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto2"; 2 | package kunlunrpc; 3 | 4 | option cc_generic_services = true; 5 | 6 | message HttpRequest { 7 | }; 8 | message HttpResponse { 9 | }; 10 | 11 | //Rest of the http request content in body fetched from `attachment` 12 | 13 | service HttpService { rpc Emit(HttpRequest) returns (HttpResponse); }; 14 | -------------------------------------------------------------------------------- /src/kl_mentain/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zettadb/cluster_mgr/74b6554d8cee720736f251e29fc3c7a7bc12fc36/src/kl_mentain/.DS_Store -------------------------------------------------------------------------------- /src/kl_mentain/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(kl_mentain OBJECT 2 | config.cc 3 | os.cc 4 | shard.cc 5 | sys.cc 6 | txn.cc 7 | kl_cluster.cc 8 | async_mysql.cc 9 | func_timer.cc 10 | ) 11 | target_include_directories(kl_mentain INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 12 | target_include_directories(kl_mentain PUBLIC "${PROJECT_SOURCE_DIR}/src") 13 | target_include_directories(kl_mentain PUBLIC "${VENDOR_OUTPUT_PATH}/include") 14 | target_include_directories(kl_mentain PUBLIC "${PROJECT_BINARY_DIR}") 15 | target_include_directories(kl_mentain PUBLIC "${ZETTALIB_INCLUDE_PATH}") 16 | -------------------------------------------------------------------------------- /src/kl_mentain/log.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2021 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 
6 | */ 7 | 8 | #ifndef LOG_H 9 | #define LOG_H 10 | #include "sys_config.h" 11 | #include "global.h" 12 | #include 13 | #include 14 | #include 15 | 16 | class Logger 17 | { 18 | public: 19 | enum enum_log_verbosity 20 | { 21 | ERROR, WARNING, INFO, LOG, DEBUG1, DEBUG2, DEBUG3 22 | }; 23 | 24 | static const char *log_verbosity_options[7]; 25 | private: 26 | std::atomic doing_rotation; 27 | pthread_mutex_t mtx; 28 | int log_fd; 29 | size_t cur_bytes; 30 | std::string log_file_path; 31 | int rotate(); 32 | Logger() : doing_rotation(false), 33 | log_fd(-1), cur_bytes(0) 34 | { 35 | pthread_mutex_init(&mtx, NULL); 36 | } 37 | static Logger*m_inst; 38 | int generate_log_fn(const std::string&lfp, std::string&fn); 39 | public: 40 | ~Logger() { pthread_mutex_destroy(&mtx); } 41 | static int create_instance(); 42 | int init(const std::string&lfp); 43 | static Logger*get_instance() { return m_inst; } 44 | ssize_t do_syslog(const char *file, const char *func, int lineno, 45 | int elevel, const char *fmt,...); 46 | }; 47 | 48 | extern std::string log_file_path; 49 | extern int64_t max_log_file_size; 50 | extern Logger::enum_log_verbosity log_verbosity; 51 | #define syslog(elevel, ...) Logger::get_instance()->do_syslog(__FILE__, __func__, __LINE__, elevel, __VA_ARGS__) 52 | #endif // !LOG_H 53 | -------------------------------------------------------------------------------- /src/kl_mentain/os.cc: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2021 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 6 | */ 7 | 8 | #include "sys_config.h" 9 | #include "global.h" 10 | #include "os.h" 11 | #include 12 | #include 13 | 14 | /* 15 | Make sure count bytes have been written, when write() is interrupted 16 | by a signal. 17 | */ 18 | ssize_t my_write(int fd, const void *buf, size_t count) 19 | { 20 | int ret = 0; 21 | retry: 22 | ret = write(fd, buf, count); 23 | if (ret > 0 && ret < count) 24 | { 25 | buf = ((char*)buf) + ret; 26 | count -= ret; 27 | goto retry; 28 | } 29 | if (ret < 0 && errno == EINTR) 30 | goto retry; 31 | return ret; 32 | } 33 | 34 | /* 35 | * Set up a signal handler, with SA_RESTART, for signal "signo" 36 | * 37 | * Returns the previous handler. 38 | */ 39 | sigfunc_t handle_signal(int signo, sigfunc_t func) 40 | { 41 | struct sigaction act, oact; 42 | 43 | act.sa_handler = func; 44 | sigemptyset(&act.sa_mask); 45 | act.sa_flags = SA_RESTART; 46 | //#ifdef SA_NOCLDSTOP 47 | if (signo == SIGCHLD) 48 | act.sa_flags |= SA_NOCLDSTOP; 49 | //#endif 50 | if (sigaction(signo, &act, &oact) < 0) 51 | return SIG_ERR; 52 | return oact.sa_handler; 53 | } 54 | 55 | -------------------------------------------------------------------------------- /src/kl_mentain/os.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2021 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 
6 | */ 7 | 8 | #ifndef OS_H 9 | #define OS_H 10 | #include "sys_config.h" 11 | #include 12 | #include "global.h" 13 | 14 | #define TLS_VAR __thread 15 | 16 | typedef void (*sigfunc_t) (int signo); 17 | sigfunc_t handle_signal(int signo, sigfunc_t func); 18 | 19 | ssize_t my_write(int fd, const void *buf, size_t count); 20 | #endif // !OS_H 21 | -------------------------------------------------------------------------------- /src/kl_mentain/thread_manager.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2021 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | class Shard; 12 | class Thread; 13 | 14 | void mask_signals(); 15 | 16 | class Thread_manager 17 | { 18 | public: 19 | static int do_exit; 20 | const static unsigned long pthread_stack_size = 65535; 21 | private: 22 | friend class Thread; 23 | std::vector thrds; 24 | pthread_mutex_t mtx; 25 | pthread_cond_t cond; 26 | pthread_mutexattr_t mtx_attr; 27 | pthread_attr_t thr_attr; 28 | pthread_t main_thread_id; 29 | pthread_t signal_hdlr_thrd; 30 | static Thread_manager *m_inst; 31 | Thread_manager(); 32 | public: 33 | ~Thread_manager(); 34 | static Thread_manager *get_instance() 35 | { 36 | if (!m_inst) m_inst = new Thread_manager(); 37 | return m_inst; 38 | } 39 | 40 | void join_all(); 41 | void start_worker_thread(); 42 | void start_signal_handler(); 43 | void sleep_wait(Thread*thrd, int milli_seconds); 44 | void wakeup_all(); 45 | }; 46 | 47 | 48 | class Thread 49 | { 50 | friend class Thread_manager; 51 | pthread_t m_hdl; 52 | Shard *cur_shard; 53 | int kicks; 54 | public: 55 | Thread() : cur_shard(NULL), kicks(0) 56 | {} 57 | 58 | ~Thread() 59 | { 60 | 61 | } 62 | 63 | /* 64 | Return true if there are more kicks. 65 | */ 66 | bool decr_kicks() 67 | { 68 | bool ret = (kicks-- > 0); 69 | 70 | if (kicks < 0) kicks = 0; 71 | return ret; 72 | } 73 | 74 | int incr_kicks() 75 | { 76 | return ++kicks; 77 | } 78 | 79 | void set_pthread_hdl(pthread_t hdl) 80 | { 81 | Scopped_mutex(Thread_manager::get_instance()->mtx); 82 | m_hdl = hdl; 83 | } 84 | 85 | void set_shard(Shard*s) 86 | { 87 | Scopped_mutex(Thread_manager::get_instance()->mtx); 88 | cur_shard = s; 89 | } 90 | 91 | void run(); 92 | }; 93 | -------------------------------------------------------------------------------- /src/kl_mentain/txn.cc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zettadb/cluster_mgr/74b6554d8cee720736f251e29fc3c7a7bc12fc36/src/kl_mentain/txn.cc -------------------------------------------------------------------------------- /src/kl_mentain/txn.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2022 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 
6 | */ 7 | 8 | #include "sys_config.h" 9 | #include "global.h" 10 | -------------------------------------------------------------------------------- /src/machine_mission/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(machine_mission OBJECT machine_mission.cc) 2 | target_include_directories(machine_mission INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 3 | target_include_directories(machine_mission PUBLIC "${PROJECT_SOURCE_DIR}/src") 4 | target_include_directories(machine_mission PUBLIC "${VENDOR_OUTPUT_PATH}/include") 5 | target_include_directories(machine_mission PUBLIC "${PROJECT_BINARY_DIR}") 6 | target_include_directories(machine_mission PUBLIC "${ZETTALIB_INCLUDE_PATH}") 7 | -------------------------------------------------------------------------------- /src/other_mission/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(other_mission OBJECT other_mission.cc) 2 | target_include_directories(other_mission INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 3 | target_include_directories(other_mission PUBLIC "${PROJECT_SOURCE_DIR}/src") 4 | target_include_directories(other_mission PUBLIC "${VENDOR_OUTPUT_PATH}/include") 5 | target_include_directories(other_mission PUBLIC "${PROJECT_BINARY_DIR}") 6 | target_include_directories(other_mission PUBLIC "${ZETTALIB_INCLUDE_PATH}") 7 | -------------------------------------------------------------------------------- /src/raft_ha/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zettadb/cluster_mgr/74b6554d8cee720736f251e29fc3c7a7bc12fc36/src/raft_ha/.DS_Store -------------------------------------------------------------------------------- /src/raft_ha/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(raft_ha OBJECT 2 | raft_ha.cc 3 | raft_mission.cc 4 | proto/raft_msg.pb.cc 5 | generate_id.cc) 6 | target_include_directories(raft_ha INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 7 | target_include_directories(raft_ha PUBLIC "${PROJECT_SOURCE_DIR}/src") 8 | target_include_directories(raft_ha PUBLIC "${VENDOR_OUTPUT_PATH}/include") 9 | target_include_directories(raft_ha PUBLIC "${PROJECT_BINARY_DIR}") 10 | target_include_directories(raft_ha PUBLIC "${ZETTALIB_INCLUDE_PATH}") 11 | -------------------------------------------------------------------------------- /src/raft_ha/generate_id.cc: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2022 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 
6 | */ 7 | 8 | //#include "generate_id.h" 9 | #include "raft_ha.h" 10 | 11 | extern NodeHa* g_cluster_ha_handler; 12 | 13 | void CGenerateId::Init(uint64_t id) { 14 | id_num_.store(id + 1000); 15 | } 16 | 17 | uint64_t CGenerateId::GetInt64Id() { 18 | uint64_t id = id_num_.fetch_add(1, std::memory_order_relaxed); 19 | g_cluster_ha_handler->InsertMsg("KUNLUN_GLOBAL_ID", std::to_string(id_num_.load(std::memory_order_acquire))); 20 | return id; 21 | } 22 | 23 | std::string CGenerateId::GetStringId() { 24 | uint64_t id = id_num_.fetch_add(1, std::memory_order_relaxed); 25 | g_cluster_ha_handler->InsertMsg("KUNLUN_GLOBAL_ID", std::to_string(id_num_.load(std::memory_order_acquire))); 26 | return std::to_string(id); 27 | } 28 | 29 | void CGenerateId::SlaveClusterMgrUpdateId(uint64_t id) { 30 | id_num_.store(id); 31 | } -------------------------------------------------------------------------------- /src/raft_ha/generate_id.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2022 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 6 | */ 7 | 8 | /* 9 | * generate global id for consfailover 10 | * id must increase 11 | */ 12 | #include 13 | #include 14 | 15 | class CGenerateId { 16 | public: 17 | CGenerateId() { 18 | std::atomic_init(&id_num_, 0); 19 | } 20 | virtual ~CGenerateId() {} 21 | 22 | void Init(uint64_t id); 23 | uint64_t GetInt64Id(); 24 | std::string GetStringId(); 25 | 26 | //slave cluster_mgr update id 27 | void SlaveClusterMgrUpdateId(uint64_t id); 28 | 29 | private: 30 | std::atomic id_num_; 31 | }; -------------------------------------------------------------------------------- /src/raft_ha/proto/raft_msg.proto: -------------------------------------------------------------------------------- 1 | syntax="proto2"; 2 | package raft_ha; 3 | 4 | message raft_msg { 5 | required int32 op = 1; 6 | required string key = 2; 7 | optional string msg = 3; 8 | }; -------------------------------------------------------------------------------- /src/raft_ha/raft_mission.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2022 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 
6 | */ 7 | 8 | #ifndef _CLUSTER_MGR_RAFT_MISSION_H_ 9 | #define _CLUSTER_MGR_RAFT_MISSION_H_ 10 | #include "http_server/node_channel.h" 11 | #include "request_framework/missionRequest.h" 12 | 13 | namespace kunlun 14 | { 15 | 16 | class RaftMission : public MissionRequest { 17 | typedef MissionRequest super; 18 | public: 19 | RaftMission(Json::Value *doc) : super(doc), err_code_(0) {} 20 | virtual ~RaftMission() {} 21 | 22 | virtual bool ArrangeRemoteTask() override final { return true; } 23 | virtual bool SetUpMisson() override final; 24 | virtual void DealLocal() override final; 25 | virtual bool InitFromInternal() override { return true; } 26 | virtual void CompleteGeneralJobInfo() override {} 27 | virtual bool TearDownMission() override final { return true; } 28 | virtual bool FillRequestBodyStImpl() override final { return true; } 29 | virtual void ReportStatus() override final { return; } 30 | 31 | bool UpdateOperationRecord(); 32 | 33 | private: 34 | bool TransferLeaderParse(Json::Value& doc); 35 | bool AddPeerParse(Json::Value& doc); 36 | bool RemovePeerParse(Json::Value& doc); 37 | 38 | void TransferLeader(); 39 | void AddPeer(); 40 | void RemovePeer(); 41 | 42 | void GetCurrentConfPeer(); 43 | bool SyncNewConfToDisk(); 44 | private: 45 | int err_code_; 46 | std::string job_id_; 47 | std::string job_status_; 48 | std::string error_info_; 49 | 50 | std::string task_type_; 51 | std::string target_leader_; 52 | std::string peer_; 53 | std::vector cur_peers_; 54 | }; 55 | 56 | } // namespace kunlun 57 | 58 | #endif -------------------------------------------------------------------------------- /src/rebuild_node/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(rebuild_node OBJECT 2 | rebuild_node.cc) 3 | target_include_directories(rebuild_node INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") 4 | target_include_directories(rebuild_node PUBLIC "${PROJECT_SOURCE_DIR}/src") 5 | target_include_directories(rebuild_node PUBLIC "${VENDOR_OUTPUT_PATH}/include") 6 | target_include_directories(rebuild_node PUBLIC "${PROJECT_BINARY_DIR}") 7 | target_include_directories(rebuild_node PUBLIC "${ZETTALIB_INCLUDE_PATH}") -------------------------------------------------------------------------------- /src/request_framework/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zettadb/cluster_mgr/74b6554d8cee720736f251e29fc3c7a7bc12fc36/src/request_framework/.DS_Store -------------------------------------------------------------------------------- /src/request_framework/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(request_framework OBJECT 2 | handleRequestThread.cc 3 | missionRequest.cc 4 | requestBase.cc 5 | remoteTask.cc) 6 | target_include_directories(request_framework INTERFACE "${PROJECT_SOURCE_DIR}") 7 | target_include_directories(request_framework PUBLIC "${PROJECT_SOURCE_DIR}/src") 8 | target_include_directories(request_framework PUBLIC "${VENDOR_OUTPUT_PATH}/include") 9 | target_include_directories(request_framework PUBLIC "${PROJECT_BINARY_DIR}") 10 | target_include_directories(request_framework PUBLIC "${ZETTALIB_INCLUDE_PATH}") 11 | -------------------------------------------------------------------------------- /src/request_framework/handleRequestThread.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2021 ZettaDB inc. 
All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 6 | */ 7 | #ifndef _CLUSTER_MGR_HANDLEREQUEST_THREAD_H_ 8 | #define _CLUSTER_MGR_HANDLEREQUEST_THREAD_H_ 9 | 10 | #include "boost/lockfree/spsc_queue.hpp" 11 | #include "requestBase.h" 12 | #include "zettalib/errorcup.h" 13 | #include "zettalib/zthread.h" 14 | #define CLUSTER_MANAGER_MAX_WAIT_REQUEST 10240 15 | using namespace kunlun; 16 | class HandleRequestThread : public ZThread, public ErrorCup { 17 | static void* AsyncDealRequest(void *arg); 18 | public: 19 | HandleRequestThread(){}; 20 | ~HandleRequestThread(){}; 21 | void DispatchRequest(ObjectPtr); 22 | int run(); 23 | 24 | private: 25 | boost::lockfree::spsc_queue< 26 | ClusterRequest *, 27 | boost::lockfree::capacity > 28 | request_queue_; 29 | }; 30 | 31 | #endif /*_CLUSTER_MGR_HANDLEREQUEST_THREAD_H_*/ 32 | -------------------------------------------------------------------------------- /src/request_framework/missionRequest.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2021 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 6 | */ 7 | #ifndef _CLUSTER_MNG_MISSION_REQUEST_H_ 8 | #define _CLUSTER_MNG_MISSION_REQUEST_H_ 9 | 10 | #include "remoteTask.h" 11 | #include "requestBase.h" 12 | 13 | class MissionRequest : public ClusterRequest { 14 | typedef ClusterRequest super; 15 | 16 | public: 17 | MissionRequest():call_back_(nullptr),cb_context_(nullptr){}; 18 | explicit MissionRequest(Json::Value *doc) : super(doc),call_back_(nullptr),cb_context_(nullptr) { 19 | task_manager_ = nullptr; 20 | } 21 | virtual ~MissionRequest(); 22 | 23 | // sync task impl 24 | virtual bool SetUpSyncTaskImpl() { return SyncTaskImpl(); }; 25 | virtual bool SyncTaskImpl() { return true; }; 26 | virtual int GetErrorCode() { return 0; } 27 | virtual void SetUpImpl() override final; 28 | // user should add arrange remote task logic 29 | virtual bool ArrangeRemoteTask() = 0; 30 | // user shold add setup logic here 31 | virtual bool SetUpMisson() = 0; 32 | void DealRequestImpl() override final; 33 | virtual void DealLocal(); 34 | virtual void TearDownImpl() override final; 35 | virtual bool TearDownMission() = 0; 36 | 37 | TaskManager *get_task_manager() { return task_manager_; } 38 | 39 | void SetSerializeResult(const std::string& result); 40 | void set_cb(void (*call_back)(Json::Value& root, void *)) { 41 | call_back_ = call_back; 42 | } 43 | void set_cb_context(void *ctx) { 44 | cb_context_ = ctx; 45 | } 46 | protected: 47 | void (*call_back_)(Json::Value& root, void *); 48 | void *cb_context_; 49 | 50 | private: 51 | TaskManager *task_manager_; 52 | }; 53 | 54 | #endif /*_CLUSTER_MNG_MISSION_REQUEST_H_*/ 55 | -------------------------------------------------------------------------------- /src/request_framework/syncBrpc.cc: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2019-2021 ZettaDB inc. All rights reserved. 3 | 4 | This source code is licensed under Apache 2.0 License, 5 | combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 
--------------------------------------------------------------------------------
/src/request_framework/missionRequest.h:
--------------------------------------------------------------------------------
/*
  Copyright (c) 2019-2021 ZettaDB inc. All rights reserved.

  This source code is licensed under Apache 2.0 License,
  combined with Common Clause Condition 1.0, as detailed in the NOTICE file.
*/
#ifndef _CLUSTER_MNG_MISSION_REQUEST_H_
#define _CLUSTER_MNG_MISSION_REQUEST_H_

#include "remoteTask.h"
#include "requestBase.h"

class MissionRequest : public ClusterRequest {
  typedef ClusterRequest super;

public:
  MissionRequest():call_back_(nullptr),cb_context_(nullptr){};
  explicit MissionRequest(Json::Value *doc) : super(doc),call_back_(nullptr),cb_context_(nullptr) {
    task_manager_ = nullptr;
  }
  virtual ~MissionRequest();

  // sync task impl
  virtual bool SetUpSyncTaskImpl() { return SyncTaskImpl(); };
  virtual bool SyncTaskImpl() { return true; };
  virtual int GetErrorCode() { return 0; }
  virtual void SetUpImpl() override final;
  // user should add arrange remote task logic
  virtual bool ArrangeRemoteTask() = 0;
  // user should add setup logic here
  virtual bool SetUpMisson() = 0;
  void DealRequestImpl() override final;
  virtual void DealLocal();
  virtual void TearDownImpl() override final;
  virtual bool TearDownMission() = 0;

  TaskManager *get_task_manager() { return task_manager_; }

  void SetSerializeResult(const std::string& result);
  void set_cb(void (*call_back)(Json::Value& root, void *)) {
    call_back_ = call_back;
  }
  void set_cb_context(void *ctx) {
    cb_context_ = ctx;
  }
protected:
  void (*call_back_)(Json::Value& root, void *);
  void *cb_context_;

private:
  TaskManager *task_manager_;
};

#endif /*_CLUSTER_MNG_MISSION_REQUEST_H_*/
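MissionRequest keeps the SetUpImpl/DealRequestImpl/TearDownImpl sequence final and leaves three hooks pure virtual (SetUpMisson, ArrangeRemoteTask, TearDownMission), so a minimal subclass looks roughly like the sketch below. The class name DemoMission and its no-op bodies are invented for illustration; they are not part of the repository.

// Hypothetical minimal subclass, for illustration only (not repository code).
#include "request_framework/missionRequest.h"

class DemoMission : public MissionRequest {
  typedef MissionRequest super;

public:
  explicit DemoMission(Json::Value *doc) : super(doc) {}
  virtual ~DemoMission() {}

  // Parse job parameters out of the JSON body; nothing to do in this demo.
  virtual bool SetUpMisson() override { return true; }
  // Real missions queue RemoteTask objects on get_task_manager() here.
  virtual bool ArrangeRemoteTask() override { return true; }
  // Release whatever SetUpMisson() acquired.
  virtual bool TearDownMission() override { return true; }
};

Depending on what ClusterRequest itself declares, a concrete mission (RaftMission above is one example) usually overrides a few more hooks such as FillRequestBodyStImpl() and ReportStatus() as well.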
--------------------------------------------------------------------------------
/src/request_framework/syncBrpc.cc:
--------------------------------------------------------------------------------
/*
  Copyright (c) 2019-2021 ZettaDB inc. All rights reserved.

  This source code is licensed under Apache 2.0 License,
  combined with Common Clause Condition 1.0, as detailed in the NOTICE file.
*/
#include "syncBrpc.h"
#include "http_server/node_channel.h"
#include "kl_mentain/log.h"
#include "stdio.h"
#include "string.h"

static void CallBC(brpc::Controller *cntl, SyncBrpc *syncBrpc) {
  if (!cntl->Failed()) {
    syncBrpc->response = cntl->response_attachment().to_string();
    syncBrpc->result = true;
    return;
  } else
    syncBrpc->result = false;

  syslog(Logger::ERROR, "%s", cntl->ErrorText().c_str());
}

bool SyncBrpc::syncBrpcToNode(std::string &hostaddr, Json::Value &para) {

  auto channel = g_node_channel_manager.getNodeChannel(hostaddr.c_str());
  if (channel == nullptr)
    return false;

  kunlunrpc::HttpService_Stub stub(channel);
  brpc::Controller *cntl = new brpc::Controller();
  google::protobuf::Closure *done = brpc::NewCallback(&CallBC, cntl, this);

  Json::FastWriter writer;
  writer.omitEndingLineFeed();
  std::string body = writer.write(para);
  cntl->request_attachment().append(body);
  cntl->http_request().set_method(brpc::HTTP_METHOD_POST);

  stub.Emit(cntl, nullptr, nullptr, done);
  brpc::Join(cntl->call_id());

  return true;
}
--------------------------------------------------------------------------------
/src/request_framework/syncBrpc.h:
--------------------------------------------------------------------------------
/*
  Copyright (c) 2019-2021 ZettaDB inc. All rights reserved.

  This source code is licensed under Apache 2.0 License,
  combined with Common Clause Condition 1.0, as detailed in the NOTICE file.
*/
#ifndef _SYNC_BRPC_H_
#define _SYNC_BRPC_H_

#include "brpc/channel.h"
#include "brpc/controller.h"
#include "brpc/parallel_channel.h"
#include "brpc/server.h"
#include "bthread/bthread.h"
#include "butil/logging.h"
#include "butil/macros.h"
#include "butil/string_printf.h"
#include "butil/time.h"
#include "http_server/proto/clustermng.pb.h"
#include "request_framework/requestValueDefine.h"
#include "util_func/error_code.h"
#include "zettalib/errorcup.h"
#include "zettalib/op_mysql.h"
#include "zettalib/zthread.h"
#include "json/json.h"
#include <string>

// Not thread-safe yet
class SyncBrpc {
public:
  SyncBrpc(){};
  ~SyncBrpc(){};

  bool syncBrpcToNode(std::string &hostaddr, Json::Value &para);
  std::string response;
  bool result;
};

#endif /*_SYNC_BRPC_H_*/
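syncBrpcToNode() hides the asynchronous brpc call behind a blocking interface: CallBC() stores the response attachment and a success flag, and brpc::Join() parks the caller until the RPC completes, so callers only inspect result and response afterwards. The sketch below shows such a caller; the PingNode function, the host handling and the JSON field names are invented for illustration and are not taken from the repository.

// Hypothetical caller of SyncBrpc, for illustration only.
// The function name and the job fields are assumptions, not from syncBrpc.*.
#include "request_framework/syncBrpc.h"
#include "json/json.h"
#include <string>

static bool PingNode(const std::string &host) {
  Json::Value para;
  para["job_type"] = "get_status";   // assumed field names
  para["timestamp"] = "1435749309";

  SyncBrpc rpc;
  std::string addr = host;           // syncBrpcToNode takes a mutable reference
  if (!rpc.syncBrpcToNode(addr, para))
    return false;                    // false only when no channel exists for host
  return rpc.result && !rpc.response.empty();
}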
--------------------------------------------------------------------------------
/src/restore_cluster/CMakeLists.txt:
--------------------------------------------------------------------------------
add_library(restore_cluster OBJECT restoreShardTask.cc restoreComputeTask.cc restoreClusterMission.cc)
target_include_directories(restore_cluster INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}")
target_include_directories(restore_cluster PUBLIC "${PROJECT_SOURCE_DIR}/src")
target_include_directories(restore_cluster PUBLIC "${VENDOR_OUTPUT_PATH}/include")
target_include_directories(restore_cluster PUBLIC "${PROJECT_BINARY_DIR}")
target_include_directories(restore_cluster PUBLIC "${ZETTALIB_INCLUDE_PATH}")
--------------------------------------------------------------------------------
/src/restore_cluster/restoreComputeTask.h:
--------------------------------------------------------------------------------
/*
  Copyright (c) 2019-2022 ZettaDB inc. All rights reserved.

  This source code is licensed under Apache 2.0 License,
  combined with Common Clause Condition 1.0, as detailed in the NOTICE file.
*/
#pragma once
#include "cluster_operator/postgresInstallRemote.h"
#include "request_framework/remoteTask.h"
#include <string>
#include <unordered_map>

namespace kunlun {
class ComputeRestoreRemoteTask : public RemoteTask {
  typedef RemoteTask super;

public:
  ComputeRestoreRemoteTask(const char *task_name,
                           std::string general_request_id)
      : super(task_name), request_id_(general_request_id) {
    srcClusterName_ = "";
    restore_time_ = "";
  };
  virtual ~ComputeRestoreRemoteTask(){};
  bool InitComputesInfo(int srcClusterId, int dstClusterId,
                        std::string shard_id_map, std::string restore_time);

  bool virtual TaskReportImpl() override;
  void virtual SetUpStatus() override;

private:
  bool ComposeOneNodeChannelAndPara();
  bool ComposeNodeChannelsAndParas();
  bool InitRestoreLogId();

private:
  std::string request_id_;
  std::string srcClusterName_;
  std::unordered_map dstPostgresInfos_;
  std::string hdfs_addr_;
  std::string restore_log_id_;
  // relation between src cluster shard and dest cluster shard
  std::string shard_id_map_;
  std::string restore_time_;
};
} // namespace kunlun
--------------------------------------------------------------------------------
/src/restore_cluster/restoreShardTask.h:
--------------------------------------------------------------------------------
/*
  Copyright (c) 2019-2022 ZettaDB inc. All rights reserved.

  This source code is licensed under Apache 2.0 License,
  combined with Common Clause Condition 1.0, as detailed in the NOTICE file.
*/
#pragma once
#include "cluster_operator/mysqlInstallRemote.h"
#include "request_framework/remoteTask.h"
#include <string>
#include <unordered_map>

namespace kunlun {
extern void rebuildRbrAfterRestore(void *para);
class ShardRestoreRemoteTask : public RemoteTask {
  typedef RemoteTask super;

public:
  ShardRestoreRemoteTask(const char *task_name, std::string general_request_id)
      : super(task_name), request_id_(general_request_id) {
    srcShardName_ = "";
    srcClusterName_ = "";
    restore_time_str_ = "";
    // this->Set_call_back(rebuildRbrAfterRestore);
    // this->Set_cb_context((void *)this);
  };
  virtual ~ShardRestoreRemoteTask(){};
  bool InitShardsInfo(int srcShardId, int dstShardId, std::string restore_time);

  bool virtual TaskReportImpl() override;
  void virtual SetUpStatus() override;
  struct InstanceInfoSt getMasterInfo();
  bool setUpRBR();

private:
  bool ComposeOneNodeChannelAndPara();
  bool ComposeNodeChannelsAndParas();
  bool InitRestoreLogId();
  // set super_read_only = on;
  bool enableMaster(struct InstanceInfoSt &master);
  bool doChangeMasterOnSlave(struct InstanceInfoSt &master,
                             struct InstanceInfoSt &slave);

private:
  std::string request_id_;
  std::string srcShardName_;
  std::string srcClusterName_;
  std::unordered_map destMysqlInfos_;
  std::string hdfs_addr_;
  std::string restore_log_id_;
  std::string restore_time_str_;
};
} // namespace kunlun
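ShardRestoreRemoteTask finishes with a replication-rebuild step (enableMaster(), doChangeMasterOnSlave(), setUpRBR()) whose bodies are not part of this listing. As a generic reminder of what pointing a restored MySQL replica back at its master involves, the sketch below just prints typical statements; the actual SQL, options and credentials used by cluster_mgr may differ, and the host, port, user and password literals are placeholders.

// Generic change-master illustration; NOT the statements issued by
// restoreShardTask.cc. All connection literals below are placeholders.
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> master_side = {
      "SET GLOBAL super_read_only = OFF",   // let the chosen master accept writes
      "SET GLOBAL read_only = OFF"};
  std::vector<std::string> slave_side = {
      "STOP SLAVE",
      "CHANGE MASTER TO MASTER_HOST='192.0.2.10', MASTER_PORT=3306, "
      "MASTER_USER='repl', MASTER_PASSWORD='repl_pwd', MASTER_AUTO_POSITION=1",
      "START SLAVE"};
  for (const auto &sql : master_side) std::cout << "master> " << sql << ";\n";
  for (const auto &sql : slave_side)  std::cout << "slave>  " << sql << ";\n";
  return 0;
}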
PUBLIC "${PROJECT_SOURCE_DIR}/src") 4 | target_include_directories(util_func PUBLIC "${VENDOR_OUTPUT_PATH}/include") 5 | target_include_directories(util_func PUBLIC "${PROJECT_BINARY_DIR}") 6 | target_include_directories(util_func PUBLIC "${ZETTALIB_INCLUDE_PATH}") 7 | -------------------------------------------------------------------------------- /src/util_func/test_oject_ptr.cc: -------------------------------------------------------------------------------- 1 | #include "object_ptr.h" 2 | #include "../kl_mentain/thread_pool.h" 3 | #include 4 | #include 5 | 6 | using namespace kunlun; 7 | using namespace kunlun_rbr; 8 | 9 | class A : public ObjectRef { 10 | public: 11 | A() : a(0) {} 12 | virtual ~A() {} 13 | 14 | void print() { 15 | std::cout << "a: " << a << std::endl; 16 | } 17 | private: 18 | int a; 19 | }; 20 | 21 | int test_func(ObjectPtr ptr) { 22 | std::cout << "func A ref: " << ptr.GetTRef() << std::endl; 23 | ptr->print(); 24 | return 0; 25 | } 26 | 27 | int main(int argc, char** argv) { 28 | ObjectPtr ptrA( new A()); 29 | std::cout << "ptrA is valid: " << ptrA.Invalid() << std::endl; 30 | { 31 | ObjectPtr ptrB = ptrA; 32 | std::cout << "A ref: " << ptrA.GetTRef() << std::endl; 33 | } 34 | 35 | A* a = ptrA.GetTRaw(); 36 | a->print(); 37 | 38 | { 39 | ObjectPtr ptrC; 40 | std::cout << "ptrC is valid: " << ptrC.Invalid() << std::endl; 41 | ObjectPtr ptrD(ptrA); 42 | std::cout << "A ref: " << ptrA.GetTRef() << std::endl; 43 | } 44 | 45 | CThreadPool tpool(1); 46 | tpool.commit([ptrA]{ 47 | sleep(10); 48 | std::cout << "pool A ref: " << ptrA.GetTRef() << std::endl; 49 | }); 50 | 51 | 52 | std::cout << "1.A ref: " << ptrA.GetTRef() << std::endl; 53 | ptrA->print(); 54 | 55 | sleep(20); 56 | test_func(ptrA); 57 | 58 | std::vector > A_vec; 59 | A_vec.push_back(ptrA); 60 | for(auto it : A_vec) { 61 | std::cout << "vec A ref: " << it.GetTRef() << std::endl; 62 | } 63 | 64 | std::cout << "2.A ref: " << ptrA.GetTRef() << std::endl; 65 | return 0; 66 | } -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/include/check_by_get_state.inc: -------------------------------------------------------------------------------- 1 | --http 2 | request_type: POST 3 | header:Content-Type:application/json 4 | body:{ 5 | "version":"1.0", 6 | "job_id":"{$job_id}", 7 | "job_type":"get_status", 8 | "timestamp" : "202205131532", 9 | "paras":{ } 10 | } 11 | EOF 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/include/check_cn_write_read.inc: -------------------------------------------------------------------------------- 1 | SET client_min_messages TO 'warning'; 2 | drop table if exists t1111; 3 | RESET client_min_messages; 4 | create table t1111(id int primary key, info text, wt int); 5 | insert into t1111(id,info,wt) values(1, 'record1', 1); 6 | insert into t1111(id,info,wt) values(2, 'record2', 2); 7 | update t1111 set wt = 12 where id = 1; 8 | select * from t1111; 9 | delete from t1111 where id = 1; 10 | select * from t1111; 11 | prepare q1(int) as select*from t1111 where id=$1; 12 | begin; 13 | execute q1(1); 14 | execute q1(2); 15 | prepare q2(text,int, int) as update t1111 set info=$1 , wt=$2 where id=$3; 16 | execute q2('Rec1',2,1); 17 | commit; 18 | execute q2('Rec2',3,2); 19 | drop table t1111; 20 | 21 | -------------------------------------------------------------------------------- 
/test/hellentest/kunlun_test1.1.2/include/check_create_cluster_state.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # 1. check shard status 4 | # 2. check pg status 5 | # 3. check metadb shards, shard_nodes, comp_nodes 6 | # 7 | 8 | --connection default 9 | 10 | --let $shard_nums = `SELECT COUNT(*) FROM shards` 11 | --let $comp_nums = `SELECT COUNT(*) FROM comp_nodes` 12 | 13 | if($ha_mode == rbr) 14 | { 15 | 16 | while($shard_nums > 0) 17 | { 18 | --let $shardid=shard.$shard_nums 19 | --source kunlun-test/suite/cluster_manager/check_rbr_shard_state.inc 20 | dec $shard_nums; 21 | } 22 | 23 | } 24 | 25 | --let c_inc=1 26 | while($comp_nums > 0) 27 | { 28 | let $cconn_name=cc_pg_conn.$c_inc; 29 | let $comp = computer.$comp_nums; 30 | --pg_connect($cconn_name, $comp, $comp_user, $comp_pwd) 31 | --source kunlun-test/suite/cluster_manager/check_computer_state.inc 32 | dec $comp_nums; 33 | inc $c_inc; 34 | --connection default 35 | } 36 | 37 | 38 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/include/check_delete_cluster_state.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # 1. check shards, shard_nodes 4 | # 2. check comp_nodes 5 | # 3. check metadb commit_log_xxx and ddl_ops_log_xxxx 6 | # 7 | 8 | --connection default 9 | 10 | --let $cn_num=`SELECT COUNT(*) FROM comp_nodes;` 11 | if($cn_num != 0) 12 | { 13 | --echo "comp is not empty" 14 | --exit 15 | } 16 | 17 | --let $sds_num=`SELECT COUNT(*) FROM shards;` 18 | if($sds_num != 0) 19 | { 20 | --echo "shards is not empty" 21 | --exit 22 | } 23 | 24 | --let $sn_num=`SELECT COUNT(*) FROM shard_nodes;` 25 | if($sn_num != 0) 26 | { 27 | --echo "shard_nods is not empty" 28 | --exit 29 | } 30 | 31 | --let $dc_num=`SELECT COUNT(*) FROM db_clusters;` 32 | if($dc_num != 0) 33 | { 34 | --echo "db_clusters is not empty" 35 | --exit 36 | } 37 | 38 | --let $commit_name='commit_log_$cluster_name' 39 | --let $ddlops_name='ddl_ops_log_$cluster_name' 40 | --let $commit_exist=`SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=$commit_name;` 41 | if($commit_exist != 0) 42 | { 43 | --echo "commit_log is not deleted, please check" 44 | --exit 45 | } 46 | 47 | --let $ddlops_exist=`SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=$ddlops_name;` 48 | if($ddlops_exist != 0) 49 | { 50 | --echo "ddl_ops_log is not deleted, please check" 51 | --exit 52 | } 53 | 54 | --echo "==== delete cluster ok ====" 55 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/include/check_delete_cluster_state.inc.bak: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # 1. check shards, shard_nodes 4 | # 2. check comp_nodes 5 | # 3. 
check metadb commit_log_xxx and ddl_ops_log_xxxx 6 | # 7 | 8 | --connection default 9 | 10 | --let $sds_num=query_get_value(SELECT COUNT(*) FROM shards WHERE db_cluster_id=$cluster_id, COUNT(*), 1) 11 | if($sds_num != 0) 12 | { 13 | --echo "shards is not empty" 14 | --exit 15 | } 16 | 17 | --let $sn_num=query_get_value(SELECT COUNT(*) FROM shard_nodes WHERE db_cluster_id=$cluster_id, COUNT(*), 1) 18 | if($sn_num != 0) 19 | { 20 | --echo "shard_nods is not empty" 21 | --exit 22 | } 23 | 24 | --let $dc_num=query_get_value(SELECT COUNT(*) FROM db_clusters WHERE id=$cluster_id, COUNT(*), 1) 25 | if($dc_num != 0) 26 | { 27 | --echo "db_clusters is not empty" 28 | --exit 29 | } 30 | 31 | --let $commit_name='commit_log_$cluster_name' 32 | --let $ddlops_name='ddl_ops_log_$cluster_name' 33 | --let $commit_exist=query_get_value(SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=$commit_name, COUNT(*), 1) 34 | if($commit_exist != 0) 35 | { 36 | --echo "commit_log is not deleted, please check" 37 | --exit 38 | } 39 | 40 | --let $ddlops_exist=query_get_value(SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=$ddlops_name, COUNT(*), 1) 41 | if($ddlops_exist != 0) 42 | { 43 | --echo "ddl_ops_log is not deleted, please check" 44 | --exit 45 | } 46 | 47 | --echo "==== delete cluster ok ====" 48 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/include/check_rbr_shard_state.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # check rbr mode state 4 | # 1. check metadb shard_nodes 5 | # 2. check m/s 6 | 7 | --connect(shard_master, $shardid, clustmgr, clustmgr_pwd) 8 | --let $slave_host = query_get_value(SHOW SLAVE HOSTS, Host, 1) 9 | 10 | if($slave_host == "No such row") 11 | { 12 | --echo "connect master host and get slave hosts failed" 13 | --exit 14 | } 15 | 16 | if($slave_host == "NULL") 17 | { 18 | --echo "connect master host and get slave hosts empty" 19 | --exit 20 | } 21 | 22 | disconnect shard_master; 23 | 24 | --connection default 25 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/include/check_shard_state.inc: -------------------------------------------------------------------------------- 1 | --connection default 2 | --let $shard_nums = `SELECT COUNT(*) FROM shards ` 3 | #--let $i = 0 4 | 5 | while($shard_nums > 0) 6 | { 7 | #--let $shardid = `select shard_id from shard_nodes where member_state ="source" and status = "creating" limit $i,1;` 8 | --let $shardid=shard.$shard_nums 9 | --echo $shardid 10 | #--set_cluster_id(4) 11 | #--kl_cluster_id(1) 12 | --connect(shard_master,$shardid, clustmgr, clustmgr_pwd) 13 | --let $slave_host = query_get_value(SHOW SLAVE HOSTS, Host, 1) 14 | if($slave_host == "No such row") 15 | { 16 | --echo "connect master host and get slave hosts failed" 17 | --exit 18 | } 19 | 20 | if($slave_host == "NULL") 21 | { 22 | --echo "connect master host and get slave hosts empty" 23 | --exit 24 | } 25 | disconnect shard_master; 26 | --connection default 27 | dec $shard_nums; 28 | #inc $i; 29 | } 30 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/include/get_backup_status.inc: -------------------------------------------------------------------------------- 1 | 2 | if(!$http_req_timeout) 3 | { 4 | --let $http_req_timeout = 500 5 | } 6 | 7 | #--echo $job_id 8 | --sleep 5 9 | 10 | --let 
$condition=1 11 | while ($condition == 1) 12 | { 13 | --connection default 14 | --let $status = `select status from cluster_general_job_log where job_type='shard_coldbackup' order by id desc limit 1;` 15 | 16 | if ($status != ongoing) 17 | { 18 | --let $condition=0 19 | } 20 | 21 | --sleep 10 22 | #--echo status: $status 23 | --dec $http_req_timeout 24 | if($http_req_timeout <= 0) 25 | { 26 | --let $condition=0 27 | --exit 28 | } 29 | 30 | } 31 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/include/get_cluster_name.inc: -------------------------------------------------------------------------------- 1 | --http 2 | request_type: POST 3 | header:Content-Type:application/json 4 | body:{ 5 | "version":"1.0", 6 | "job_id":"", 7 | "job_type":"get_cluster_summary", 8 | "timestamp" : "202205131532", 9 | "paras":{ } 10 | } 11 | EOF 12 | --let $cluster_name = `http_reap(attachment.list_cluster[0].cluster_name)` 13 | --let $cluster_id = `http_reap(attachment.list_cluster[0].cluster_id)` 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/include/get_restore_status.inc: -------------------------------------------------------------------------------- 1 | 2 | if(!$http_req_timeout) 3 | { 4 | --let $http_req_timeout = 500 5 | } 6 | 7 | --echo $job_id 8 | --sleep 5 9 | 10 | --let $condition=1 11 | while ($condition == 1) 12 | { 13 | --connection default 14 | --let $status = `select status from restore_log where general_log_id = $job_id order by id desc limit 1;` 15 | 16 | if ($status != ongoing) 17 | { 18 | --let $condition=0 19 | } 20 | 21 | --sleep 10 22 | #--echo status: $status 23 | --dec $http_req_timeout 24 | if($http_req_timeout <= 0) 25 | { 26 | --let $condition=0 27 | --exit 28 | } 29 | 30 | } 31 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/include/wait_http_request_finish.inc: -------------------------------------------------------------------------------- 1 | 2 | if(!$http_req_timeout) 3 | { 4 | --let $http_req_timeout = 500 5 | } 6 | 7 | #--echo $job_id 8 | --sleep 5 9 | 10 | --let $condition=1 11 | while ($condition == 1) 12 | { 13 | --http 14 | request_type: POST 15 | header:Content-Type:application/json 16 | body:{ 17 | "version": "1.0", 18 | "job_id":"${job_id}", 19 | "job_type": "get_status", 20 | "timestamp" : "1435749309", 21 | "paras": {} 22 | } 23 | EOF 24 | 25 | --let $status = `http_reap(status)` 26 | if ($status != ongoing) 27 | { 28 | --let $condition=0 29 | } 30 | 31 | --sleep 10 32 | #--echo status: $status 33 | --dec $http_req_timeout 34 | if($http_req_timeout <= 0) 35 | { 36 | --let $condition=0 37 | --exit 38 | } 39 | 40 | --let $cluster_name = `http_reap(attachment.cluster_name)` 41 | --let $cluster_id = `http_reap(attachment.cluster_id)` 42 | } 43 | 44 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/add_comps_existcluster.result: -------------------------------------------------------------------------------- 1 | SET client_min_messages TO 'warning'; 2 | 3 | drop table if exists t1111; 4 | 5 | RESET client_min_messages; 6 | 7 | create table t1111(id int primary key, info text, wt int); 8 | 9 | insert into t1111(id,info,wt) values(1, 'record1', 1); 10 | 11 | insert into t1111(id,info,wt) values(2, 'record2', 2); 12 | 13 | update t1111 set wt = 
12 where id = 1; 14 | 15 | select * from t1111; 16 | 1 record1 122 record2 2 17 | delete from t1111 where id = 1; 18 | 19 | select * from t1111; 20 | 2 record2 2 21 | prepare q1(int) as select*from t1111 where id=$1; 22 | 23 | begin; 24 | 25 | execute q1(1); 26 | 27 | execute q1(2); 28 | 2 record2 2 29 | prepare q2(text,int, int) as update t1111 set info=$1 , wt=$2 where id=$3; 30 | 31 | execute q2('Rec1',2,1); 32 | 33 | commit; 34 | 35 | execute q2('Rec2',3,2); 36 | 37 | drop table t1111; 38 | 39 | SET client_min_messages TO 'warning'; 40 | 41 | drop table if exists t1111; 42 | 43 | RESET client_min_messages; 44 | 45 | create table t1111(id int primary key, info text, wt int); 46 | 47 | insert into t1111(id,info,wt) values(1, 'record1', 1); 48 | 49 | insert into t1111(id,info,wt) values(2, 'record2', 2); 50 | 51 | update t1111 set wt = 12 where id = 1; 52 | 53 | select * from t1111; 54 | 1 record1 122 record2 2 55 | delete from t1111 where id = 1; 56 | 57 | select * from t1111; 58 | 2 record2 2 59 | prepare q1(int) as select*from t1111 where id=$1; 60 | 61 | begin; 62 | 63 | execute q1(1); 64 | 65 | execute q1(2); 66 | 2 record2 2 67 | prepare q2(text,int, int) as update t1111 set info=$1 , wt=$2 where id=$3; 68 | 69 | execute q2('Rec1',2,1); 70 | 71 | commit; 72 | 73 | execute q2('Rec2',3,2); 74 | 75 | drop table t1111; 76 | 77 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/add_nodes.result: -------------------------------------------------------------------------------- 1 | shard.1 2 | SET client_min_messages TO 'warning'; 3 | 4 | drop table if exists t1111; 5 | 6 | RESET client_min_messages; 7 | 8 | create table t1111(id int primary key, info text, wt int); 9 | 10 | insert into t1111(id,info,wt) values(1, 'record1', 1); 11 | 12 | insert into t1111(id,info,wt) values(2, 'record2', 2); 13 | 14 | update t1111 set wt = 12 where id = 1; 15 | 16 | select * from t1111; 17 | 1 record1 122 record2 2 18 | delete from t1111 where id = 1; 19 | 20 | select * from t1111; 21 | 2 record2 2 22 | prepare q1(int) as select*from t1111 where id=$1; 23 | 24 | begin; 25 | 26 | execute q1(1); 27 | 28 | execute q1(2); 29 | 2 record2 2 30 | prepare q2(text,int, int) as update t1111 set info=$1 , wt=$2 where id=$3; 31 | 32 | execute q2('Rec1',2,1); 33 | 34 | commit; 35 | 36 | execute q2('Rec2',3,2); 37 | 38 | drop table t1111; 39 | 40 | shard.1 41 | "==== delete cluster ok ====" 42 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/add_nodes_existcluster.result: -------------------------------------------------------------------------------- 1 | 10 2 | shard.2 3 | shard.1 4 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/add_shards.result: -------------------------------------------------------------------------------- 1 | shard.1 2 | SET client_min_messages TO 'warning'; 3 | 4 | drop table if exists t1111; 5 | 6 | RESET client_min_messages; 7 | 8 | create table t1111(id int primary key, info text, wt int); 9 | 10 | insert into t1111(id,info,wt) values(1, 'record1', 1); 11 | 12 | insert into t1111(id,info,wt) values(2, 'record2', 2); 13 | 14 | update t1111 set wt = 12 where id = 1; 15 | 16 | select * from t1111; 17 | 1 record1 122 record2 2 18 | delete from t1111 where id = 1; 19 | 20 | select * from t1111; 21 | 2 record2 2 22 | prepare q1(int) as select*from t1111 where id=$1; 23 | 24 | begin; 25 | 26 | 
execute q1(1); 27 | 28 | execute q1(2); 29 | 2 record2 2 30 | prepare q2(text,int, int) as update t1111 set info=$1 , wt=$2 where id=$3; 31 | 32 | execute q2('Rec1',2,1); 33 | 34 | commit; 35 | 36 | execute q2('Rec2',3,2); 37 | 38 | drop table t1111; 39 | 40 | SET client_min_messages TO 'warning'; 41 | 42 | drop table if exists t1111; 43 | 44 | RESET client_min_messages; 45 | 46 | create table t1111(id int primary key, info text, wt int); 47 | 48 | insert into t1111(id,info,wt) values(1, 'record1', 1); 49 | 50 | insert into t1111(id,info,wt) values(2, 'record2', 2); 51 | 52 | update t1111 set wt = 12 where id = 1; 53 | 54 | select * from t1111; 55 | 1 record1 122 record2 2 56 | delete from t1111 where id = 1; 57 | 58 | select * from t1111; 59 | 2 record2 2 60 | prepare q1(int) as select*from t1111 where id=$1; 61 | 62 | begin; 63 | 64 | execute q1(1); 65 | 66 | execute q1(2); 67 | 2 record2 2 68 | prepare q2(text,int, int) as update t1111 set info=$1 , wt=$2 where id=$3; 69 | 70 | execute q2('Rec1',2,1); 71 | 72 | commit; 73 | 74 | execute q2('Rec2',3,2); 75 | 76 | drop table t1111; 77 | 78 | shard.2 79 | shard.1 80 | "==== delete cluster ok ====" 81 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/add_shards_existcluster.result: -------------------------------------------------------------------------------- 1 | 10 2 | shard.2 3 | shard.1 4 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/cluster_backup_restore.result: -------------------------------------------------------------------------------- 1 | shard.1 2 | SET client_min_messages TO 'warning'; 3 | 4 | drop table if exists t1111; 5 | 6 | RESET client_min_messages; 7 | 8 | create table t1111(id int primary key, info text, wt int); 9 | 10 | insert into t1111(id,info,wt) values(1, 'record1', 1); 11 | 12 | insert into t1111(id,info,wt) values(2, 'record2', 2); 13 | 14 | update t1111 set wt = 12 where id = 1; 15 | 16 | select * from t1111; 17 | 1 record1 122 record2 2 18 | delete from t1111 where id = 1; 19 | 20 | select * from t1111; 21 | 2 record2 2 22 | prepare q1(int) as select*from t1111 where id=$1; 23 | 24 | begin; 25 | 26 | execute q1(1); 27 | 28 | execute q1(2); 29 | 2 record2 2 30 | prepare q2(text,int, int) as update t1111 set info=$1 , wt=$2 where id=$3; 31 | 32 | execute q2('Rec1',2,1); 33 | 34 | commit; 35 | 36 | execute q2('Rec2',3,2); 37 | 38 | drop table t1111; 39 | 40 | create table student(id int primary key, info text, wt int); 41 | 42 | insert into student(id,info,wt) values(1, 'record1', 1); 43 | 44 | insert into student(id,info,wt) values(2, 'record2', 2); 45 | 46 | No such row 47 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/cluster_backup_restore_trans.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists transfer_account; 4 | 5 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 13); 6 | 7 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(500001) with (shard = 13); 8 | 9 | create table transfer_account_02 partition of transfer_account for values from (500001) to(1000001) with (shard = 13); 10 | 11 | create table transfer_account_03 partition of transfer_account for values from (1000001) 
to(1500001) with (shard = 14); 12 | 13 | create table transfer_account_04 partition of transfer_account for values from (1500001) to(2000001) with (shard = 14); 14 | 15 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 16 | 17 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 18 | 19 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 20 | 21 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 22 | 23 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 24 | 25 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 26 | 27 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 28 | 29 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 30 | 31 | 2022-10-08 17:55:56 32 | 106 33 | select sum(money) as moneytotal from transfer_account; 34 | 1000000 35 | drop table if exists student; 36 | 37 | create table student(id int primary key, info text, wt int); 38 | 39 | insert into student(id,info,wt) values(1, 'record1', 1); 40 | 41 | insert into student(id,info,wt) values(2, 'record2', 2); 42 | 43 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/creat_machine.result: -------------------------------------------------------------------------------- 1 | "create machine end" 2 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/del_comps_existcluster.result: -------------------------------------------------------------------------------- 1 | 15 2 | 2 3 | SET client_min_messages TO 'warning'; 4 | 5 | drop table if exists t1111; 6 | 7 | RESET client_min_messages; 8 | 9 | create table t1111(id int primary key, info text, wt int); 10 | 11 | insert into t1111(id,info,wt) values(1, 'record1', 1); 12 | 13 | insert into t1111(id,info,wt) values(2, 'record2', 2); 14 | 15 | update t1111 set wt = 12 where id = 1; 16 | 17 | select * from t1111; 18 | 1 record1 122 record2 2 19 | delete from t1111 where id = 1; 20 | 21 | select * from t1111; 22 | 2 record2 2 23 | prepare q1(int) as select*from t1111 where id=$1; 24 | 25 | begin; 26 | 27 | execute q1(1); 28 | 29 | execute q1(2); 30 | 2 record2 2 31 | prepare q2(text,int, int) as update t1111 set info=$1 , wt=$2 where id=$3; 32 | 33 | execute q2('Rec1',2,1); 34 | 35 | commit; 36 | 37 | execute q2('Rec2',3,2); 38 | 39 | drop table t1111; 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/del_nodes_existcluster.result: -------------------------------------------------------------------------------- 1 | 13 2 | 51406 3 | 1 4 | shard.1 5 | "connect master host and get slave hosts failed" 6 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/del_rbrcluster.result: -------------------------------------------------------------------------------- 1 | "test elete cluste ok" 2 | "==== delete cluster ok ====" 3 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/del_shard_existcluster.result: -------------------------------------------------------------------------------- 1 | 12 2 | 2 3 | shard.1 4 | -------------------------------------------------------------------------------- 
/test/hellentest/kunlun_test1.1.2/r/expand_cluster.result: -------------------------------------------------------------------------------- 1 | shard.1 2 | SET client_min_messages TO 'warning'; 3 | 4 | drop table if exists t1111; 5 | 6 | RESET client_min_messages; 7 | 8 | create table t1111(id int primary key, info text, wt int); 9 | 10 | insert into t1111(id,info,wt) values(1, 'record1', 1); 11 | 12 | insert into t1111(id,info,wt) values(2, 'record2', 2); 13 | 14 | update t1111 set wt = 12 where id = 1; 15 | 16 | select * from t1111; 17 | 1 record1 122 record2 2 18 | delete from t1111 where id = 1; 19 | 20 | select * from t1111; 21 | 2 record2 2 22 | prepare q1(int) as select*from t1111 where id=$1; 23 | 24 | begin; 25 | 26 | execute q1(1); 27 | 28 | execute q1(2); 29 | 2 record2 2 30 | prepare q2(text,int, int) as update t1111 set info=$1 , wt=$2 where id=$3; 31 | 32 | execute q2('Rec1',2,1); 33 | 34 | commit; 35 | 36 | execute q2('Rec2',3,2); 37 | 38 | drop table t1111; 39 | 40 | create table student(id int primary key, info text, wt int); 41 | 42 | insert into student(id,info,wt) values(1, 'record1', 1); 43 | 44 | insert into student(id,info,wt) values(2, 'record2', 2); 45 | 46 | shard.2 47 | shard.1 48 | 11 49 | 10 50 | No such row 51 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_abnormal1.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists student; 4 | 5 | create table student(id int primary key, info text, wt int) with (shard = 19); 6 | 7 | insert into student(id,info,wt) values(1, 'record1', 1); 8 | 9 | insert into student(id,info,wt) values(2, 'record2', 2); 10 | 11 | insert into student(id,info,wt) values(3, 'record3', 3); 12 | 13 | drop table if exists transfer_account; 14 | 15 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 19); 16 | 17 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 19); 18 | 19 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 19); 20 | 21 | create table transfer_account_03 partition of transfer_account for values from (500) to(750) with (shard = 20); 22 | 23 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 20); 24 | 25 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 26 | 27 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 28 | 29 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 30 | 31 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 32 | 33 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 34 | 35 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 36 | 37 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 38 | 39 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_abnormal10.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists student; 4 | 5 | create table 
student(id int primary key, info text, wt int) with (shard = 39); 6 | 7 | insert into student(id,info,wt) values(1, 'record1', 1); 8 | 9 | insert into student(id,info,wt) values(2, 'record2', 2); 10 | 11 | insert into student(id,info,wt) values(3, 'record3', 3); 12 | 13 | drop table if exists transfer_account; 14 | 15 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 39); 16 | 17 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 39); 18 | 19 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 39); 20 | 21 | create table transfer_account_03 partition of transfer_account for values from (500) to(750) with (shard = 40); 22 | 23 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 40); 24 | 25 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 26 | 27 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 28 | 29 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 30 | 31 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 32 | 33 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 34 | 35 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 36 | 37 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 38 | 39 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_abnormal11.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists student; 4 | 5 | create table student(id int primary key, info text, wt int) with (shard = 41); 6 | 7 | insert into student(id,info,wt) values(1, 'record1', 1); 8 | 9 | insert into student(id,info,wt) values(2, 'record2', 2); 10 | 11 | insert into student(id,info,wt) values(3, 'record3', 3); 12 | 13 | drop table if exists transfer_account; 14 | 15 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 41); 16 | 17 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 41); 18 | 19 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 41); 20 | 21 | create table transfer_account_03 partition of transfer_account for values from (500) to(750) with (shard = 42); 22 | 23 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 42); 24 | 25 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 26 | 27 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 28 | 29 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 30 | 31 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 32 | 33 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 34 | 35 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 36 | 37 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 38 | 39 | insert 
into transfer_account select generate_series(901,1000),('2022-12-30'); 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_abnormal12.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists student; 4 | 5 | create table student(id int primary key, info text, wt int) with (shard = 43); 6 | 7 | insert into student(id,info,wt) values(1, 'record1', 1); 8 | 9 | insert into student(id,info,wt) values(2, 'record2', 2); 10 | 11 | insert into student(id,info,wt) values(3, 'record3', 3); 12 | 13 | drop table if exists transfer_account; 14 | 15 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 43); 16 | 17 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 43); 18 | 19 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 43); 20 | 21 | create table transfer_account_03 partition of transfer_account for values from (500) to(750) with (shard = 44); 22 | 23 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 44); 24 | 25 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 26 | 27 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 28 | 29 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 30 | 31 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 32 | 33 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 34 | 35 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 36 | 37 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 38 | 39 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_abnormal13.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists student; 4 | 5 | create table student(id int primary key, info text, wt int) with (shard = 45); 6 | 7 | insert into student(id,info,wt) values(1, 'record1', 1); 8 | 9 | insert into student(id,info,wt) values(2, 'record2', 2); 10 | 11 | insert into student(id,info,wt) values(3, 'record3', 3); 12 | 13 | drop table if exists transfer_account; 14 | 15 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 45); 16 | 17 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 45); 18 | 19 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 45); 20 | 21 | create table transfer_account_03 partition of transfer_account for values from (500) to(750) with (shard = 46); 22 | 23 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 46); 24 | 25 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 26 | 27 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 28 | 29 | insert into transfer_account select 
generate_series(301,400),('2022-05-09'); 30 | 31 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 32 | 33 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 34 | 35 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 36 | 37 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 38 | 39 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_abnormal14.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists student; 4 | 5 | create table student(id int primary key, info text, wt int) with (shard = 51); 6 | 7 | insert into student(id,info,wt) values(1, 'record1', 1); 8 | 9 | insert into student(id,info,wt) values(2, 'record2', 2); 10 | 11 | insert into student(id,info,wt) values(3, 'record3', 3); 12 | 13 | drop table if exists transfer_account; 14 | 15 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 51); 16 | 17 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 51); 18 | 19 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 51); 20 | 21 | create table transfer_account_03 partition of transfer_account for values from (500) to(750) with (shard = 52); 22 | 23 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 52); 24 | 25 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 26 | 27 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 28 | 29 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 30 | 31 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 32 | 33 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 34 | 35 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 36 | 37 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 38 | 39 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_abnormal2.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists student; 4 | 5 | create table student(id int primary key, info text, wt int) with (shard = 23); 6 | 7 | insert into student(id,info,wt) values(1, 'record1', 1); 8 | 9 | insert into student(id,info,wt) values(2, 'record2', 2); 10 | 11 | insert into student(id,info,wt) values(3, 'record3', 3); 12 | 13 | drop table if exists transfer_account; 14 | 15 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 23); 16 | 17 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 23); 18 | 19 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 23); 20 | 21 | create table transfer_account_03 partition of 
transfer_account for values from (500) to(750) with (shard = 24); 22 | 23 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 24); 24 | 25 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 26 | 27 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 28 | 29 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 30 | 31 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 32 | 33 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 34 | 35 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 36 | 37 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 38 | 39 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_abnormal3.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists student; 4 | 5 | create table student(id int primary key, info text, wt int) with (shard = 17); 6 | 7 | insert into student(id,info,wt) values(1, 'record1', 1); 8 | 9 | insert into student(id,info,wt) values(2, 'record2', 2); 10 | 11 | insert into student(id,info,wt) values(3, 'record3', 3); 12 | 13 | drop table if exists transfer_account; 14 | 15 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 17); 16 | 17 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 17); 18 | 19 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 17); 20 | 21 | create table transfer_account_03 partition of transfer_account for values from (500) to(750) with (shard = 18); 22 | 23 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 18); 24 | 25 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 26 | 27 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 28 | 29 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 30 | 31 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 32 | 33 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 34 | 35 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 36 | 37 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 38 | 39 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_abnormal4.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists student; 4 | 5 | create table student(id int primary key, info text, wt int) with (shard = 27); 6 | 7 | insert into student(id,info,wt) values(1, 'record1', 1); 8 | 9 | insert into student(id,info,wt) values(2, 'record2', 2); 10 | 11 | insert into student(id,info,wt) values(3, 'record3', 3); 12 | 13 | drop table if exists transfer_account; 14 | 15 | create table transfer_account(id int primary 
key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 27); 16 | 17 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 27); 18 | 19 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 27); 20 | 21 | create table transfer_account_03 partition of transfer_account for values from (500) to(750) with (shard = 28); 22 | 23 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 28); 24 | 25 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 26 | 27 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 28 | 29 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 30 | 31 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 32 | 33 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 34 | 35 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 36 | 37 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 38 | 39 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_abnormal5.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists student; 4 | 5 | create table student(id int primary key, info text, wt int) with (shard = 29); 6 | 7 | insert into student(id,info,wt) values(1, 'record1', 1); 8 | 9 | insert into student(id,info,wt) values(2, 'record2', 2); 10 | 11 | insert into student(id,info,wt) values(3, 'record3', 3); 12 | 13 | drop table if exists transfer_account; 14 | 15 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 29); 16 | 17 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 29); 18 | 19 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 29); 20 | 21 | create table transfer_account_03 partition of transfer_account for values from (500) to(750) with (shard = 30); 22 | 23 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 30); 24 | 25 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 26 | 27 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 28 | 29 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 30 | 31 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 32 | 33 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 34 | 35 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 36 | 37 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 38 | 39 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_abnormal6.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists 
student; 4 | 5 | create table student(id int primary key, info text, wt int) with (shard = 31); 6 | 7 | insert into student(id,info,wt) values(1, 'record1', 1); 8 | 9 | insert into student(id,info,wt) values(2, 'record2', 2); 10 | 11 | insert into student(id,info,wt) values(3, 'record3', 3); 12 | 13 | drop table if exists transfer_account; 14 | 15 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 31); 16 | 17 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 31); 18 | 19 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 31); 20 | 21 | create table transfer_account_03 partition of transfer_account for values from (500) to(750) with (shard = 32); 22 | 23 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 32); 24 | 25 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 26 | 27 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 28 | 29 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 30 | 31 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 32 | 33 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 34 | 35 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 36 | 37 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 38 | 39 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_abnormal8.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists student; 4 | 5 | create table student(id int primary key, info text, wt int) with (shard = 35); 6 | 7 | insert into student(id,info,wt) values(1, 'record1', 1); 8 | 9 | insert into student(id,info,wt) values(2, 'record2', 2); 10 | 11 | insert into student(id,info,wt) values(3, 'record3', 3); 12 | 13 | drop table if exists transfer_account; 14 | 15 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 35); 16 | 17 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 35); 18 | 19 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 35); 20 | 21 | create table transfer_account_03 partition of transfer_account for values from (500) to(750) with (shard = 36); 22 | 23 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 36); 24 | 25 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 26 | 27 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 28 | 29 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 30 | 31 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 32 | 33 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 34 | 35 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 36 | 37 | insert into transfer_account select 
generate_series(701,900),('2022-11-02'); 38 | 39 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_abnormal9.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists student; 4 | 5 | create table student(id int primary key, info text, wt int) with (shard = 37); 6 | 7 | insert into student(id,info,wt) values(1, 'record1', 1); 8 | 9 | insert into student(id,info,wt) values(2, 'record2', 2); 10 | 11 | insert into student(id,info,wt) values(3, 'record3', 3); 12 | 13 | drop table if exists transfer_account; 14 | 15 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 37); 16 | 17 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 37); 18 | 19 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 37); 20 | 21 | create table transfer_account_03 partition of transfer_account for values from (500) to(750) with (shard = 38); 22 | 23 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 38); 24 | 25 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 26 | 27 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 28 | 29 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 30 | 31 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 32 | 33 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 34 | 35 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 36 | 37 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 38 | 39 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_diffdb.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists transfer_account; 4 | 5 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 23); 6 | 7 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 23); 8 | 9 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 23); 10 | 11 | create table transfer_account_03 partition of transfer_account for values from (500) to(750) with (shard = 24); 12 | 13 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 24); 14 | 15 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 16 | 17 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 18 | 19 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 20 | 21 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 22 | 23 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 24 | 25 | insert into transfer_account select 
generate_series(501,700),('2022-09-25'); 26 | 27 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 28 | 29 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 30 | 31 | create database testdb; 32 | 33 | drop table if exists student; 34 | 35 | create table student(id int primary key, info text, wt int) with (shard = 23); 36 | 37 | insert into student(id,info,wt) values(1, 'record1', 1); 38 | 39 | insert into student(id,info,wt) values(2, 'record2', 2); 40 | 41 | insert into student(id,info,wt) values(3, 'record3', 3); 42 | 43 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/expand_cluster_trans_samedb.result: -------------------------------------------------------------------------------- 1 | shard.2 2 | shard.1 3 | drop table if exists student; 4 | 5 | create table student(id int primary key, info text, wt int) with (shard = 63); 6 | 7 | insert into student(id,info,wt) values(1, 'record1', 1); 8 | 9 | insert into student(id,info,wt) values(2, 'record2', 2); 10 | 11 | insert into student(id,info,wt) values(3, 'record3', 3); 12 | 13 | drop table if exists transfer_account; 14 | 15 | create table transfer_account(id int primary key,tradedate varchar(255), money int default 1000)partition by range(id) with (shard = 63); 16 | 17 | create table transfer_account_01 partition of transfer_account for values from (MINVALUE) to(250) with (shard = 63); 18 | 19 | create table transfer_account_02 partition of transfer_account for values from (250) to(500) with (shard = 63); 20 | 21 | create table transfer_account_03 partition of transfer_account for values from (500) to(750) with (shard = 64); 22 | 23 | create table transfer_account_04 partition of transfer_account for values from (750) to(1001) with (shard = 64); 24 | 25 | insert into transfer_account select generate_series(1,200),('2022-01-05'); 26 | 27 | insert into transfer_account select generate_series(201,300),('2022-04-06'); 28 | 29 | insert into transfer_account select generate_series(301,400),('2022-05-09'); 30 | 31 | insert into transfer_account select generate_series(401,450),('2022-07-02'); 32 | 33 | insert into transfer_account select generate_series(451,500),('2022-08-01'); 34 | 35 | insert into transfer_account select generate_series(501,700),('2022-09-25'); 36 | 37 | insert into transfer_account select generate_series(701,900),('2022-11-02'); 38 | 39 | insert into transfer_account select generate_series(901,1000),('2022-12-30'); 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/get_status_rbr.result: -------------------------------------------------------------------------------- 1 | "test create cluster" 2 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/install_delete_rbrcluster.result: -------------------------------------------------------------------------------- 1 | SET client_min_messages TO 'warning'; 2 | 3 | drop table if exists t1111; 4 | 5 | RESET client_min_messages; 6 | 7 | create table t1111(id int primary key, info text, wt int); 8 | 9 | insert into t1111(id,info,wt) values(1, 'record1', 1); 10 | 11 | insert into t1111(id,info,wt) values(2, 'record2', 2); 12 | 13 | update t1111 set wt = 12 where id = 1; 14 | 15 | select * from t1111; 16 | 1 record1 122 record2 2 17 | delete from t1111 where id = 1; 18 | 19 | select * from t1111; 20 | 2 record2 2 21 | 
prepare q1(int) as select*from t1111 where id=$1; 22 | 23 | begin; 24 | 25 | execute q1(1); 26 | 27 | execute q1(2); 28 | 2 record2 2 29 | prepare q2(text,int, int) as update t1111 set info=$1 , wt=$2 where id=$3; 30 | 31 | execute q2('Rec1',2,1); 32 | 33 | commit; 34 | 35 | execute q2('Rec2',3,2); 36 | 37 | drop table t1111; 38 | 39 | shard.1 40 | "==== delete cluster ok ====" 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/install_rbrcluster.result: -------------------------------------------------------------------------------- 1 | shard.1 2 | SET client_min_messages TO 'warning'; 3 | 4 | drop table if exists t1111; 5 | 6 | RESET client_min_messages; 7 | 8 | create table t1111(id int primary key, info text, wt int); 9 | 10 | insert into t1111(id,info,wt) values(1, 'record1', 1); 11 | 12 | insert into t1111(id,info,wt) values(2, 'record2', 2); 13 | 14 | update t1111 set wt = 12 where id = 1; 15 | 16 | select * from t1111; 17 | 1 record1 122 record2 2 18 | delete from t1111 where id = 1; 19 | 20 | select * from t1111; 21 | 2 record2 2 22 | prepare q1(int) as select*from t1111 where id=$1; 23 | 24 | begin; 25 | 26 | execute q1(1); 27 | 28 | execute q1(2); 29 | 2 record2 2 30 | prepare q2(text,int, int) as update t1111 set info=$1 , wt=$2 where id=$3; 31 | 32 | execute q2('Rec1',2,1); 33 | 34 | commit; 35 | 36 | execute q2('Rec2',3,2); 37 | 38 | drop table t1111; 39 | 40 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/r/remote_control_stopmysql.result: -------------------------------------------------------------------------------- 1 | 2022-08-08 16:51:01 2 | 2 3 | ===STDOUT=== 4 | : 0 5 | ===STDOUT=== 6 | : 0 7 | select step from rbr_consfailover where shard_id = 1 and step ="Fail" order by id desc limit 15; 8 | step 9 | show slave hosts; 10 | Server_id Host Port Master_id Slave_UUID 11 | 8710155 192.168.0.132 59403 9234443 6691bb43-16ce-11ed-91bc-7c10c93f0c7e 12 | 8513547 192.168.0.129 59403 9234443 64acfd8a-16ce-11ed-93f8-f02f7422ae6a 13 | Warnings: 14 | Warning 1287 'SHOW SLAVE HOSTS' is deprecated and will be removed in a future release. Please use SHOW REPLICAS instead 15 | show slave hosts; 16 | Server_id Host Port Master_id Slave_UUID 17 | Warnings: 18 | Warning 1287 'SHOW SLAVE HOSTS' is deprecated and will be removed in a future release. Please use SHOW REPLICAS instead 19 | show slave hosts; 20 | Server_id Host Port Master_id Slave_UUID 21 | Warnings: 22 | Warning 1287 'SHOW SLAVE HOSTS' is deprecated and will be removed in a future release. Please use SHOW REPLICAS instead 23 | select step from rbr_consfailover where shard_id = 2 and step ="Fail" order by id desc limit 15; 24 | step 25 | show slave hosts; 26 | Server_id Host Port Master_id Slave_UUID 27 | Warnings: 28 | Warning 1287 'SHOW SLAVE HOSTS' is deprecated and will be removed in a future release. Please use SHOW REPLICAS instead 29 | show slave hosts; 30 | Server_id Host Port Master_id Slave_UUID 31 | Warnings: 32 | Warning 1287 'SHOW SLAVE HOSTS' is deprecated and will be removed in a future release. Please use SHOW REPLICAS instead 33 | show slave hosts; 34 | Server_id Host Port Master_id Slave_UUID 35 | 8710153 192.168.0.132 59401 8513545 6691b416-16ce-11ed-928e-7c10c93f0c7e 36 | 9234441 192.168.0.140 59401 8513545 64b2c626-16ce-11ed-929e-04421aec4c44 37 | Warnings: 38 | Warning 1287 'SHOW SLAVE HOSTS' is deprecated and will be removed in a future release. 
Please use SHOW REPLICAS instead 39 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/add_comps_existcluster.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.add computer node 3 | #2.check all computer node write and read 4 | 5 | --connection default 6 | --let $cluster_id = `select id from db_clusters limit 0,1;` 7 | 8 | --http_connect(cluster_mgr_http1, cluster_mgr) 9 | #add comps 10 | --http 11 | request_type: POST 12 | header:Content-Type:application/json 13 | body:{ 14 | "version":"1.0", 15 | "job_id":"", 16 | "job_type":"add_comps", 17 | "user_name":"kunlun_test", 18 | "timestamp":"202205131532", 19 | "paras":{ 20 | "cluster_id":"${cluster_id}", 21 | "comps":"2", 22 | "computer_iplists":[ 23 | "${node_mgr.1}" 24 | ] 25 | } 26 | } 27 | EOF 28 | 29 | --let $job_id = `http_reap(job_id)` 30 | --sleep 20 31 | --source kunlun-test/include/wait_http_request_finish.inc 32 | 33 | --kl_cluster_id(1) 34 | --pg_connect(testrbr2,computer.2, abc, abc) 35 | --source kunlun-test/include/check_cn_write_read.inc 36 | 37 | --pg_connect(testrbr3,computer.3, abc, abc) 38 | --source kunlun-test/include/check_cn_write_read.inc 39 | 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/add_nodes_existcluster.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.add nodes 3 | #2.check mysqld master and slave 4 | 5 | --connection default 6 | --let $cluster_id = `select id from db_clusters limit 0,1;` 7 | --echo $cluster_id 8 | --let $shard_id = `select shard_id from shard_nodes limit 0,1;` 9 | 10 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 11 | #add nodes 12 | --http 13 | request_type: POST 14 | header:Content-Type:application/json 15 | body:{ 16 | "version":"1.0", 17 | "job_id":"", 18 | "job_type":"add_nodes", 19 | "user_name":"kunlun_test", 20 | "timestamp":"202205131532", 21 | "paras":{ 22 | "cluster_id":"${cluster_id}", 23 | "shard_id":"${shard_id}", 24 | "nodes":"2", 25 | "storage_iplists":[ 26 | "${node_mgr.1}" 27 | ] 28 | } 29 | } 30 | EOF 31 | 32 | --let $job_id = `http_reap(job_id)` 33 | --sleep 20 34 | --source kunlun-test/include/wait_http_request_finish.inc 35 | 36 | --kl_cluster_id(1) 37 | --source kunlun-test/include/check_shard_state.inc 38 | 39 | 40 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/add_shards_existcluster.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.add shard 3 | #2.check mysqld master and slave 4 | 5 | 6 | 7 | --connection default 8 | --let $cluster_id = `select id from db_clusters limit 0,1;` 9 | --echo $cluster_id 10 | 11 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 12 | #add shards 13 | --http 14 | request_type: POST 15 | header:Content-Type:application/json 16 | body:{ 17 | "version":"1.0", 18 | "job_id":"", 19 | "job_type":"add_shards", 20 | "user_name":"kunlun_test", 21 | "timestamp":"202205131532", 22 | "paras":{ 23 | "cluster_id":"${cluster_id}", 24 | "shards":"1", 25 | "nodes":"3", 26 | "storage_iplists":[ 27 | "${node_mgr.1}" 28 | ] 29 | } 30 | } 31 | EOF 32 | 33 | --let $job_id = `http_reap(job_id)` 34 | --sleep 20 35 | --source kunlun-test/include/wait_http_request_finish.inc 36 | 37 | --kl_cluster_id(1) 38 | --source 
kunlun-test/include/check_shard_state.inc 39 | 40 | 41 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/del_comps_existcluster.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.delete_comp 3 | #2.check the remained comps write and read 4 | 5 | --connection default 6 | --let $cluster_id = `select id from db_clusters limit 0,1;` 7 | --let $comp_id = `select id from comp_nodes limit 0,1;` 8 | --echo $comp_id 9 | --let $cn_num=`SELECT COUNT(*) FROM comp_nodes;` 10 | --echo $cn_num 11 | 12 | #delete comp 13 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 14 | --http 15 | request_type: POST 16 | header:Content-Type:application/json 17 | body:{ 18 | "ver":"1.0", 19 | "job_id":"", 20 | "job_type":"delete_comp", 21 | "user_name":"kunlun_test", 22 | "timestamp":"202205131532", 23 | "paras":{ 24 | "cluster_id":"${cluster_id}", 25 | "comp_id":"${comp_id}" 26 | } 27 | } 28 | EOF 29 | 30 | --let $job_id = `http_reap(job_id)` 31 | --sleep 20 32 | --source kunlun-test/include/wait_http_request_finish.inc 33 | 34 | 35 | --kl_cluster_id(1) 36 | --pg_connect(testrbr6,computer.1, abc, abc) 37 | --source kunlun-test/include/check_cn_write_read.inc 38 | 39 | #--pg_connect(testrbr5,computer.2, abc, abc) 40 | #--source kunlun-test/include/check_cn_write_read.inc 41 | 42 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/del_debugpoint1.test: -------------------------------------------------------------------------------- 1 | #--set_cluster_id(3) 2 | #--kl_cluster_id(1) 3 | --connection default 4 | --let $cluster_id = `select id from db_clusters limit 0,1;` 5 | 6 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 7 | --http 8 | request_type: POST 9 | header:Content-Type:application/json 10 | body:{ 11 | "version":"1.0", 12 | "job_id":"", 13 | "job_type":"cluster_debug", 14 | "timestamp":"1435749309", 15 | "user_name":"kunlun_test", 16 | "paras":{ 17 | "keyword":"Mock_Expand_Dump_Table_Failed", 18 | "op_type":"del" 19 | } 20 | } 21 | EOF 22 | 23 | --let $job_id = `http_reap(job_id)` 24 | --sleep 20 25 | --echo "del cluster" 26 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/del_debugpoint2.test: -------------------------------------------------------------------------------- 1 | #--set_cluster_id(3) 2 | #--kl_cluster_id(1) 3 | --connection default 4 | --let $cluster_id = `select id from db_clusters limit 0,1;` 5 | 6 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 7 | --http 8 | request_type: POST 9 | header:Content-Type:application/json 10 | body:{ 11 | "version":"1.0", 12 | "job_id":"", 13 | "job_type":"cluster_debug", 14 | "timestamp":"1435749309", 15 | "user_name":"kunlun_test", 16 | "paras":{ 17 | "keyword":"Mock_Expand_Compress_file_Failed", 18 | "op_type":"del" 19 | } 20 | } 21 | EOF 22 | 23 | --let $job_id = `http_reap(job_id)` 24 | --sleep 20 25 | --echo "del cluster" 26 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/del_debugpoint3.test: -------------------------------------------------------------------------------- 1 | #--set_cluster_id(3) 2 | #--kl_cluster_id(1) 3 | --connection default 4 | --let $cluster_id = `select id from db_clusters limit 0,1;` 5 | 6 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 7 | --http 8 | request_type: 
POST 9 | header:Content-Type:application/json 10 | body:{ 11 | "version":"1.0", 12 | "job_id":"", 13 | "job_type":"cluster_debug", 14 | "timestamp":"1435749309", 15 | "user_name":"kunlun_test", 16 | "paras":{ 17 | "keyword":"Mock_Expand_Transfer_dumped_file_Failed", 18 | "op_type":"del" 19 | } 20 | } 21 | EOF 22 | 23 | --let $job_id = `http_reap(job_id)` 24 | --sleep 20 25 | --echo "del cluster" 26 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/del_debugpoint4.test: -------------------------------------------------------------------------------- 1 | #--set_cluster_id(3) 2 | #--kl_cluster_id(1) 3 | --connection default 4 | --let $cluster_id = `select id from db_clusters limit 0,1;` 5 | 6 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 7 | --http 8 | request_type: POST 9 | header:Content-Type:application/json 10 | body:{ 11 | "version":"1.0", 12 | "job_id":"", 13 | "job_type":"cluster_debug", 14 | "timestamp":"1435749309", 15 | "user_name":"kunlun_test", 16 | "paras":{ 17 | "keyword":"Mock_Expand_Load_Table_Failed", 18 | "op_type":"del" 19 | } 20 | } 21 | EOF 22 | 23 | --let $job_id = `http_reap(job_id)` 24 | --sleep 20 25 | --echo "del cluster" 26 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/del_debugpoint5.test: -------------------------------------------------------------------------------- 1 | #--set_cluster_id(3) 2 | #--kl_cluster_id(1) 3 | --connection default 4 | --let $cluster_id = `select id from db_clusters limit 0,1;` 5 | 6 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 7 | --http 8 | request_type: POST 9 | header:Content-Type:application/json 10 | body:{ 11 | "version":"1.0", 12 | "job_id":"", 13 | "job_type":"cluster_debug", 14 | "timestamp":"1435749309", 15 | "user_name":"kunlun_test", 16 | "paras":{ 17 | "keyword":"Mock_Expand_Catchup_Table_Failed", 18 | "op_type":"del" 19 | } 20 | } 21 | EOF 22 | 23 | --let $job_id = `http_reap(job_id)` 24 | --sleep 20 25 | --echo "del cluster" 26 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/del_debugpoint6.test: -------------------------------------------------------------------------------- 1 | #--set_cluster_id(3) 2 | #--kl_cluster_id(1) 3 | --connection default 4 | --let $cluster_id = `select id from db_clusters limit 0,1;` 5 | 6 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 7 | --http 8 | request_type: POST 9 | header:Content-Type:application/json 10 | body:{ 11 | "version":"1.0", 12 | "job_id":"", 13 | "job_type":"cluster_debug", 14 | "timestamp":"1435749309", 15 | "user_name":"kunlun_test", 16 | "paras":{ 17 | "keyword":"Mock_Expand_Rename_Table_Failed", 18 | "op_type":"del" 19 | } 20 | } 21 | EOF 22 | 23 | --let $job_id = `http_reap(job_id)` 24 | --sleep 20 25 | --echo "del cluster" 26 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/del_nodes_existcluster.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.delete_node 3 | #2.check the remaining nodes master and slave 4 | 5 | --connection default 6 | --let $cluster_id = `select id from db_clusters limit 0,1;` 7 | --let $shard_id = `select shard_id from shard_nodes limit 0,1;` 8 | --echo $shard_id 9 | --let $port_id = `select port from shard_nodes limit 1,1;` 10 | --echo $port_id 11 | --let $sn_num=`select 
count(distinct shard_id) from shard_nodes;` 12 | --echo $sn_num 13 | 14 | #delete node 15 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 16 | --http 17 | request_type: POST 18 | header:Content-Type:application/json 19 | body:{ 20 | "ver":"1.0", 21 | "job_id":"", 22 | "job_type":"delete_node", 23 | "user_name":"kunlun_test", 24 | "timestamp":"202205131532", 25 | "paras":{ 26 | "cluster_id":"${cluster_id}", 27 | "shard_id":"${shard_id}", 28 | "hostaddr":"${node_mgr.1}", 29 | "port": "${port_id}" 30 | } 31 | } 32 | EOF 33 | 34 | --let $job_id = `http_reap(job_id)` 35 | --sleep 20 36 | --source kunlun-test/include/wait_http_request_finish.inc 37 | 38 | 39 | --kl_cluster_id(1) 40 | --source kunlun-test/include/check_shard_state.inc 41 | #--connection default 42 | #--let $sn_num=`select count(distinct shard_id) from shard_nodes;` 43 | #--echo $sn_num 44 | 45 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/del_rbrcluster.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.delete_cluster 3 | #2.check delete cluster 4 | 5 | --connection default 6 | --let $cluster_id = `select id from db_clusters limit 0,1;` 7 | 8 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 9 | --http 10 | request_type: POST 11 | header:Content-Type:application/json 12 | body:{ 13 | "version":"1.0", 14 | "job_id":"", 15 | "job_type":"delete_cluster", 16 | "timestamp" : "1435749309", 17 | "user_name":"zhangsan", 18 | "paras":{ "cluster_id":"${cluster_id}"} 19 | } 20 | EOF 21 | 22 | --let $job_id = `http_reap(job_id)` 23 | --sleep 20 24 | --source kunlun-test/include/wait_http_request_finish.inc 25 | --echo "test delete cluster ok" 26 | 27 | --source kunlun-test/include/check_delete_cluster_state.inc 28 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/del_shard_existcluster.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.delete_shard 3 | #2.check the remaining shards master and slave 4 | 5 | 6 | --connection default 7 | --let $cluster_id = `select id from db_clusters limit 0,1;` 8 | --let $shard_id = `select shard_id from shard_nodes limit 1,1;` 9 | --echo $shard_id 10 | --let $sn_num=`select count(distinct shard_id) from shard_nodes;` 11 | --echo $sn_num 12 | 13 | #delete shard 14 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 15 | --http 16 | request_type: POST 17 | header:Content-Type:application/json 18 | body:{ 19 | "ver":"1.0", 20 | "job_id":"", 21 | "job_type":"delete_shard", 22 | "user_name":"kunlun_test", 23 | "timestamp":"202205131532", 24 | "paras":{ 25 | "cluster_id":"${cluster_id}", 26 | "shard_id":"${shard_id}" 27 | } 28 | } 29 | EOF 30 | 31 | --let $job_id = `http_reap(job_id)` 32 | --sleep 20 33 | --source kunlun-test/include/wait_http_request_finish.inc 34 | 35 | 36 | --kl_cluster_id(1) 37 | --source kunlun-test/include/check_shard_state.inc 38 | #--connection default 39 | #--let $sn_num=`select count(distinct shard_id) from shard_nodes;` 40 | #--echo $sn_num 41 | 42 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/get_status_rbr.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.query current working status 3 | 4 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 5 | 6 | 
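# get_status is the generic polling call used throughout this suite: it posts a job_id
# previously returned by cluster_mgr and reads back that job's current state. Here a
# fixed sample job_id ("108") is sent and the test only checks that the call itself
# succeeds, which is why the usual wait_http_request_finish.inc loop stays commented out.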
--http 7 | request_type: POST 8 | header:Content-Type:application/json 9 | body:{ 10 | "version":"1.0", 11 | "job_id":"108", 12 | "job_type":"get_status", 13 | "timestamp":"202205131532", 14 | "paras":{ 15 | } 16 | } 17 | EOF 18 | 19 | 20 | #--sleep 20 21 | #--source kunlun-test/include/wait_http_request_finish.inc 22 | --echo "test create cluster" 23 | 24 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/install_rbrcluster.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.create cluster 3 | #2.cycle query status 4 | #3.check computer node write and read 5 | #4.check shard master and slave 6 | 7 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 8 | 9 | --http 10 | request_type: POST 11 | header:Content-Type:application/json 12 | body:{ 13 | "version":"1.0", 14 | "job_id":"", 15 | "job_type":"create_cluster", 16 | "user_name":"kunlun_test", 17 | "timestamp":"202205131532", 18 | "paras":{ 19 | "nick_name":"rbrcluster001", 20 | "ha_mode":"rbr", 21 | "shards":"1", 22 | "nodes":"3", 23 | "comps":"1", 24 | "max_storage_size":"20", 25 | "max_connections":"6", 26 | "cpu_cores":"8", 27 | "innodb_size":"1", 28 | "dbcfg":"1", 29 | "fullsync_level": "1", 30 | "storage_iplists": [ 31 | "${node_mgr.1}" 32 | ], 33 | "computer_iplists": [ 34 | "${node_mgr.1}" 35 | ] 36 | } 37 | } 38 | EOF 39 | 40 | --let $job_id = `http_reap(job_id)` 41 | --sleep 20 42 | --source kunlun-test/include/wait_http_request_finish.inc 43 | 44 | #check shard master and slave 45 | --kl_cluster_id(1) 46 | --source kunlun-test/include/check_shard_state.inc 47 | 48 | #check cn 49 | --pg_connect(testrbr1,computer.1, abc, abc) 50 | --source kunlun-test/include/check_cn_write_read.inc 51 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/logical_backup.test: -------------------------------------------------------------------------------- 1 | #--set_cluster_id(3) 2 | #--kl_cluster_id(1) 3 | --connection default 4 | --let $cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 5 | 6 | --http_connect(cluster_mgr_http1, cluster_mgr) 7 | --http 8 | request_type: POST 9 | header:Content-Type:application/json 10 | body:{ 11 | "version":"1.0", 12 | "job_id":"", 13 | "job_type":"logical_backup", 14 | "timestamp" : "1435749309", 15 | "user_name":"kunlun_test", 16 | "paras":{ 17 | "cluster_id":"${cluster_id}", 18 | "backup":[ 19 | { 20 | "db_table":"postgres_$$_public.transfer_account", 21 | "backup_time":"11:20:00-12:00:00" 22 | } 23 | ] 24 | } 25 | } 26 | EOF 27 | 28 | --let $job_id = `http_reap(job_id)` 29 | --echo $job_id 30 | #--sleep 20 31 | #--source kunlun-test/include/wait_http_request_finish.inc 32 | 33 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/manual_switch.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.create cluster 3 | #2.cycle query status 4 | #3.check computer node write and read 5 | #4.check shard active and standby (to be add) 6 | #5.add nodes(copies) 7 | #6.check all computer node write and read 8 | #7.check shard active and standby (to be add) 9 | #8.delete nodes(copies) 10 | #9.check all computer node write and read 11 | #10.check shard active and standby (to be add) 12 | #11.delete cluster 13 | 14 | --connection default 15 | --let $hostaddr = 
`select hostaddr from shard_nodes limit 0,1;` 16 | --let $cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 17 | --let $shard_id = `select shard_id from shard_nodes where status = "active" limit 0,1;` 18 | --let $port_id = `select port from shard_nodes where status = "active" limit 1,1;` 19 | 20 | --http_connect(cluster_mgr_http1, cluster_mgr) 21 | --http 22 | request_type: POST 23 | header:Content-Type:application/json 24 | body:{ 25 | "version":"1.0", 26 | "job_id":"", 27 | "job_type":"manual_switch", 28 | "timestamp":"1435749309", 29 | "user_name":"zhangsan", 30 | "paras":{ 31 | "cluster_id":"${cluster_id}", 32 | "shard_id":"${shard_id}", 33 | "master_hostaddr":"192.168.0.140_59403", 34 | "assign_hostaddr":"192.168.0.129_59403" 35 | } 36 | } 37 | EOF 38 | 39 | --let $job_id = `http_reap(job_id)` 40 | --sleep 20 41 | --source kunlun-test/include/wait_http_request_finish.inc 42 | --echo "done" 43 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/masterslave_stopslave.test: -------------------------------------------------------------------------------- 1 | --let $current_time=`select now()` 2 | --set_cluster_id(6) 3 | --kl_cluster_id(1) 4 | 5 | #stop the first shard slave 6 | --connect(rbrshard18,shard_slave.1-1,clustmgr,clustmgr_pwd) 7 | stop slave; 8 | --connect(rbrshard19,shard_slave.1-2,clustmgr,clustmgr_pwd) 9 | stop slave; 10 | 11 | --sleep 80 12 | --connection default 13 | --let $shardid = `select distinct shard_id from shard_nodes limit 0,1;` 14 | eval select step from rbr_consfailover where shard_id = $shardid and step ="Fail" order by id desc limit 15; 15 | --connect(rbrshard111,storage.1.1,clustmgr,clustmgr_pwd) 16 | show slave hosts; 17 | --connect(rbrshard112,storage.1.2,clustmgr,clustmgr_pwd) 18 | show slave hosts; 19 | --connect(rbrshard113,storage.1.3,clustmgr,clustmgr_pwd) 20 | show slave hosts; 21 | 22 | #stop the second shard slave 23 | --connect(rbrshard28,shard_slave.2-1,clustmgr,clustmgr_pwd) 24 | stop slave; 25 | --connect(rbrshard29,shard_slave.2-2,clustmgr,clustmgr_pwd) 26 | stop slave; 27 | 28 | --sleep 80 29 | --connection default 30 | --let $shardid = `select distinct shard_id from shard_nodes limit 1,1;` 31 | eval select step from rbr_consfailover where shard_id = $shardid and step ="Fail" order by id desc limit 15; 32 | --connect(rbrshard211,storage.2.1,clustmgr,clustmgr_pwd) 33 | show slave hosts; 34 | --connect(rbrshard212,storage.2.2,clustmgr,clustmgr_pwd) 35 | show slave hosts; 36 | --connect(rbrshard213,storage.2.3,clustmgr,clustmgr_pwd) 37 | show slave hosts; 38 | 39 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/remote_control_killcluster.test: -------------------------------------------------------------------------------- 1 | --set_cluster_id(8) 2 | --kl_cluster_id(1) 3 | #--let $port = `select port from cluster_mgr_nodes where member_state ="source" limit 0,1;` 4 | --connection default 5 | --exec ./kunlun-test/util/test_client cluster_mgr "ps -aux |grep cluster_mgr|grep -v grep |awk '{print $2}' | xargs kill -9" 6 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/remote_control_killmysql.test: -------------------------------------------------------------------------------- 1 | --connection default 2 | --let $node_mgr_1 = query_get_value(select hostaddr from server_nodes where machine_type = "computer",hostaddr,1) 3 | 
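# Failover scenario: look up the port of a source (primary) mysqld in shard_nodes and
# kill that process on shard.1 via test_client, repeating the kill up to 20 times; after
# the long sleep, rbr_consfailover is queried for "Fail" steps and SHOW SLAVE HOSTS is
# run on every storage node of the shard to confirm the post-failover replication topology.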
--let $sourceport_num =`select count(port) from shard_nodes where member_state = "source";` 4 | --let $i = 0 5 | --let $j = 20 6 | --echo $sourceport_num 7 | --set_cluster_id(19) 8 | --kl_cluster_id(1) 9 | while($j > 0) 10 | { 11 | --let $port = `select port from shard_nodes where member_state = "source" limit $i,1;` 12 | #--exec ./kunlun-test/util/test_client shard.1 " ps -aux |grep $port|grep -v grep |awk '{print $2}' | xargs kill" > dev/nul 2>1& 13 | --exec ./kunlun-test/util/test_client shard.1 " ps -aux |grep $port|grep -v grep |awk '{print $2}' | xargs kill" 14 | #--inc $i 15 | #--dec $sourceport_num 16 | --dec $j 17 | } 18 | 19 | --sleep 100 20 | --connection default 21 | --let $shardid = `select distinct shard_id from shard_nodes where status = "active" limit 0,1;` 22 | eval select step from rbr_consfailover where shard_id = $shardid and step ="Fail" order by id desc limit 15; 23 | --connect(rbrshard12,storage.1.1,clustmgr,clustmgr_pwd) 24 | show slave hosts; 25 | --connect(rbrshard13,storage.1.2,clustmgr,clustmgr_pwd) 26 | show slave hosts; 27 | --connect(rbrshard14,storage.1.3,clustmgr,clustmgr_pwd) 28 | show slave hosts; 29 | 30 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/remote_control_killpg.test: -------------------------------------------------------------------------------- 1 | --let $compport_num =`select count(port) from comp_nodes;` 2 | --let $i = 0 3 | --echo $compport_num 4 | --set_cluster_id(8) 5 | --kl_cluster_id(1) 6 | while($compport_num > 0) 7 | { 8 | --let $port = `select port from comp_nodes limit $i,1;` 9 | --exec ./kunlun-test/util/test_client shard.1 " ps -aux |grep $port|grep -v grep |awk '{print $2}' | xargs kill" 10 | --inc $i 11 | --dec $compport_num 12 | } 13 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/remote_control_stopmysql.test: -------------------------------------------------------------------------------- 1 | --let $current_time=`select now()` 2 | --echo $current_time 3 | --let $port_num =`select count(*) from shard_nodes where member_state ="source" ;` 4 | --let $i = 0 5 | --echo $port_num 6 | --set_cluster_id(1) 7 | --kl_cluster_id(1) 8 | 9 | while($port_num > 0) 10 | { 11 | --let $port = `select port from shard_nodes where member_state ="source" limit $i,1;` 12 | #--echo $port 13 | --exec ./kunlun-test/util/test_client shard.1 "cd /home/kunlun/testmgr0.9.2/instance_binaries/storage/$port/kunlun-storage-0.9.2/dba_tools && ./stopmysql.sh $port" 14 | #--sleep 1 15 | --echo $i 16 | --dec $port_num 17 | } 18 | 19 | 20 | --sleep 30 21 | 22 | #switch the first shard 23 | --connection default 24 | --let $shardid = `select distinct shard_id from shard_nodes limit 0,1;` 25 | eval select step from rbr_consfailover where shard_id = $shardid and step ="Fail" order by id desc limit 15; 26 | --connect(rbrshard12,storage.1.1,clustmgr,clustmgr_pwd) 27 | show slave hosts; 28 | --connect(rbrshard13,storage.1.2,clustmgr,clustmgr_pwd) 29 | show slave hosts; 30 | --connect(rbrshard14,storage.1.3,clustmgr,clustmgr_pwd) 31 | show slave hosts; 32 | 33 | 34 | --sleep 30 35 | 36 | #switch the second shard 37 | --connection default 38 | --let $shardid = `select distinct shard_id from shard_nodes limit 1,1;` 39 | eval select step from rbr_consfailover where shard_id = $shardid and step ="Fail" order by id desc limit 15; 40 | --connect(rbrshard22,storage.2.1,clustmgr,clustmgr_pwd) 41 | show slave hosts; 42 | 
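# The remaining replicas of the second shard are checked the same way below.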
--connect(rbrshard23,storage.2.2,clustmgr,clustmgr_pwd) 43 | show slave hosts; 44 | --connect(rbrshard24,storage.2.3,clustmgr,clustmgr_pwd) 45 | show slave hosts; 46 | 47 | 48 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/set_noswitch.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.create cluster 3 | #2.cycle query status 4 | #3.check computer node write and read 5 | #4.check shard active and standby (to be add) 6 | #5.add nodes(copies) 7 | #6.check all computer node write and read 8 | #7.check shard active and standby (to be add) 9 | #8.delete nodes(copies) 10 | #9.check all computer node write and read 11 | #10.check shard active and standby (to be add) 12 | #11.delete cluster 13 | 14 | --connection default 15 | --let $hostaddr = `select hostaddr from shard_nodes limit 0,1;` 16 | --let $cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 17 | --let $shard_id = `select shard_id from shard_nodes where status = "active" limit 0,1;` 18 | --let $port_id = `select port from shard_nodes where status = "active" limit 1,1;` 19 | 20 | --http_connect(cluster_mgr_http1, cluster_mgr) 21 | --http 22 | request_type: POST 23 | header:Content-Type:application/json 24 | body:{ 25 | "version":"1.0", 26 | "job_id":"", 27 | "job_type":"set_noswitch", 28 | "timestamp":"1435749309", 29 | "user_name":"zhangsan", 30 | "paras":{ 31 | "type":"1", 32 | "timeout":"600", 33 | "cluster_id":"${cluster_id}", 34 | "shard_id":"${shard_id}" 35 | } 36 | } 37 | EOF 38 | 39 | --let $job_id = `http_reap(job_id)` 40 | --sleep 20 41 | #--source kunlun-test/include/wait_http_request_finish.inc 42 | --echo "done" 43 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/table_repartition.test: -------------------------------------------------------------------------------- 1 | --connection default 2 | --let $src_cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 3 | --let $dst_cluster_id = `select id from db_clusters where status = "inuse" limit 1,1;` 4 | 5 | #--exec_in_background python2 /home/hellen/kunlun_test_framework_nostatus/mysql-test/kunlun-test/t/loop_process_transfer.py --meta_host 192.168.0.140:59301 --thread_num 1 --clusterid $src_cluster_id --timeout 50 --total_money 1000000 6 | 7 | #--sleep 50 8 | 9 | 10 | 11 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 12 | --http 13 | request_type: POST 14 | header:Content-Type:application/json 15 | body:{ 16 | "version":"1.0", 17 | "job_id":"", 18 | "job_type":"table_repartition", 19 | "timestamp" : "1435749309", 20 | "user_name":"kunlun_test", 21 | "paras":{ 22 | "src_cluster_id":"${src_cluster_id}", 23 | "dst_cluster_id":"${dst_cluster_id}", 24 | "repartition_tables":"postgres_$$_public.transfer_account=>postgres_$$_public.t1" 25 | } 26 | } 27 | EOF 28 | 29 | --let $job_id = `http_reap(job_id)` 30 | --sleep 20 31 | --source kunlun-test/include/wait_http_request_finish.inc 32 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.1.2/t/update_instance_cgroup.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.create cluster 3 | #2.cycle query status 4 | #3.check computer node write and read 5 | #4.check shard active and standby (to be add) 6 | #5.delete cluster 7 | 8 | --connection default 
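# The compute-node addresses gathered below are only echoed for reference; the
# update_instance_cgroup request further down targets a hard-coded ip/port pair
# ("192.168.0.140":"59401"), so those values must match the instance actually under test.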
9 | --let $node_mgr_1 = query_get_value(select hostaddr from server_nodes where machine_type = "computer",hostaddr,1) 10 | --let $node_mgr_2 = query_get_value(select hostaddr from server_nodes where machine_type = "computer",hostaddr,2) 11 | --let $node_mgr_3 = query_get_value(select hostaddr from server_nodes where machine_type = "computer",hostaddr,3) 12 | --echo $node_mgr_1,$node_mgr_2,$node_mgr_3 13 | --let $storage_iplists="$node_mgr_1","$node_mgr_2","$node_mgr_3" 14 | 15 | 16 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 17 | 18 | --http 19 | request_type: POST 20 | header:Content-Type:application/json 21 | body:{ 22 | "version":"1.0", 23 | "job_id":"", 24 | "job_type":"update_instance_cgroup", 25 | "user_name":"kunlun_test", 26 | "timestamp":"202205131532", 27 | "paras":{ 28 | "ip":"192.168.0.140", 29 | "port":"59401", 30 | "type":"mysql", 31 | "cpu_cores":"1", 32 | "cgroup_mode":"share" 33 | } 34 | } 35 | EOF 36 | 37 | --let $job_id = `http_reap(job_id)` 38 | --sleep 60 39 | #--source kunlun-test/include/wait_http_request_finish.inc 40 | --echo "update_instance_cgroup" 41 | 42 | 43 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/include/check_by_get_state.inc: -------------------------------------------------------------------------------- 1 | --http 2 | request_type: POST 3 | header:Content-Type:application/json 4 | body:{ 5 | "version":"1.0", 6 | "job_id":"{$job_id}", 7 | "job_type":"get_status", 8 | "timestamp" : "202205131532", 9 | "paras":{ } 10 | } 11 | EOF 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/include/check_cn_write_read.inc: -------------------------------------------------------------------------------- 1 | SET client_min_messages TO 'warning'; 2 | drop table if exists t1111; 3 | RESET client_min_messages; 4 | create table t1111(id int primary key, info text, wt int); 5 | insert into t1111(id,info,wt) values(1, 'record1', 1); 6 | insert into t1111(id,info,wt) values(2, 'record2', 2); 7 | update t1111 set wt = 12 where id = 1; 8 | select * from t1111; 9 | delete from t1111 where id = 1; 10 | select * from t1111; 11 | prepare q1(int) as select*from t1111 where id=$1; 12 | begin; 13 | execute q1(1); 14 | execute q1(2); 15 | prepare q2(text,int, int) as update t1111 set info=$1 , wt=$2 where id=$3; 16 | execute q2('Rec1',2,1); 17 | commit; 18 | execute q2('Rec2',3,2); 19 | drop table t1111; 20 | 21 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/include/check_create_cluster_state.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # 1. check shard status 4 | # 2. check pg status 5 | # 3. 
check metadb shards, shard_nodes, comp_nodes 6 | # 7 | 8 | --connection default 9 | 10 | --let $shard_nums = `SELECT COUNT(*) FROM shards` 11 | --let $comp_nums = `SELECT COUNT(*) FROM comp_nodes` 12 | 13 | if($ha_mode == rbr) 14 | { 15 | 16 | while($shard_nums > 0) 17 | { 18 | --let $shardid=shard.$shard_nums 19 | --source kunlun-test/suite/cluster_manager/check_rbr_shard_state.inc 20 | dec $shard_nums; 21 | } 22 | 23 | } 24 | 25 | --let $c_inc=1 26 | while($comp_nums > 0) 27 | { 28 | let $cconn_name=cc_pg_conn.$c_inc; 29 | let $comp = computer.$comp_nums; 30 | --pg_connect($cconn_name, $comp, $comp_user, $comp_pwd) 31 | --source kunlun-test/suite/cluster_manager/check_computer_state.inc 32 | dec $comp_nums; 33 | inc $c_inc; 34 | --connection default 35 | } 36 | 37 | 38 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/include/check_delete_cluster_state.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # 1. check shards, shard_nodes 4 | # 2. check comp_nodes 5 | # 3. check metadb commit_log_xxx and ddl_ops_log_xxxx 6 | # 7 | 8 | --connection default 9 | 10 | --let $cn_num=`SELECT COUNT(*) FROM comp_nodes;` 11 | if($cn_num != 0) 12 | { 13 | --echo "comp is not empty" 14 | --exit 15 | } 16 | 17 | --let $sds_num=`SELECT COUNT(*) FROM shards;` 18 | if($sds_num != 0) 19 | { 20 | --echo "shards is not empty" 21 | --exit 22 | } 23 | 24 | --let $sn_num=`SELECT COUNT(*) FROM shard_nodes;` 25 | if($sn_num != 0) 26 | { 27 | --echo "shard_nodes is not empty" 28 | --exit 29 | } 30 | 31 | --let $dc_num=`SELECT COUNT(*) FROM db_clusters;` 32 | if($dc_num != 0) 33 | { 34 | --echo "db_clusters is not empty" 35 | --exit 36 | } 37 | 38 | --let $commit_name='commit_log_$cluster_name' 39 | --let $ddlops_name='ddl_ops_log_$cluster_name' 40 | --let $commit_exist=`SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=$commit_name;` 41 | if($commit_exist != 0) 42 | { 43 | --echo "commit_log is not deleted, please check" 44 | --exit 45 | } 46 | 47 | --let $ddlops_exist=`SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=$ddlops_name;` 48 | if($ddlops_exist != 0) 49 | { 50 | --echo "ddl_ops_log is not deleted, please check" 51 | --exit 52 | } 53 | 54 | --echo "==== delete cluster ok ====" 55 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/include/check_delete_cluster_state.inc.bak: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # 1. check shards, shard_nodes 4 | # 2. check comp_nodes 5 | # 3. 
check metadb commit_log_xxx and ddl_ops_log_xxxx 6 | # 7 | 8 | --connection default 9 | 10 | --let $sds_num=query_get_value(SELECT COUNT(*) FROM shards WHERE db_cluster_id=$cluster_id, COUNT(*), 1) 11 | if($sds_num != 0) 12 | { 13 | --echo "shards is not empty" 14 | --exit 15 | } 16 | 17 | --let $sn_num=query_get_value(SELECT COUNT(*) FROM shard_nodes WHERE db_cluster_id=$cluster_id, COUNT(*), 1) 18 | if($sn_num != 0) 19 | { 20 | --echo "shard_nods is not empty" 21 | --exit 22 | } 23 | 24 | --let $dc_num=query_get_value(SELECT COUNT(*) FROM db_clusters WHERE id=$cluster_id, COUNT(*), 1) 25 | if($dc_num != 0) 26 | { 27 | --echo "db_clusters is not empty" 28 | --exit 29 | } 30 | 31 | --let $commit_name='commit_log_$cluster_name' 32 | --let $ddlops_name='ddl_ops_log_$cluster_name' 33 | --let $commit_exist=query_get_value(SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=$commit_name, COUNT(*), 1) 34 | if($commit_exist != 0) 35 | { 36 | --echo "commit_log is not deleted, please check" 37 | --exit 38 | } 39 | 40 | --let $ddlops_exist=query_get_value(SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=$ddlops_name, COUNT(*), 1) 41 | if($ddlops_exist != 0) 42 | { 43 | --echo "ddl_ops_log is not deleted, please check" 44 | --exit 45 | } 46 | 47 | --echo "==== delete cluster ok ====" 48 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/include/check_rbr_shard_state.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # check rbr mode state 4 | # 1. check metadb shard_nodes 5 | # 2. check m/s 6 | 7 | --connect(shard_master, $shardid, clustmgr, clustmgr_pwd) 8 | --let $slave_host = query_get_value(SHOW SLAVE HOSTS, Host, 1) 9 | 10 | if($slave_host == "No such row") 11 | { 12 | --echo "connect master host and get slave hosts failed" 13 | --exit 14 | } 15 | 16 | if($slave_host == "NULL") 17 | { 18 | --echo "connect master host and get slave hosts empty" 19 | --exit 20 | } 21 | 22 | disconnect shard_master; 23 | 24 | --connection default 25 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/include/check_shard_state.inc: -------------------------------------------------------------------------------- 1 | --connection default 2 | --let $shard_nums = `SELECT COUNT(*) FROM shards ` 3 | #--let $i = 0 4 | 5 | while($shard_nums > 0) 6 | { 7 | #--let $shardid = `select shard_id from shard_nodes where member_state ="source" and status = "creating" limit $i,1;` 8 | --let $shardid=shard.$shard_nums 9 | --echo $shardid 10 | #--set_cluster_id(4) 11 | #--kl_cluster_id(1) 12 | --connect(shard_master,$shardid, clustmgr, clustmgr_pwd) 13 | --let $slave_host = query_get_value(SHOW SLAVE HOSTS, Host, 1) 14 | if($slave_host == "No such row") 15 | { 16 | --echo "connect master host and get slave hosts failed" 17 | --exit 18 | } 19 | 20 | if($slave_host == "NULL") 21 | { 22 | --echo "connect master host and get slave hosts empty" 23 | --exit 24 | } 25 | disconnect shard_master; 26 | --connection default 27 | dec $shard_nums; 28 | #inc $i; 29 | } 30 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/include/get_backup_status.inc: -------------------------------------------------------------------------------- 1 | 2 | if(!$http_req_timeout) 3 | { 4 | --let $http_req_timeout = 500 5 | } 6 | 7 | #--echo $job_id 8 | --sleep 5 9 | 10 | --let 
$condition=1 11 | while ($condition == 1) 12 | { 13 | --connection default 14 | --let $status = `select status from cluster_general_job_log where job_type='shard_coldbackup' order by id desc limit 1;` 15 | 16 | if ($status != ongoing) 17 | { 18 | --let $condition=0 19 | } 20 | 21 | --sleep 10 22 | #--echo status: $status 23 | --dec $http_req_timeout 24 | if($http_req_timeout <= 0) 25 | { 26 | --let $condition=0 27 | --exit 28 | } 29 | 30 | } 31 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/include/get_cluster_name.inc: -------------------------------------------------------------------------------- 1 | --http 2 | request_type: POST 3 | header:Content-Type:application/json 4 | body:{ 5 | "version":"1.0", 6 | "job_id":"", 7 | "job_type":"get_cluster_summary", 8 | "timestamp" : "202205131532", 9 | "paras":{ } 10 | } 11 | EOF 12 | --let $cluster_name = `http_reap(attachment.list_cluster[0].cluster_name)` 13 | --let $cluster_id = `http_reap(attachment.list_cluster[0].cluster_id)` 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/include/get_restore_status.inc: -------------------------------------------------------------------------------- 1 | 2 | if(!$http_req_timeout) 3 | { 4 | --let $http_req_timeout = 500 5 | } 6 | 7 | --echo $job_id 8 | --sleep 5 9 | 10 | --let $condition=1 11 | while ($condition == 1) 12 | { 13 | --connection default 14 | --let $status = `select status from restore_log where general_log_id = $job_id order by id desc limit 1;` 15 | 16 | if ($status != ongoing) 17 | { 18 | --let $condition=0 19 | } 20 | 21 | --sleep 10 22 | #--echo status: $status 23 | --dec $http_req_timeout 24 | if($http_req_timeout <= 0) 25 | { 26 | --let $condition=0 27 | --exit 28 | } 29 | 30 | } 31 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/include/wait_http_request_finish.inc: -------------------------------------------------------------------------------- 1 | 2 | if(!$http_req_timeout) 3 | { 4 | --let $http_req_timeout = 500 5 | } 6 | 7 | #--echo $job_id 8 | --sleep 5 9 | 10 | --let $condition=1 11 | while ($condition == 1) 12 | { 13 | --http 14 | request_type: POST 15 | header:Content-Type:application/json 16 | body:{ 17 | "version": "1.0", 18 | "job_id":"${job_id}", 19 | "job_type": "get_status", 20 | "timestamp" : "1435749309", 21 | "paras": {} 22 | } 23 | EOF 24 | 25 | --let $status = `http_reap(status)` 26 | if ($status != ongoing) 27 | { 28 | --let $condition=0 29 | } 30 | 31 | --sleep 10 32 | #--echo status: $status 33 | --dec $http_req_timeout 34 | if($http_req_timeout <= 0) 35 | { 36 | --let $condition=0 37 | --exit 38 | } 39 | 40 | --let $cluster_name = `http_reap(attachment.cluster_name)` 41 | --let $cluster_id = `http_reap(attachment.cluster_id)` 42 | } 43 | 44 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/add_comps_existcluster.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.add computer node 3 | #2.check all computer node write and read 4 | #3.delete computer node 5 | #4.check all computer node write and read 6 | #5.delete cluster 7 | 8 | --connection default 9 | --let $cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 10 | --let $node_mgr_1 = 
query_get_value(select hostaddr from server_nodes where machine_type = "computer",hostaddr,1) 11 | --let $node_mgr_2 = query_get_value(select hostaddr from server_nodes where machine_type = "computer",hostaddr,2) 12 | --let $node_mgr_3 = query_get_value(select hostaddr from server_nodes where machine_type = "computer",hostaddr,3) 13 | --let $storage_iplists="$node_mgr_1","$node_mgr_2","$node_mgr_3" 14 | 15 | 16 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 17 | #add comps 18 | --http 19 | request_type: POST 20 | header:Content-Type:application/json 21 | body:{ 22 | "version":"1.0", 23 | "job_id":"", 24 | "job_type":"add_comps", 25 | "user_name":"kunlun_test", 26 | "timestamp":"202205131532", 27 | "paras":{ 28 | "cluster_id":"${cluster_id}", 29 | "comps":"1", 30 | "computer_iplists":[ 31 | ${storage_iplists} 32 | ] 33 | } 34 | } 35 | EOF 36 | 37 | --let $job_id = `http_reap(job_id)` 38 | --source kunlun-test/include/wait_http_request_finish.inc 39 | --echo "done" 40 | #--kl_cluster_id(1) 41 | 42 | #--pg_connect(testrbr2,computer.2, abc, abc) 43 | #--source kunlun-test/include/check_cn_write_read.inc 44 | 45 | #--pg_connect(testrbr3,computer.3, abc, abc) 46 | #--source kunlun-test/include/check_cn_write_read.inc 47 | 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/add_nodes_existcluster.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.add nodes 3 | #2.check msyld master and slave 4 | 5 | --connection default 6 | --let $node_mgr_1 = query_get_value(select hostaddr from server_nodes where machine_type = "computer",hostaddr,1) 7 | --let $node_mgr_2 = query_get_value(select hostaddr from server_nodes where machine_type = "computer",hostaddr,2) 8 | --let $node_mgr_3 = query_get_value(select hostaddr from server_nodes where machine_type = "computer",hostaddr,3) 9 | --let $storage_iplists="$node_mgr_1","$node_mgr_2","$node_mgr_3" 10 | --let $cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 11 | --let $shard_id = `select shard_id from shard_nodes where db_cluster_id= $cluster_id and status = "active" limit 0,1;` 12 | #--let $cluster_id = `select id from db_clusters limit 0,1;` 13 | #--let $shard_id = `select shard_id from shard_nodes limit 0,1;` 14 | 15 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 16 | #add nodes 17 | --http 18 | request_type: POST 19 | header:Content-Type:application/json 20 | body:{ 21 | "version":"1.0", 22 | "job_id":"", 23 | "job_type":"add_nodes", 24 | "user_name":"kunlun_test", 25 | "timestamp":"202205131532", 26 | "paras":{ 27 | "cluster_id":"${cluster_id}", 28 | "shard_id":"${shard_id}", 29 | "nodes":"1", 30 | "storage_iplists":[ 31 | ${storage_iplists} 32 | ] 33 | } 34 | } 35 | EOF 36 | 37 | --let $job_id = `http_reap(job_id)` 38 | --sleep 20 39 | --source kunlun-test/include/wait_http_request_finish.inc 40 | --echo "end" 41 | #--kl_cluster_id(1) 42 | #--source kunlun-test/include/check_shard_state.inc 43 | 44 | 45 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/add_shards_existcluster.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.add computer node 3 | #2.check all computer node write and read 4 | #3.delete computer node 5 | #4.check all computer node write and read 6 | #5.delete cluster 7 | 8 | #--set_cluster_id(3) 9 | 
#--kl_cluster_id(1) 10 | 11 | 12 | --connection default 13 | --let $cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 14 | --let $node_mgr_1 = query_get_value(select hostaddr from server_nodes where machine_type = "computer",hostaddr,1) 15 | --let $node_mgr_2 = query_get_value(select hostaddr from server_nodes where machine_type = "computer",hostaddr,2) 16 | --let $node_mgr_3 = query_get_value(select hostaddr from server_nodes where machine_type = "computer",hostaddr,3) 17 | --let $storage_iplists="$node_mgr_1","$node_mgr_2","$node_mgr_3" 18 | 19 | 20 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 21 | #add shards 22 | --http 23 | request_type: POST 24 | header:Content-Type:application/json 25 | body:{ 26 | "version":"1.0", 27 | "job_id":"", 28 | "job_type":"add_shards", 29 | "user_name":"kunlun_test", 30 | "timestamp":"202205131532", 31 | "paras":{ 32 | "cluster_id":"${cluster_id}", 33 | "shards":"1", 34 | "nodes":"3", 35 | "storage_iplists":[ 36 | ${storage_iplists} 37 | ] 38 | } 39 | } 40 | EOF 41 | 42 | --let $job_id = `http_reap(job_id)` 43 | --sleep 10 44 | --source kunlun-test/include/wait_http_request_finish.inc 45 | --sleep 10 46 | --source kunlun-test/include/get_shard_coldbackup_status.inc 47 | 48 | --sleep 20 49 | #--kl_cluster_id(1) 50 | --source kunlun-test/include/check_shard_state.inc 51 | --echo "done" 52 | 53 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/create_rcr.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.create cluster 3 | #2.cycle query status 4 | #3.check computer node write and read 5 | #4.check shard active and standby (to be add) 6 | #5.delete cluster 7 | --connection default 8 | --let $src_cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 9 | --let $dst_cluster_id = `select id from db_clusters where status = "inuse" limit 1,1;` 10 | 11 | 12 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 13 | 14 | --http 15 | request_type: POST 16 | header:Content-Type:application/json 17 | body:{ 18 | "version":"1.0", 19 | "job_id":"", 20 | "job_type":"create_rcr", 21 | "user_name":"kunlun_test", 22 | "timestamp":"202205131532", 23 | "paras":{ 24 | "master_info":{ 25 | "meta_db":"192.168.0.134:59301", 26 | "cluster_id":"${src_cluster_id}" 27 | }, 28 | "cluster_id":"${dst_cluster_id}" 29 | } 30 | } 31 | EOF 32 | 33 | --let $job_id = `http_reap(job_id)` 34 | --sleep 60 35 | --source kunlun-test/include/wait_http_request_finish.inc 36 | --echo "done" 37 | 38 | --connection default 39 | select * from cluster_rcr_infos ; 40 | select * from cluster_rcr_meta_sync ; 41 | 42 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/del_comps_existcluster.test: -------------------------------------------------------------------------------- 1 | #--set_cluster_id(7) 2 | #--kl_cluster_id(1) 3 | 4 | --connection default 5 | --let $cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 6 | --let $comp_id = `select id from comp_nodes where status = "active" limit 1,1;` 7 | --echo $comp_id 8 | --let $cn_num=`SELECT COUNT(*) FROM comp_nodes;` 9 | --echo $cn_num 10 | 11 | #delete comp 12 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 13 | --http 14 | request_type: POST 15 | header:Content-Type:application/json 16 | body:{ 17 | "ver":"1.0", 18 | "job_id":"", 19 | "job_type":"delete_comp", 20 
| "user_name":"kunlun_test", 21 | "timestamp":"202205131532", 22 | "paras":{ 23 | "cluster_id":"${cluster_id}", 24 | "comp_id":"${comp_id}" 25 | } 26 | } 27 | EOF 28 | 29 | 30 | #--sleep 40 31 | #--source kunlun-test/include/wait_http_request_finish.inc 32 | --echo "done" 33 | --connection default 34 | --let $cn_num=`SELECT COUNT(*) FROM comp_nodes;` 35 | --echo $cn_num 36 | 37 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/del_nodes_existcluster.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.delete_comp 3 | #2.check the remained nodes master and slave 4 | 5 | --connection default 6 | --let $hostaddr = `select hostaddr from shard_nodes where status = "active" limit 1,1;` 7 | #--let $cluster_id = `select id from db_clusters limit 0,1;` 8 | #--let $shard_id = `select shard_id from shard_nodes limit 0,1;` 9 | #--let $port_id = `select port from shard_nodes limit 1,1;` 10 | --let $cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 11 | --let $shard_id = `select shard_id from shard_nodes where status = "active" limit 0,1;` 12 | --let $port_id = `select port from shard_nodes where status = "active" limit 1,1;` 13 | --let $sn_num=`select count(distinct shard_id) from shard_nodes where status = "active";` 14 | 15 | #delete shard 16 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 17 | --http 18 | request_type: POST 19 | header:Content-Type:application/json 20 | body:{ 21 | "ver":"1.0", 22 | "job_id":"", 23 | "job_type":"delete_node", 24 | "user_name":"kunlun_test", 25 | "timestamp":"202205131532", 26 | "paras":{ 27 | "cluster_id":"${cluster_id}", 28 | "shard_id":"${shard_id}", 29 | "hostaddr":"${hostaddr}", 30 | "port": "${port_id}" 31 | } 32 | } 33 | EOF 34 | 35 | --let $job_id = `http_reap(job_id)` 36 | #--sleep 20 37 | #--source kunlun-test/include/wait_http_request_finish.inc 38 | --echo "end" 39 | 40 | #--kl_cluster_id(1) 41 | #--source kunlun-test/include/check_shard_state.inc 42 | #--connection default 43 | #--let $sn_num=`select count(distinct shard_id) from shard_nodes;` 44 | #--echo $sn_num 45 | 46 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/del_rbrcluster.test: -------------------------------------------------------------------------------- 1 | #--set_cluster_id(3) 2 | #--kl_cluster_id(1) 3 | --connection default 4 | --let $cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 5 | #--let $cluster_id = `select id from db_clusters limit 0,1;` 6 | 7 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 8 | --http 9 | request_type: POST 10 | header:Content-Type:application/json 11 | body:{ 12 | "version":"1.0", 13 | "job_id":"", 14 | "job_type":"delete_cluster", 15 | "timestamp" : "1435749309", 16 | "user_name":"kunlun_test", 17 | "paras":{ "cluster_id":"${cluster_id}"} 18 | } 19 | EOF 20 | 21 | --let $job_id = `http_reap(job_id)` 22 | --sleep 20 23 | --source kunlun-test/include/wait_http_request_finish.inc 24 | --echo "del cluster" 25 | 26 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/del_rcr.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.create cluster 3 | #2.cycle query status 4 | #3.check computer node write and read 5 | #4.check shard active and standby (to be add) 6 | 
#5.delete cluster 7 | --connection default 8 | --let $src_cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 9 | --let $dst_cluster_id = `select id from db_clusters where status = "inuse" limit 1,1;` 10 | --let $comp_host1 = `select hostaddr from comp_nodes where status = "active" and db_cluster_id = $src_cluster_id limit 0,1;` 11 | --let $comp_port1 = `select port from comp_nodes where status = "active" and db_cluster_id = $src_cluster_id limit 0,1;` 12 | --let $comp_host2 = `select hostaddr from comp_nodes where status = "active" and db_cluster_id = $dst_cluster_id limit 0,1;` 13 | --let $comp_port2 = `select port from comp_nodes where status = "active" and db_cluster_id = $dst_cluster_id limit 0,1;` 14 | 15 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 16 | 17 | --http 18 | request_type: POST 19 | header:Content-Type:application/json 20 | body:{ 21 | "version":"1.0", 22 | "job_id":"", 23 | "job_type":"delete_rcr", 24 | "user_name":"kunlun_test", 25 | "timestamp":"202205131532", 26 | "paras":{ 27 | "master_info":{ 28 | "meta_db":"192.168.0.134:59301", 29 | "cluster_id":"${src_cluster_id}" 30 | }, 31 | "cluster_id":"${dst_cluster_id}" 32 | } 33 | } 34 | EOF 35 | 36 | --let $job_id = `http_reap(job_id)` 37 | --sleep 10 38 | --source kunlun-test/include/wait_http_request_finish.inc 39 | --sleep 10 40 | --echo "done" 41 | 42 | --exec python2 kunlun-test/t/smokeTest.py --host $comp_host1 --port $comp_port1 43 | --exec python2 kunlun-test/t/smokeTest.py --host $comp_host2 --port $comp_port2 44 | 45 | --connection default 46 | select * from cluster_rcr_infos ; 47 | select * from cluster_rcr_meta_sync ; 48 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/del_shard_existcluster.test: -------------------------------------------------------------------------------- 1 | #--set_cluster_id(7) 2 | #--kl_cluster_id(1) 3 | 4 | --connection default 5 | --let $cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 6 | --let $shard_id = `select shard_id from shard_nodes where status = "active" and db_cluster_id = $cluster_id limit 0,1;` 7 | --echo $shard_id 8 | --let $sn_num=`select count(distinct shard_id) from shard_nodes;` 9 | --echo $sn_num 10 | 11 | #delete shard 12 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 13 | --http 14 | request_type: POST 15 | header:Content-Type:application/json 16 | body:{ 17 | "ver":"1.0", 18 | "job_id":"", 19 | "job_type":"delete_shard", 20 | "user_name":"kunlun_test", 21 | "timestamp":"202205131532", 22 | "paras":{ 23 | "cluster_id":"${cluster_id}", 24 | "shard_id":"${shard_id}" 25 | } 26 | } 27 | EOF 28 | 29 | 30 | #--sleep 20 31 | #--source kunlun-test/include/wait_http_request_finish.inc 32 | --echo "done" 33 | 34 | 35 | --connection default 36 | --let $sn_num=`select count(distinct shard_id) from shard_nodes;` 37 | --echo $sn_num 38 | 39 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/modify_rcr1.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.create cluster 3 | #2.cycle query status 4 | #3.check computer node write and read 5 | #4.check shard active and standby (to be add) 6 | #5.delete cluster 7 | --connection default 8 | --let $src_cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 9 | --let $dst_cluster_id = `select id from db_clusters where status = "inuse" limit 
1,1;` 10 | 11 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 12 | 13 | --http 14 | request_type: POST 15 | header:Content-Type:application/json 16 | body:{ 17 | "version":"1.0", 18 | "job_id":"", 19 | "job_type":"modify_rcr", 20 | "user_name":"kunlun_test", 21 | "timestamp":"202205131532", 22 | "paras":{ 23 | "master_info":{ 24 | "meta_db":"192.168.0.134:59301", 25 | "cluster_id":"${src_cluster_id}" 26 | }, 27 | "cluster_id":"${dst_cluster_id}", 28 | "work_mode":"modify_sync_delay", 29 | "sync_delay":"10" 30 | } 31 | } 32 | EOF 33 | 34 | --let $job_id = `http_reap(job_id)` 35 | --sleep 10 36 | --source kunlun-test/include/wait_http_request_finish.inc 37 | --sleep 10 38 | --echo "done" 39 | 40 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/modify_rcr11.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.create cluster 3 | #2.cycle query status 4 | #3.check computer node write and read 5 | #4.check shard active and standby (to be add) 6 | #5.delete cluster 7 | --connection default 8 | --let $src_cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 9 | --let $dst_cluster_id = `select id from db_clusters where status = "inuse" limit 1,1;` 10 | 11 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 12 | 13 | --http 14 | request_type: POST 15 | header:Content-Type:application/json 16 | body:{ 17 | "version":"1.0", 18 | "job_id":"", 19 | "job_type":"modify_rcr", 20 | "user_name":"kunlun_test", 21 | "timestamp":"202205131532", 22 | "paras":{ 23 | "slave_info":{ 24 | "meta_db":"192.168.0.134:59301,192.168.0.134:59302,192.168.0.134:59303", 25 | "cluster_id":"${dst_cluster_id}" 26 | }, 27 | "cluster_id":"${src_cluster_id}", 28 | "work_mode":"modify_sync_delay", 29 | "sync_delay":"12" 30 | } 31 | } 32 | EOF 33 | 34 | --let $job_id = `http_reap(job_id)` 35 | --sleep 10 36 | --source kunlun-test/include/wait_http_request_finish.inc 37 | --sleep 10 38 | --echo "done" 39 | 40 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/modify_rcr2.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.create cluster 3 | #2.cycle query status 4 | #3.check computer node write and read 5 | #4.check shard active and standby (to be add) 6 | #5.delete cluster 7 | --connection default 8 | --let $src_cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 9 | --let $dst_cluster_id = `select id from db_clusters where status = "inuse" limit 1,1;` 10 | 11 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 12 | 13 | --http 14 | request_type: POST 15 | header:Content-Type:application/json 16 | body:{ 17 | "version":"1.0", 18 | "job_id":"", 19 | "job_type":"modify_rcr", 20 | "user_name":"kunlun_test", 21 | "timestamp":"202205131532", 22 | "paras":{ 23 | "master_info":{ 24 | "meta_db":"192.168.0.134:59301", 25 | "cluster_id":"${src_cluster_id}" 26 | }, 27 | "cluster_id":"${dst_cluster_id}", 28 | "work_mode":"stop_rcr" 29 | } 30 | } 31 | EOF 32 | 33 | --let $job_id = `http_reap(job_id)` 34 | --sleep 10 35 | --source kunlun-test/include/wait_http_request_finish.inc 36 | --sleep 10 37 | --echo "done" 38 | 39 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/modify_rcr22.test: 
-------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.create cluster 3 | #2.cycle query status 4 | #3.check computer node write and read 5 | #4.check shard active and standby (to be add) 6 | #5.delete cluster 7 | --connection default 8 | --let $src_cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 9 | --let $dst_cluster_id = `select id from db_clusters where status = "inuse" limit 1,1;` 10 | 11 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 12 | 13 | --http 14 | request_type: POST 15 | header:Content-Type:application/json 16 | body:{ 17 | "version":"1.0", 18 | "job_id":"", 19 | "job_type":"modify_rcr", 20 | "user_name":"kunlun_test", 21 | "timestamp":"202205131532", 22 | "paras":{ 23 | "slave_info":{ 24 | "meta_db":"192.168.0.134:59301,192.168.0.134:59302,192.168.0.134:59303", 25 | "cluster_id":"${dst_cluster_id}" 26 | }, 27 | "cluster_id":"${src_cluster_id}", 28 | "work_mode":"stop_rcr" 29 | } 30 | } 31 | EOF 32 | 33 | --let $job_id = `http_reap(job_id)` 34 | --sleep 10 35 | --source kunlun-test/include/wait_http_request_finish.inc 36 | --sleep 10 37 | --echo "done" 38 | 39 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/modify_rcr3.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.create cluster 3 | #2.cycle query status 4 | #3.check computer node write and read 5 | #4.check shard active and standby (to be add) 6 | #5.delete cluster 7 | --connection default 8 | --let $src_cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 9 | --let $dst_cluster_id = `select id from db_clusters where status = "inuse" limit 1,1;` 10 | 11 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 12 | 13 | --http 14 | request_type: POST 15 | header:Content-Type:application/json 16 | body:{ 17 | "version":"1.0", 18 | "job_id":"", 19 | "job_type":"modify_rcr", 20 | "user_name":"kunlun_test", 21 | "timestamp":"202205131532", 22 | "paras":{ 23 | "master_info":{ 24 | "meta_db":"192.168.0.134:59301", 25 | "cluster_id":"${src_cluster_id}" 26 | }, 27 | "cluster_id":"${dst_cluster_id}", 28 | "work_mode":"start_rcr" 29 | } 30 | } 31 | EOF 32 | 33 | --let $job_id = `http_reap(job_id)` 34 | --sleep 10 35 | --source kunlun-test/include/wait_http_request_finish.inc 36 | --sleep 10 37 | --echo "done" 38 | 39 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/modify_rcr33.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.create cluster 3 | #2.cycle query status 4 | #3.check computer node write and read 5 | #4.check shard active and standby (to be add) 6 | #5.delete cluster 7 | --connection default 8 | --let $src_cluster_id = `select id from db_clusters where status = "inuse" limit 0,1;` 9 | --let $dst_cluster_id = `select id from db_clusters where status = "inuse" limit 1,1;` 10 | 11 | --http_connect(cluster_mgr_http1, cluster_mgr,500000) 12 | 13 | --http 14 | request_type: POST 15 | header:Content-Type:application/json 16 | body:{ 17 | "version":"1.0", 18 | "job_id":"", 19 | "job_type":"modify_rcr", 20 | "user_name":"kunlun_test", 21 | "timestamp":"202205131532", 22 | "paras":{ 23 | "slave_info":{ 24 | "meta_db":"192.168.0.134:59301,192.168.0.134:59302,192.168.0.134:59303", 25 | "cluster_id":"${dst_cluster_id}" 26 | }, 27 | 
"cluster_id":"${src_cluster_id}", 28 | "work_mode":"start_rcr" 29 | } 30 | } 31 | EOF 32 | 33 | --let $job_id = `http_reap(job_id)` 34 | --sleep 10 35 | --source kunlun-test/include/wait_http_request_finish.inc 36 | --sleep 10 37 | --echo "done" 38 | 39 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/remote_control_killcluster.test: -------------------------------------------------------------------------------- 1 | --set_cluster_id(8) 2 | --kl_cluster_id(1) 3 | #--let $port = `select port from cluster_mgr_nodes where member_state ="source" limit 0,1;` 4 | --connection default 5 | --exec ./kunlun-test/util/test_client cluster_mgr "ps -aux |grep cluster_mgr|grep -v grep |awk '{print $2}' | xargs kill -9" 6 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/remote_control_killmysql.test: -------------------------------------------------------------------------------- 1 | --connection default 2 | --let $node_mgr_1 = query_get_value(select hostaddr from server_nodes where machine_type = "computer",hostaddr,1) 3 | --let $sourceport_num =`select count(port) from shard_nodes where member_state = "source";` 4 | --let $i = 0 5 | --let $j = 20 6 | --echo $sourceport_num 7 | --set_cluster_id(14) 8 | --kl_cluster_id(1) 9 | while($j > 0) 10 | { 11 | --let $port = `select port from shard_nodes where member_state = "source" limit $i,1;` 12 | #--exec ./kunlun-test/util/test_client shard.1 " ps -aux |grep $port|grep -v grep |awk '{print $2}' | xargs kill" > dev/nul 2>1& 13 | --exec ./kunlun-test/util/test_client shard.1 " ps -aux |grep $port|grep -v grep |awk '{print $2}' | xargs kill" 14 | #--inc $i 15 | #--dec $sourceport_num 16 | --dec $j 17 | } 18 | 19 | --sleep 100 20 | --connection default 21 | --let $shardid = `select distinct shard_id from shard_nodes where status = "active" limit 0,1;` 22 | eval select step from rbr_consfailover where shard_id = $shardid and step ="Fail" order by id desc limit 15; 23 | --connect(rbrshard12,storage.1.1,clustmgr,clustmgr_pwd) 24 | show slave hosts; 25 | --connect(rbrshard13,storage.1.2,clustmgr,clustmgr_pwd) 26 | show slave hosts; 27 | --connect(rbrshard14,storage.1.3,clustmgr,clustmgr_pwd) 28 | show slave hosts; 29 | 30 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/remote_control_killpg.test: -------------------------------------------------------------------------------- 1 | --let $compport_num =`select count(port) from comp_nodes where status = "active";` 2 | --let $i = 0 3 | --echo $compport_num 4 | --set_cluster_id(17) 5 | --kl_cluster_id(1) 6 | while($compport_num > 0) 7 | { 8 | --let $port = `select port from comp_nodes limit $i,1;` 9 | --exec ./kunlun-test/util/test_client shard.1 " ps -aux |grep $port|grep -v grep |awk '{print $2}' | xargs kill" 10 | --inc $i 11 | --dec $compport_num 12 | } 13 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/remote_control_stopmysql.test: -------------------------------------------------------------------------------- 1 | --let $current_time=`select now()` 2 | --echo $current_time 3 | #--let $port_num =`select count(*) from shard_nodes where member_state ="source" and status = "active";` 4 | --let $i = 0 5 | --let $j = 20 6 | #--echo $port_num 7 | --set_cluster_id(42) 8 | --kl_cluster_id(1) 9 | 10 | while($j > 0) 11 | { 12 | --let $port = `select 
port from shard_nodes where member_state ="source" limit $i,1;` 13 | #--echo $port 14 | --exec ./kunlun-test/util/test_client shard.2 "cd /home/kunlun/testmgr1.1/instance_binaries/storage/$port/kunlun-storage-1.1.1/dba_tools && ./stopmysql.sh $port" 15 | #--sleep 1 16 | --echo $i 17 | --dec $j 18 | } 19 | 20 | 21 | --sleep 30 22 | 23 | #witch the first shard 24 | --connection default 25 | --let $shardid = `select distinct shard_id from shard_nodes where status = "active" limit 0,1;` 26 | eval select step from rbr_consfailover where shard_id = $shardid and step ="Fail" order by id desc limit 0,1; 27 | --connect(rbrshard12,storage.1.1,clustmgr,clustmgr_pwd) 28 | show slave hosts; 29 | --connect(rbrshard13,storage.1.2,clustmgr,clustmgr_pwd) 30 | show slave hosts; 31 | --connect(rbrshard14,storage.1.3,clustmgr,clustmgr_pwd) 32 | show slave hosts; 33 | 34 | 35 | --sleep 30 36 | 37 | #switch the second shard 38 | #--connection default 39 | #--let $shardid = `select distinct shard_id from shard_nodes where status = "active" limit 1,1;` 40 | #eval select step from rbr_consfailover where shard_id = $shardid and step ="Fail" order by id desc limit 15; 41 | #--connect(rbrshard22,storage.2.1,clustmgr,clustmgr_pwd) 42 | #show slave hosts; 43 | #--connect(rbrshard23,storage.2.2,clustmgr,clustmgr_pwd) 44 | #show slave hosts; 45 | #--connect(rbrshard24,storage.2.3,clustmgr,clustmgr_pwd) 46 | #show slave hosts; 47 | 48 | 49 | -------------------------------------------------------------------------------- /test/hellentest/kunlun_test1.2.1/t/smokeTest.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/python 2 | # Copyright (c) 2019 ZettaDB inc. All rights reserved. 3 | # This source code is licensed under Apache 2.0 License, 4 | # combined with Common Clause Condition 1.0, as detailed in the NOTICE file. 
5 | 6 | import psycopg2 7 | import sys 8 | import argparse 9 | def test(hoststr, portstr): 10 | intport = int(portstr) 11 | conn = psycopg2.connect(host=hoststr, port=intport, user='abc', password='abc', database='postgres') 12 | conn.autocommit = True 13 | cur = conn.cursor() 14 | sqls=["SET client_min_messages TO 'warning';", 15 | "drop table if exists t1111", 16 | "RESET client_min_messages;", 17 | "create table t1111(id int primary key, info text, wt int)", 18 | "insert into t1111(id,info,wt) values(1, 'record1', 1)", 19 | "insert into t1111(id,info,wt) values(2, 'record2', 2)", 20 | "update t1111 set wt = 12 where id = 1", "select * from t1111", 21 | "delete from t1111 where id = 1", "select * from t1111", 22 | "prepare q1(int) as select*from t1111 where id=$1", 23 | "begin", 24 | "execute q1(1)", 25 | "execute q1(2)", 26 | "prepare q2(text,int, int) as update t1111 set info=$1 , wt=$2 where id=$3", 27 | "execute q2('Rec1',2,1)", 28 | "commit", 29 | "execute q2('Rec2',3,2)", 30 | "drop table t1111"] 31 | for sql in sqls: 32 | res = cur.execute(sql+";") 33 | print "command:%s, res:%s" % (sql, str(res)) 34 | 35 | if __name__ == '__main__': 36 | parser = argparse.ArgumentParser(description="insert data for caict testing") 37 | parser.add_argument('--host', type=str, required=True, help='comp ip') 38 | parser.add_argument('--port', type=int, required=True, help='comp port') 39 | args = parser.parse_args() 40 | test(args.host,args.port) 41 | 42 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/constraint_test5.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.约束测试 3 | --set_cluster_id(46) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr5,computer.1, abc, abc) 7 | DROP TABLE IF EXISTS TABLE_6 CASCADE; 8 | CREATE TABLE TABLE_6 (CODE CHAR(5) PRIMARY KEY,TITLE VARCHAR(40),DID INTEGER,DATE_PROD DATE,KIND VARCHAR(10)); 9 | DROP TABLE IF EXISTS TABLE_7; 10 | CREATE TABLE TABLE_7 (CODE CHAR(5),TITLE VARCHAR(40),DID INTEGER,DATE_PROD DATE,KIND VARCHAR(10),CONSTRAINT CODE_TITLE PRIMARY KEY(CODE,TITLE)); 11 | drop table table_6; 12 | 13 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/constraint_test6.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.约束测试 3 | --set_cluster_id(46) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr6,computer.1, abc, abc) 7 | CREATE UNIQUE INDEX IDX_CODE_TABLE_7 ON TABLE_7(CODE); 8 | DROP INDEX IDX_CODE_TABLE_7; 9 | CREATE UNIQUE INDEX IDX_CODE_TABLE_7 ON TABLE_7(CODE,TITLE); 10 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/constraint_test7.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.约束测试 3 | --set_cluster_id(46) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr7,computer.1, abc, abc) 7 | DROP TABLE IF EXISTS users; 8 | CREATE TABLE users (id INT NOT NULL PRIMARY KEY AUTO_INCREMENT,username VARCHAR(60) NOT NULL,UNIQUE (username),CONSTRAINT min_username_length CHECK (CHARACTER_LENGTH(username) >=4)); 9 | INSERT INTO users (username) VALUES ('a'); 10 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/datatype_test12.test: 
-------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.索引测试 3 | --set_cluster_id(50) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr12,computer.1, abc, abc) 7 | create table t_ora_datatype_varchar2(c0 varchar(100)); 8 | create table t_ora_datatype_char(c0 char(100)); 9 | create table t_ora_datatype_nvarchar2(c0 varchar(100)); 10 | create table t_ora_datatype_raw(c0 bytea); 11 | create table t_ora_datatype_clob(c0 text); 12 | create table t_ora_datatype_blob(c0 bytea); 13 | create table t_ora_datatype_long(c0 text); 14 | create table t_ora_datatype_date(c0 timestamp); 15 | create table t_ora_datatype_timestamp(c0 timestamp(6)); 16 | create table t_ora_datatype_timestamptz(c0 timestamp with time zone); 17 | create table t_ora_datatype_number(c0 decimal(10,2)); 18 | create table t_ora_datatype_float(c0 double precision); 19 | create table t (rowid serial8 not null, c int); 20 | insert into t (c) values (1); 21 | insert into t (c) values (2); 22 | insert into t (c) values (3); 23 | insert into t (c) values (4); 24 | insert into t (c) values (5); 25 | select rowid,c from t order by c; 26 | 27 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/dual_function13.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.索引测试 3 | --set_cluster_id(50) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr12,computer.1, abc, abc) 7 | create table t_ora_fun_analyze(id varchar(100)primary key, score decimal(10,2)); 8 | insert into t_ora_fun_analyze(id,score) values(1,100); 9 | insert into t_ora_fun_analyze(id,score) values (2,200); 10 | insert into t_ora_fun_analyze(id,score) values (3,300); 11 | select id,rank() over(partition by 1 order by id) as rk,sum(score) over(partition by 1 order by id) as sum,lag(id) over(partition by 1 order by id) as la from t_ora_fun_analyze; 12 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/index_test8.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.索引测试 3 | --set_cluster_id(50) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr8,computer.1, abc, abc) 7 | create table inttable( id int primary key, value varchar(30),dept varchar(30) ); 8 | create index testindex1 on inttable(value); 9 | create unique index uniqueindex2 on inttable(dept); 10 | create table tbtest(id int, name varchar(20)); 11 | #alter table tbtest add ( constraint id_pk primary key (id) ); 12 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/index_test9.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.索引测试 3 | --set_cluster_id(50) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr9,computer.1, abc, abc) 7 | #CREATE TABLE t1(col1 char(10), col2 char(10), key index((lower(col1)))); 8 | CREATE TABLE t1(col1 char(10), col2 char(10)); 9 | #ALTER TABLE t1 ADD INDEX idx1((lower(col1))); 10 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/mode_management_test1.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.create cluster 3 | #2.cycle query status 4 | 
#3.check computer node write and read 5 | #4.check shard master and slave 6 | 7 | --http_connect(cluster_mgr_http1, cluster_mgr,50000) 8 | 9 | --http 10 | request_type: POST 11 | header:Content-Type:application/json 12 | body:{ 13 | "version":"1.0", 14 | "job_id":"", 15 | "job_type":"create_cluster", 16 | "user_name":"kunlun_test", 17 | "timestamp":"202205131532", 18 | "paras":{ 19 | "nick_name":"rbrcluster001", 20 | "ha_mode":"rbr", 21 | "shards":"1", 22 | "nodes":"3", 23 | "comps":"1", 24 | "max_storage_size":"20", 25 | "max_connections":"6", 26 | "cpu_cores":"8", 27 | "innodb_size":"1", 28 | "dbcfg":"1", 29 | "fullsync_level": "1", 30 | "storage_iplists": [ 31 | "${node_mgr.1}" 32 | ], 33 | "computer_iplists": [ 34 | "${node_mgr.1}" 35 | ] 36 | } 37 | } 38 | EOF 39 | 40 | --let $job_id = `http_reap(job_id)` 41 | --sleep 20 42 | --source kunlun-test/include/wait_http_request_finish.inc 43 | 44 | #check shard master and slave 45 | --kl_cluster_id(1) 46 | --source kunlun-test/include/check_shard_state.inc 47 | 48 | #check cn 49 | --pg_connect(testrbr1,computer.1, abc, abc) 50 | --source kunlun-test/include/check_cn_write_read.inc 51 | create database testdb; 52 | CREATE DATABASE test1 WITH encoding='UTF8' lc_collate='en_US.utf8'; 53 | drop database if exists testdb; 54 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/mvcc_test10.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.索引测试 3 | --set_cluster_id(50) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr10,computer.1, abc, abc) 7 | INSERT INTO inttable (id, value) VALUES ( 1, 43 ); 8 | begin; 9 | update inttable set value=10 where id=1; 10 | 11 | 12 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/partition_tabele14.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.索引测试 3 | --set_cluster_id(50) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr14,computer.1, abc, abc) 7 | create table t_ora_par_hash(id INT ,name varchar(100)) partition by hash(id); 8 | CREATE TABLE employees (id INT NOT NULL,hired DATE NOT NULL DEFAULT '1970-01-01',store_id INT) PARTITION BY LIST (store_id); 9 | CREATE TABLE employees_pNorth PARTITION OF employees FOR VALUES in (1, 2, 3, 4, 5); 10 | CREATE TABLE employees_pEast PARTITION OF employees FOR VALUES in (6,7,8,9,10); 11 | CREATE TABLE employees_pWest PARTITION OF employees FOR VALUES in (11, 12, 13, 14, 15); 12 | CREATE TABLE employees_Central PARTITION OF employees FOR VALUES in (16, 17, 18, 19, 20); 13 | CREATE TABLE employees_1 (id INT NOT NULL,fname VARCHAR(30),lname VARCHAR(30),hired DATE NOT NULL DEFAULT '1970-01-01',separated DATE DEFAULT '9999-12-31',job_code INT,store_id INT,city VARCHAR(15))PARTITION BY LIST (city); 14 | CREATE TABLE pRegion_1 PARTITION OF employees_1 FOR VALUES IN ('LosAngeles', 'Seattle', 'Houston'); 15 | CREATE TABLE pRegion_2 PARTITION OF employees_1 FOR VALUES IN ('Chicago', 'Columbus', 'Boston'); 16 | CREATE TABLE pRegion_3 PARTITION OF employees_1 FOR VALUES IN ('NewYork', 'LongIsland', 'Baltimore'); 17 | CREATE TABLE pRegion_4 PARTITION OF employees_1 FOR VALUES IN ('Atlanta', 'Raleigh', 'Cincinnati'); 18 | drop table if exists employees; 19 | CREATE TABLE employees (id INT NOT NULL,fname VARCHAR(30),lname VARCHAR(30),hired DATE NOT NULL DEFAULT '1970-01-01',separated 
DATE DEFAULT '9999-12-31',job_code INT,store_id INT NOT NULL)PARTITION BY RANGE (store_id); 20 | #CREATE TABLE `t1` ( `a` char(10) NOT NULL, `b` int) PARTITION BY RANGE COLUMNS(`a`)(PARTITION p0 VALUES LESS THAN ('a'),PARTITION p1 VALUES LESS THAN ('b'), partition p2 values less than maxvalue); 21 | # 22 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/pl_sql15.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.PL/SQL 测试 3 | --set_cluster_id(50) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr15,computer.1, abc, abc) 7 | drop table if exists t; 8 | CREATE TABLE t(id int PRIMARY KEY AUTO_INCREMENT, c int); 9 | INSERT INTO t(c) VALUES (1); 10 | INSERT INTO t(c) VALUES (2); 11 | INSERT INTO t(c) VALUES (3), (4), (5); 12 | SELECT * FROM t; 13 | #CREATE TABLE t(a int AUTO_INCREMENT key) AUTO_ID_CACHE 100; 14 | set session auto_increment_increment=65536; 15 | INSERT INTO t(c) VALUES (1); 16 | INSERT INTO t(c) VALUES (2); 17 | INSERT INTO t(c) VALUES (3), (4), (5); 18 | SELECT * FROM t; 19 | set session auto_increment_increment=1; 20 | create sequence seq_test increment 1 minvalue 1 maxvalue 10 start 1 cache 1 cycle; 21 | select nextval('seq_test'); 22 | select nextval('seq_test'); 23 | select nextval('seq_test'); 24 | select nextval('seq_test'); 25 | select nextval('seq_test'); 26 | with recursive cte(a) as (select 1 union select a+1 from cte where a < 5) select * from cte; 27 | 28 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/sql92_test11.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.索引测试 3 | --set_cluster_id(50) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr11,computer.1, abc, abc) 7 | drop table if exists inttable; 8 | drop table if exists t1; 9 | create table inttable( id int, value int ); 10 | alter table inttable add primary key(id); 11 | INSERT INTO inttable (id, value) VALUES ( 1, 43 ); 12 | SELECT * FROM inttable; 13 | SELECT value FROM inttable WHERE id =1 FOR UPDATE; 14 | DELETE FROM inttable WHERE id=1; 15 | SELECT * from (SELECT id , value FROM inttable WHERE id = 2) as t; 16 | SELECT * FROM inttable ORDER BY value; 17 | INSERT INTO inttable (id, value) VALUES ( 1, 43 ); 18 | #Create table t1 like inttable; 19 | #Create table t2 as (SELECT * FROM inttable); 20 | Create table t1 (like inttable including all); 21 | insert into t1 select * from inttable; 22 | Create table t2 (like inttable including all); 23 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/table_structure16.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.PL/SQL 测试 3 | --set_cluster_id(50) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr16,computer.1, abc, abc) 7 | drop table if exists t1; 8 | create table test1(id varchar(100)primary key, score decimal(10,2),gender character varying(50),name character varying(100),age character varying(10)); 9 | COMMENT ON TABLE test1 IS '测试用户表'; 10 | drop index if exists user_name; 11 | CREATE INDEX user_name ON test1 (name); 12 | GRANT ALL ON TABLE test1 TO abc; 13 | 14 | -------------------------------------------------------------------------------- 
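The testcase_reference_tidb cases above push plain SQL through --pg_connect sessions on computer.1 with user abc / password abc. When one of these statement lists has to be replayed against a computer node outside the test driver, a small psycopg2 script in the style of smokeTest.py is enough; in the sketch below the host and port are placeholders supplied by the caller, and the credentials simply mirror the ones used throughout these tests.

import psycopg2

def run_sql(host, port, sqls):
    # Connection parameters mirror smokeTest.py: user/password 'abc', database 'postgres'.
    conn = psycopg2.connect(host=host, port=int(port), user='abc',
                            password='abc', database='postgres')
    conn.autocommit = True
    cur = conn.cursor()
    for sql in sqls:
        cur.execute(sql)
        # Only statements that return rows populate cursor.description.
        if cur.description is not None:
            print(cur.fetchall())
    cur.close()
    conn.close()

if __name__ == '__main__':
    # Example: replay part of sql92_test11.test against a computer node
    # (host/port below are placeholders).
    run_sql('127.0.0.1', 55001, [
        "drop table if exists inttable;",
        "create table inttable( id int, value int );",
        "alter table inttable add primary key(id);",
        "INSERT INTO inttable (id, value) VALUES ( 1, 43 );",
        "SELECT * FROM inttable;",
        "drop table inttable;",
    ])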
/test/hellentest/testcase_reference_tidb/testcase/table_structure17.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.PL/SQL 测试 3 | --set_cluster_id(50) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr17,computer.1, abc, abc) 7 | select partitioning_type,subpartitioning_type,count(*) from dba_part_tables where owner='mine' group by partitioning_type,subpartitioning_type order by partitioning_type,subpartitioning_type; 8 | select p.table_owner, p.table_name, p.high_value, max(p.partition_position) over (partition by p.table_owner, p.table_name) as p, partition_position from dba_tab_partitions p, dba_part_key_columns c, dba_tab_columns cc where table_owner = 'mine' and c.owner = p.table_owner and c.name = p.table_name and cc.owner = c.owner and cc.table_name = c.name and cc.column_name = c.column_name and p.partitioning_type='HASH' and cc.data_type like '%VARCHAR%'; 9 | select p.table_owner, p.table_name, p.high_value, max(p.partition_position) over (partition by p.table_owner, p.table_name) as p, partition_position from dba_tab_partitions p, dba_part_key_columns c, dba_tab_columns cc where table_owner = 'mine' and c.owner = p.table_owner and c.name = p.table_name and cc.owner = c.owner and cc.table_name = c.name and cc.column_name = c.column_name and p.partitioning_type='RANGE' and cc.data_type like '%DATE%'; 10 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/tableobject_management_test2.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.表对象测试 3 | --set_cluster_id(46) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr2,computer.1, abc, abc) 7 | drop database if exists testdb; 8 | create database testdb; 9 | create table t1 (id int not null primary key auto_increment, col1 int); 10 | insert into t1 (col1) values (1),(2),(3),(4),(5); 11 | alter table t1 alter column col1 type bigint; 12 | alter table t1 add name varchar(20); 13 | alter table t1 drop column name; 14 | alter table t1 add sale_time date; 15 | alter table t1 alter column sale_time set default current_timestamp; 16 | drop table if exists t1; 17 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/usergrant_management_test4.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.用户权限测试 3 | --set_cluster_id(46) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr4,computer.1, abc, abc) 7 | create table t1 (id int not null primary key auto_increment, col1 int); 8 | insert into t1 (col1) values (1),(2),(3),(4),(5); 9 | create user test1 with password '123456'; 10 | alter user test1 with password 'abc123'; 11 | grant all ON t1 to test1; 12 | revoke all ON t1 from test1; 13 | drop user test1; 14 | drop table t1; 15 | -------------------------------------------------------------------------------- /test/hellentest/testcase_reference_tidb/testcase/viewobject_management_test3.test: -------------------------------------------------------------------------------- 1 | #Use case description: 2 | #1.视图对象测试 3 | --set_cluster_id(46) 4 | --kl_cluster_id(1) 5 | 6 | --pg_connect(testrbr3,computer.1, abc, abc) 7 | create table t_test (id int not null primary key auto_increment, col1 int); 8 | create view v_test as select * from t_test; 9 | create or replace view v_test as select * 
from t_test where 1=0; 10 | drop view v_test; 11 | drop table t_test; 12 | -------------------------------------------------------------------------------- /test/include/wait_http_request_finish.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Include this script to wait on the http connection until the checked 3 | # request has finished: status=done || status=failed 4 | # http_req_timeout needs to be set first 5 | 6 | --disable_result_log 7 | --disable_query_log 8 | 9 | if(!$http_req_timeout) 10 | { 11 | let $http_req_timeout = 500; 12 | } 13 | 14 | --let $job_id = `http_reap(job_id)` 15 | 16 | --http 17 | request_type: POST 18 | header:Content-Type:application/json 19 | body:{ 20 | "version": "1.0", 21 | "job_id":"${job_id}", 22 | "job_type": "get_status", 23 | "timestamp" : "1435749309", 24 | "paras": {} 25 | } 26 | EOF 27 | 28 | --let $status = `http_reap(status)` 29 | 30 | while ($status == 'ongoing' && $http_req_timeout > 0) 31 | { 32 | --http 33 | request_type: POST 34 | header:Content-Type:application/json 35 | body:{ 36 | "version": "1.0", 37 | "job_id":"${job_id}", 38 | "job_type": "get_status", 39 | "timestamp" : "1435749309", 40 | "paras": {} 41 | } 42 | EOF 43 | 44 | --let $status = `http_reap(status)` 45 | --sleep 10 46 | 47 | let $http_req_timeout = $http_req_timeout - 10; 48 | } 49 | 50 | --enable_query_log 51 | --enable_result_log -------------------------------------------------------------------------------- /test/r/action_1st.result: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS t1; 2 | SET @@sql_mode='NO_ENGINE_SUBSTITUTION'; 3 | SET SQL_WARNINGS=1; 4 | CREATE TABLE t1 (a INT); 5 | INSERT INTO t1 VALUES (1); 6 | INSERT INTO t1 VALUES (2); 7 | DROP TABLE t1; 8 | -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/README: -------------------------------------------------------------------------------- 1 | cluster_manager test inc files 2 | -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/add_computer_assign_computer.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # add computer based on input params 4 | # need input params: 5 | # cluster_id, comps, computer_iplists 6 | 7 | --http 8 | request_type: POST 9 | header:Content-Type:application/json 10 | body:{ 11 | "version": "1.0", 12 | "job_id":"", 13 | "job_type": "add_comps", 14 | "timestamp" : "1435749309", 15 | "user_name":"kunlun_test", 16 | "paras":{ 17 | "cluster_id":"${cluster_id}", 18 | "comps":"${comps}", 19 | "computer_iplists" : [ 20 | ${computer_iplists} 21 | ] 22 | } 23 | } 24 | EOF 25 | 26 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 27 | 28 | --connection default 29 | --let $comp_nums = `SELECT COUNT(*) FROM comp_nodes WHERE db_cluster_id=$cluster_id` 30 | --let c_inc=1 31 | while($comp_nums > 0) 32 | { 33 | let $cconn_name=cc_pg_conn.$c_inc; 34 | let $comp = computer.$comp_nums; 35 | --pg_connect($cconn_name, $comp, $comp_user, $comp_pwd) 36 | --source kunlun-test/suite/cluster_manager/include/check_computer_state.inc 37 | dec $comp_nums; 38 | inc $c_inc; 39 | --disconnect $cconn_name 40 | } -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/add_computer_without_assign.inc:
-------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # add computer based on input params 4 | # need input params: 5 | # cluster_id, comps 6 | 7 | --http 8 | request_type: POST 9 | header:Content-Type:application/json 10 | body:{ 11 | "version": "1.0", 12 | "job_id":"", 13 | "job_type": "add_comps", 14 | "timestamp" : "1435749309", 15 | "user_name":"kunlun_test", 16 | "paras":{ 17 | "cluster_id":"${cluster_id}", 18 | "comps":"${comps}" 19 | } 20 | } 21 | EOF 22 | 23 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 24 | 25 | --connection default 26 | --let $comp_nums = `SELECT COUNT(*) FROM comp_nodes WHERE db_cluster_id=$cluster_id` 27 | 28 | --let c_inc=1 29 | while($comp_nums > 0) 30 | { 31 | let $cconn_name=cc_pg_conn.$c_inc; 32 | let $comp = computer.$comp_nums; 33 | --pg_connect($cconn_name, $comp, $comp_user, $comp_pwd) 34 | --source kunlun-test/suite/cluster_manager/include/check_computer_state.inc 35 | dec $comp_nums; 36 | inc $c_inc; 37 | --disconnect $cconn_name 38 | } -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/add_node_assign_storage.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # add node based on input params 4 | # need input params: 5 | # cluster_id, shard_id, nodes, storage_iplists 6 | 7 | --http 8 | request_type: POST 9 | header:Content-Type:application/json 10 | body:{ 11 | "version": "1.0", 12 | "job_id":"", 13 | "job_type": "add_nodes", 14 | "timestamp" : "1435749309", 15 | "user_name":"kunlun_test", 16 | "paras":{ 17 | "cluster_id":"${cluster_id}", 18 | "shard_id":"${shard_id}", 19 | "nodes":"${nodes}", 20 | "storage_iplists":[ 21 | ${storage_iplists} 22 | ] 23 | } 24 | } 25 | EOF 26 | 27 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 28 | 29 | --let $shard_nums = `SELECT COUNT(*) FROM shards WHERE db_cluster_id=$cluster_id` 30 | 31 | while($shard_nums > 0) 32 | { 33 | --let $shard_name=shardname.$shard_nums 34 | --let $shardid=shard.$shard_nums 35 | --source kunlun-test/suite/cluster_manager/include/check_rbr_shard_state.inc 36 | dec $shard_nums; 37 | } 38 | -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/add_node_without_assign.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # add node based on input params 4 | # need input params: 5 | # cluster_id, shard_id, nodes 6 | 7 | --http 8 | request_type: POST 9 | header:Content-Type:application/json 10 | body:{ 11 | "version": "1.0", 12 | "job_id":"", 13 | "job_type": "add_nodes", 14 | "timestamp" : "1435749309", 15 | "user_name":"kunlun_test", 16 | "paras":{ 17 | "cluster_id":"${cluster_id}", 18 | "shard_id":"${shard_id}", 19 | "nodes":"${nodes}" 20 | } 21 | } 22 | EOF 23 | 24 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 25 | 26 | --let $shard_nums = `SELECT COUNT(*) FROM shards WHERE db_cluster_id=$cluster_id` 27 | 28 | while($shard_nums > 0) 29 | { 30 | --let $shard_name=shardname.$shard_nums 31 | --let $shardid=shard.$shard_nums 32 | --source kunlun-test/suite/cluster_manager/include/check_rbr_shard_state.inc 33 | dec $shard_nums; 34 | } -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/add_shard_assign_storage.inc: 
-------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # add shard based on input params 4 | # need input params: 5 | # cluster_id, shards, nodes, storage_iplists 6 | 7 | --http 8 | request_type: POST 9 | header:Content-Type:application/json 10 | body:{ 11 | "version": "1.0", 12 | "job_id":"", 13 | "job_type": "add_shards", 14 | "timestamp" : "1435749309", 15 | "user_name":"kunlun_test", 16 | "paras":{ 17 | "cluster_id":"${cluster_id}", 18 | "shards":"${shards}", 19 | "nodes":"${nodes}", 20 | "storage_iplists":[ 21 | ${storage_iplists} 22 | ] 23 | } 24 | } 25 | EOF 26 | 27 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 28 | 29 | --connection default 30 | --let $shard_nums = `SELECT COUNT(*) FROM shards WHERE db_cluster_id=$cluster_id` 31 | 32 | while($shard_nums > 0) 33 | { 34 | --let $shard_name=shardname.$shard_nums 35 | --let $shardid=shard.$shard_nums 36 | --let $shard_id=query_get_value(SELECT id FROM shards WHERE db_cluster_id=$cluster_id, id, $shard_nums) 37 | 38 | --source kunlun-test/suite/cluster_manager/include/check_rbr_shard_state.inc 39 | --dec $shard_nums 40 | } -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/add_shard_without_assign.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # add computer based on input params 4 | # need input params: 5 | # cluster_id, shards, nodes 6 | 7 | --http 8 | request_type: POST 9 | header:Content-Type:application/json 10 | body:{ 11 | "version": "1.0", 12 | "job_id":"", 13 | "job_type": "add_shards", 14 | "timestamp" : "1435749309", 15 | "user_name":"kunlun_test", 16 | "paras":{ 17 | "cluster_id":"${cluster_id}", 18 | "shards":"${shards}", 19 | "nodes" : "${nodes}" 20 | } 21 | } 22 | EOF 23 | 24 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 25 | 26 | --connection default 27 | --let $shard_nums = `SELECT COUNT(*) FROM shards WHERE db_cluster_id=$cluster_id` 28 | 29 | while($shard_nums > 0) 30 | { 31 | --let $shard_name=$shard_conn_name.$shard_nums 32 | --let $shardid=shard.$shard_nums 33 | --source kunlun-test/suite/cluster_manager/include/check_rbr_shard_state.inc 34 | --dec $shard_nums 35 | } -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/check_computer_state.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # check pg state 4 | # 5 | 6 | SET client_min_messages TO 'warning'; 7 | drop table if exists t1111; 8 | RESET client_min_messages; 9 | create table t1111(id int primary key, info text, wt int); 10 | insert into t1111(id,info,wt) values(1, 'record1', 1); 11 | insert into t1111(id,info,wt) values(2, 'record2', 2); 12 | update t1111 set wt = 12 where id = 1; 13 | select * from t1111; 14 | delete from t1111 where id = 1; 15 | select * from t1111; 16 | prepare q1(int) as select*from t1111 where id=$1; 17 | begin; 18 | execute q1(1); 19 | execute q1(2); 20 | prepare q2(text,int, int) as update t1111 set info=$1 , wt=$2 where id=$3; 21 | execute q2('Rec1',2,1); 22 | commit; 23 | execute q2('Rec2',3,2); 24 | drop table t1111; -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/check_delete_cluster_state.inc: -------------------------------------------------------------------------------- 1 | # 
2 | # Description: 3 | # 1. check shards, shard_nodes 4 | # 2. check comp_nodes 5 | # 3. check metadb commit_log_xxx and ddl_ops_log_xxxx 6 | # 7 | 8 | --connection default 9 | 10 | --let $sds_num=query_get_value(SELECT COUNT(*) FROM shards WHERE db_cluster_id=$cluster_id, COUNT(*), 1) 11 | if($sds_num != 0) 12 | { 13 | --echo shards is not empty 14 | --exit 15 | } 16 | 17 | --let $sn_num=query_get_value(SELECT COUNT(*) FROM shard_nodes WHERE db_cluster_id=$cluster_id, COUNT(*), 1) 18 | if($sn_num != 0) 19 | { 20 | --echo shard_nodes is not empty 21 | --exit 22 | } 23 | 24 | --let $dc_num=query_get_value(SELECT COUNT(*) FROM db_clusters WHERE id=$cluster_id, COUNT(*), 1) 25 | if($dc_num != 0) 26 | { 27 | --echo db_clusters is not empty 28 | --exit 29 | } 30 | 31 | --let $comps_num=query_get_value(SELECT COUNT(*) FROM comp_nodes WHERE db_cluster_id=$cluster_id, COUNT(*), 1) 32 | if($comps_num) 33 | { 34 | --echo comp_nodes is not empty 35 | --exit 36 | } 37 | 38 | --let $commit_name='commit_log_$cluster_name' 39 | --let $ddlops_name='ddl_ops_log_$cluster_name' 40 | --let $commit_exist=query_get_value(SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=$commit_name, COUNT(*), 1) 41 | if($commit_exist != 0) 42 | { 43 | --echo commit_log is not deleted, please check 44 | --exit 45 | } 46 | 47 | --let $ddlops_exist=query_get_value(SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=$ddlops_name, COUNT(*), 1) 48 | if($ddlops_exist != 0) 49 | { 50 | --echo ddl_ops_log is not deleted, please check 51 | --exit 52 | } 53 | 54 | --echo ==== delete cluster ok ==== -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/check_delete_computer_state.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # 1. check comp_nodes 4 | # input params 5 | # cluster_id, comp_id 6 | # 7 | 8 | --connection default 9 | 10 | --let $comps_num=query_get_value(SELECT COUNT(*) FROM comp_nodes WHERE id=$comp_id AND db_cluster_id=$cluster_id, COUNT(*), 1) 11 | if($comps_num != 0) 12 | { 13 | --echo comp_id $comp_id is not deleted from comp_nodes 14 | --exit 15 | } -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/check_delete_node_state.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # 1. check shard_nodes 4 | # input parameters 5 | # hostaddr, port 6 | # 7 | 8 | --connection default 9 | 10 | --let $wrapp_hostaddr=kl_str_concat(', $hostaddr, ') 11 | --let $sn_num=query_get_value(SELECT COUNT(*) FROM shard_nodes WHERE hostaddr=$wrapp_hostaddr AND port=$port, COUNT(*), 1) 12 | if($sn_num != 0) 13 | { 14 | --echo $hostaddr still is in shard_nodes 15 | --exit 16 | } -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/check_delete_shard_state.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # 1. 
check shards, shard_nodes 4 | # input parameters 5 | # cluster_id, shard_id 6 | # 7 | 8 | --connection default 9 | 10 | --let $sds_num=query_get_value(SELECT COUNT(*) FROM shards WHERE id=$shard_id AND db_cluster_id=$cluster_id, COUNT(*), 1) 11 | if($sds_num != 0) 12 | { 13 | --echo shards is not empty 14 | --exit 15 | } 16 | 17 | --let $sn_num=query_get_value(SELECT COUNT(*) FROM shard_nodes WHERE shard_id=$shard_id AND db_cluster_id=$cluster_id, COUNT(*), 1) 18 | if($sn_num != 0) 19 | { 20 | --echo shard_nodes is not empty 21 | --exit 22 | } -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/create_cluster_assign_computer.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # create cluster based on input params 4 | # need input params: 5 | # nick_name, ha_mode, shards, nodes, comps, fullsync_level, dbcfg, computer_iplists 6 | # 7 | 8 | if(!$fullsync_level) 9 | { 10 | --let $fullsync_level = 1 11 | } 12 | 13 | if(!$dbcfg) 14 | { 15 | --let $dbcfg = 1 16 | } 17 | 18 | --http 19 | request_type: POST 20 | header:Content-Type:application/json 21 | body:{ 22 | "version":"1.0", 23 | "job_id":"", 24 | "job_type":"create_cluster", 25 | "user_name":"kunlun_test", 26 | "timestamp":"202205131532", 27 | "paras":{ 28 | "nick_name":"${nick_name}", 29 | "ha_mode":"${ha_mode}", 30 | "shards":"${shards}", 31 | "nodes":"${nodes}", 32 | "comps":"${comps}", 33 | "max_storage_size":"20", 34 | "max_connections":"6", 35 | "cpu_cores":"8", 36 | "innodb_size":"1", 37 | "dbcfg":"${dbcfg}", 38 | "fullsync_level":"${fullsync_level}", 39 | "computer_iplists":[ 40 | ${computer_iplists} 41 | ] 42 | } 43 | } 44 | EOF 45 | 46 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 47 | 48 | --let $cluster_id = `http_reap(attachment.cluster_id)` 49 | --let $cluster_name = `http_reap(attachment.cluster_name)` 50 | --let $comp_user = `http_reap(attachment.computer_user)` 51 | --let $comp_pwd = `http_reap(attachment.computer_pwd)` 52 | 53 | --source kunlun-test/suite/cluster_manager/include/check_create_cluster_state.inc -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/create_cluster_assign_storage.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # create cluster based on input params 4 | # need input params: 5 | # nick_name, ha_mode, shards, nodes, comps, fullsync_level, dbcfg, storage_iplists 6 | # 7 | 8 | if(!$fullsync_level) 9 | { 10 | --let $fullsync_level = 1 11 | } 12 | 13 | if(!$dbcfg) 14 | { 15 | --let $dbcfg = 1 16 | } 17 | 18 | --http 19 | request_type: POST 20 | header:Content-Type:application/json 21 | body:{ 22 | "version":"1.0", 23 | "job_id":"", 24 | "job_type":"create_cluster", 25 | "user_name":"kunlun_test", 26 | "timestamp":"202205131532", 27 | "paras":{ 28 | "nick_name":"${nick_name}", 29 | "ha_mode":"${ha_mode}", 30 | "shards":"${shards}", 31 | "nodes":"${nodes}", 32 | "comps":"${comps}", 33 | "max_storage_size":"20", 34 | "max_connections":"6", 35 | "cpu_cores":"8", 36 | "innodb_size":"1", 37 | "dbcfg":"${dbcfg}", 38 | "fullsync_level":"${fullsync_level}", 39 | "storage_iplists":[ 40 | ${storage_iplists} 41 | ] 42 | } 43 | } 44 | EOF 45 | 46 | 47 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 48 | 49 | --let $cluster_id = `http_reap(attachment.cluster_id)` 50 | --let $cluster_name = 
`http_reap(attachment.cluster_name)` 51 | --let $comp_user = `http_reap(attachment.computer_user)` 52 | --let $comp_pwd = `http_reap(attachment.computer_pwd)` 53 | 54 | --source kunlun-test/suite/cluster_manager/include/check_create_cluster_state.inc -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/create_cluster_assign_storage_computer.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # create cluster based on input params 4 | # need input params: 5 | # nick_name, ha_mode, shards, nodes, comps, fullsync_level, dbcfg, storage_iplists, computer_iplists 6 | 7 | if(!$fullsync_level) 8 | { 9 | --let $fullsync_level = 1 10 | } 11 | 12 | if(!$dbcfg) 13 | { 14 | --let $dbcfg = 1 15 | } 16 | 17 | --http 18 | request_type: POST 19 | header:Content-Type:application/json 20 | body:{ 21 | "version":"1.0", 22 | "job_id":"", 23 | "job_type":"create_cluster", 24 | "user_name":"kunlun_test", 25 | "timestamp":"202205131532", 26 | "paras":{ 27 | "nick_name":"${nick_name}", 28 | "ha_mode":"${ha_mode}", 29 | "shards":"${shards}", 30 | "nodes":"${nodes}", 31 | "comps":"${comps}", 32 | "max_storage_size":"20", 33 | "max_connections":"6", 34 | "cpu_cores":"8", 35 | "innodb_size":"1", 36 | "dbcfg":"${dbcfg}", 37 | "fullsync_level":"${fullsync_level}", 38 | "storage_iplists":[ 39 | ${storage_iplists} 40 | ], 41 | "computer_iplists":[ 42 | ${computer_iplists} 43 | ] 44 | } 45 | } 46 | EOF 47 | 48 | 49 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 50 | 51 | --let $cluster_id = `http_reap(attachment.cluster_id)` 52 | --let $cluster_name = `http_reap(attachment.cluster_name)` 53 | --let $comp_user = `http_reap(attachment.computer_user)` 54 | --let $comp_pwd = `http_reap(attachment.computer_pwd)` 55 | 56 | --source kunlun-test/suite/cluster_manager/include/check_create_cluster_state.inc -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/create_cluster_without_assign.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # create cluster based on input params 4 | # need input params: 5 | # nick_name, ha_mode, shards, nodes, comps, fullsync_level, dbcfg 6 | # 7 | 8 | if(!$fullsync_level) 9 | { 10 | --let $fullsync_level = 1 11 | } 12 | 13 | if(!$dbcfg) 14 | { 15 | --let $dbcfg = 1 16 | } 17 | 18 | --http 19 | request_type: POST 20 | header:Content-Type:application/json 21 | body:{ 22 | "version":"1.0", 23 | "job_id":"", 24 | "job_type":"create_cluster", 25 | "user_name":"kunlun_test", 26 | "timestamp":"202205131532", 27 | "paras":{ 28 | "nick_name":"${nick_name}", 29 | "ha_mode":"${ha_mode}", 30 | "shards":"${shards}", 31 | "nodes":"${nodes}", 32 | "comps":"${comps}", 33 | "max_storage_size":"20", 34 | "max_connections":"6", 35 | "cpu_cores":"8", 36 | "innodb_size":"1", 37 | "dbcfg":"${dbcfg}" 38 | } 39 | } 40 | EOF 41 | 42 | 43 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 44 | 45 | --let $cluster_id = `http_reap(attachment.cluster_id)` 46 | --let $cluster_name = `http_reap(attachment.cluster_name)` 47 | --let $comp_user = `http_reap(attachment.computer_user)` 48 | --let $comp_pwd = `http_reap(attachment.computer_pwd)` 49 | 50 | --source kunlun-test/suite/cluster_manager/include/check_create_cluster_state.inc -------------------------------------------------------------------------------- 
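The create_cluster_*.inc variants above differ only in whether storage_iplists and/or computer_iplists are supplied; the job body and the http_reap(attachment.*) reads are otherwise identical. As a rough illustration of the same submission made outside the test framework, the Python requests sketch below posts the create_cluster job and reads job_id from the reply. The cluster_mgr URL is an assumption (the tests only reference a named connection called cluster_mgr, not a concrete endpoint), and the parameter values mirror mode_management_test1.test minus the ip lists.

import requests

# Assumed endpoint: host, port and path are placeholders to be replaced with the
# real cluster_mgr HTTP address.
CLUSTER_MGR_URL = "http://127.0.0.1:58000/"

create_cluster_body = {
    "version": "1.0",
    "job_id": "",
    "job_type": "create_cluster",
    "user_name": "kunlun_test",
    "timestamp": "202205131532",
    "paras": {
        "nick_name": "rbrcluster001",
        "ha_mode": "rbr",
        "shards": "1",
        "nodes": "3",
        "comps": "1",
        "max_storage_size": "20",
        "max_connections": "6",
        "cpu_cores": "8",
        "innodb_size": "1",
        "dbcfg": "1",
        "fullsync_level": "1"
    }
}

resp = requests.post(CLUSTER_MGR_URL, json=create_cluster_body, timeout=30)
resp.raise_for_status()
# The test driver reads the same field with http_reap(job_id).
job_id = resp.json().get("job_id")
print("submitted create_cluster, job_id=%s" % job_id)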
/test/suite/cluster_manager/include/delete_cluster_input_params.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # delete cluster based on input params 4 | # need input params: 5 | # cluster_id 6 | # 7 | 8 | --http 9 | request_type: POST 10 | header:Content-Type:application/json 11 | body:{ 12 | "version": "1.0", 13 | "job_id":"", 14 | "job_type": "delete_cluster", 15 | "timestamp" : "1435749309", 16 | "user_name":"kunlun_test", 17 | "paras":{ 18 | "cluster_id":"${cluster_id}" 19 | } 20 | } 21 | EOF 22 | 23 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 24 | 25 | --let $cluster_name=`http_reap(attachment.cluster_name)` 26 | 27 | --source kunlun-test/suite/cluster_manager/include/check_delete_cluster_state.inc -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/delete_computer_input_params.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # delete computer based on input params 4 | # need input params: 5 | # cluster_id, comp_id 6 | # 7 | 8 | --http 9 | request_type: POST 10 | header:Content-Type:application/json 11 | body:{ 12 | "version": "1.0", 13 | "job_id":"", 14 | "job_type": "delete_comp", 15 | "timestamp" : "1435749309", 16 | "user_name":"kunlun_test", 17 | "paras":{ 18 | "cluster_id":"${cluster_id}", 19 | "comp_id":"${comp_id}" 20 | } 21 | } 22 | EOF 23 | 24 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 25 | --sleep 2 26 | --source kunlun-test/suite/cluster_manager/include/check_delete_computer_state.inc -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/delete_node_input_params.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # delete node based on input params 4 | # need input params: 5 | # cluster_id, shard_id, hostaddr, port 6 | # 7 | 8 | --http 9 | request_type: POST 10 | header:Content-Type:application/json 11 | body:{ 12 | "version": "1.0", 13 | "job_id":"", 14 | "job_type": "delete_node", 15 | "timestamp" : "1435749309", 16 | "user_name":"kunlun_test", 17 | "paras":{ 18 | "cluster_id":"${cluster_id}", 19 | "shard_id":"${shard_id}", 20 | "hostaddr":"${hostaddr}", 21 | "port":"${port}" 22 | } 23 | } 24 | EOF 25 | 26 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 27 | --sleep 2 28 | --source kunlun-test/suite/cluster_manager/include/check_delete_node_state.inc -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/delete_shard_input_params.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # delete shard based on input params 4 | # need input params: 5 | # cluster_id, shard_id 6 | # 7 | 8 | --http 9 | request_type: POST 10 | header:Content-Type:application/json 11 | body:{ 12 | "version": "1.0", 13 | "job_id":"", 14 | "job_type": "delete_shard", 15 | "timestamp" : "1435749309", 16 | "user_name":"kunlun_test", 17 | "paras":{ 18 | "cluster_id":"${cluster_id}", 19 | "shard_id":"${shard_id}" 20 | } 21 | } 22 | EOF 23 | 24 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 25 | --sleep 2 26 | --source kunlun-test/suite/cluster_manager/include/check_delete_shard_state.inc
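Of the four delete includes above, delete_node_input_params.inc is the only one no test later in this dump exercises; the fragment below is a hedged caller sketch in the kunlun-test dialect. The literal cluster_id/shard_id values and the assumption that shard_nodes exposes hostaddr and port columns are illustrative guesses, not values from a real deployment.
# hypothetical sketch: drop the first replica of a known shard
--http_connect(delete_node_cm_http, cluster_mgr)
--connection default
--let $cluster_id=1
--let $shard_id=1
# assumed shard_nodes columns; adjust if the metadata schema differs
--let $hostaddr=query_get_value(SELECT hostaddr FROM shard_nodes WHERE shard_id=$shard_id AND db_cluster_id=$cluster_id, hostaddr, 1)
--let $port=query_get_value(SELECT port FROM shard_nodes WHERE shard_id=$shard_id AND db_cluster_id=$cluster_id, port, 1)
--connection delete_node_cm_http
--source kunlun-test/suite/cluster_manager/include/delete_node_input_params.inc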
-------------------------------------------------------------------------------- /test/suite/cluster_manager/include/get_http_request_result.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # set http_req_timeout (number of polling rounds, default 500, one poll every 10s) 4 | # poll cluster_mgr with get_status until the command result is no longer ongoing 5 | # if the timeout is exhausted 6 | # exit the test 7 | # 8 | 9 | if(!$http_req_timeout) 10 | { 11 | --let $http_req_timeout = 500 12 | } 13 | 14 | --let $job_id = `http_reap(job_id)` 15 | 16 | --sleep 5 17 | 18 | --let $condition=1 19 | while ($condition == 1) 20 | { 21 | --http 22 | request_type: POST 23 | header:Content-Type:application/json 24 | body:{ 25 | "version": "1.0", 26 | "job_id":"${job_id}", 27 | "job_type": "get_status", 28 | "timestamp" : "1435749309", 29 | "paras": {} 30 | } 31 | EOF 32 | 33 | --let $status = `http_reap(status)` 34 | if ($status != ongoing) 35 | { 36 | --let $condition=0 37 | } 38 | 39 | if ($condition == 1) 40 | { 41 | --sleep 10 42 | #--echo status: $status 43 | --dec $http_req_timeout 44 | if($http_req_timeout <= 0) 45 | { 46 | --let $condition=0 47 | --exit 48 | } 49 | } 50 | 51 | } 52 | -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/pre_install_cluster.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # create cluster without assigned storage and computer nodes 4 | # 5 | 6 | --http_connect($http_connection_name, cluster_mgr) 7 | 8 | --let $nick_name=$nickname 9 | --let $ha_mode=rbr 10 | --let $shards=1 11 | --let $nodes=3 12 | --let $comps=1 13 | --let $dbcfg=1 14 | 15 | --source kunlun-test/suite/cluster_manager/include/create_cluster_without_assign.inc 16 | 17 | --echo === create cluster ok === -------------------------------------------------------------------------------- /test/suite/cluster_manager/include/rebuild_node_input_params.inc: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # rebuild node based on input params 4 | # need input params: 5 | # cluster_id, shard_id, allow_pull_from_master, allow_replica_delay, 6 | # rb_nodes 7 | # {"hostaddr":"127.0.0.1","port":"57001","need_backup":"0","hdfs_host":"hdfs","pv_limit":"10" 8 | # },{"hostaddr":"127.0.0.2","port":"57001","need_backup":"0","hdfs_host":"hdfs","pv_limit":"10"} 9 | # 10 | 11 | --http 12 | request_type: POST 13 | header:Content-Type:application/json 14 | body:{ 15 | "version":"1.0", 16 | "job_id":"", 17 | "job_type":"rebuild_node", 18 | "user_name":"kunlun_test", 19 | "timestamp":"202205131532", 20 | "paras":{ 21 | "cluster_id":"${cluster_id}", 22 | "shard_id":"${shard_id}", 23 | "allow_pull_from_master":"${allow_pull_from_master}", 24 | "allow_replica_delay":"${allow_replica_delay}", 25 | "rb_nodes":[ 26 | ${rb_nodes} 27 | ] 28 | } 29 | } 30 | EOF 31 | 32 | 33 | --source kunlun-test/suite/cluster_manager/include/get_http_request_result.inc 34 | 35 | --source kunlun-test/suite/cluster_manager/include/check_create_cluster_state.inc 36 | -------------------------------------------------------------------------------- /test/suite/cluster_manager/r/README: -------------------------------------------------------------------------------- 1 | cluster manager test result files 2 | -------------------------------------------------------------------------------- /test/suite/cluster_manager/t/README: -------------------------------------------------------------------------------- 1 | cluster manager test files
2 | -------------------------------------------------------------------------------- /test/suite/cluster_manager/t/create_cluster_assign_storage_computer.test: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # create cluster with assigned storage and computer nodes 4 | # 5 | 6 | --let $storage_1 = query_get_value(SELECT hostaddr FROM server_nodes WHERE machine_type='storage' and node_stats='running', hostaddr, 1) 7 | --let $storage_2 = query_get_value(SELECT hostaddr FROM server_nodes WHERE machine_type='storage' and node_stats='running', hostaddr, 2) 8 | --let $computer_1 = query_get_value(SELECT hostaddr FROM server_nodes WHERE machine_type='computer' and node_stats='running', hostaddr, 1) 9 | 10 | --http_connect(create_cluster_cm_http, cluster_mgr) 11 | 12 | --let $nick_name=create_cluster_assign_storage_computer 13 | --let $ha_mode=rbr 14 | --let $shards=1 15 | --let $nodes=3 16 | --let $comps=1 17 | --let $dbcfg=1 18 | --let $storage_iplists="$storage_1","$storage_2" 19 | --let $computer_iplists="$computer_1" 20 | 21 | --source kunlun-test/suite/cluster_manager/include/create_cluster_assign_storage_computer.inc 22 | 23 | --connection create_cluster_cm_http 24 | --source kunlun-test/suite/cluster_manager/include/delete_cluster_input_params.inc -------------------------------------------------------------------------------- /test/suite/cluster_manager/t/create_cluster_without_assign.test: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # create cluster without assigned storage and computer nodes 4 | # 5 | 6 | --http_connect(create_cluster_cm_http, cluster_mgr) 7 | 8 | --let $nick_name=create_cluster_without_assign 9 | --let $ha_mode=rbr 10 | --let $shards=1 11 | --let $nodes=3 12 | --let $comps=1 13 | --let $dbcfg=1 14 | 15 | --source kunlun-test/suite/cluster_manager/include/create_cluster_without_assign.inc 16 | 17 | #--let $clusterid=$cluster_id 18 | 19 | #--source kunlun-test/suite/cluster_manager/include/delete_cluster_input_params.inc 20 | -------------------------------------------------------------------------------- /test/suite/cluster_manager/t/delete_cluster_by_cluster_id.test: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # delete cluster by the input cluster_id 4 | # 5 | 6 | --http_connect(delete_cluster_cm_http, cluster_mgr) 7 | 8 | --let $cluster_id=31 9 | 10 | --source kunlun-test/suite/cluster_manager/include/delete_cluster_input_params.inc -------------------------------------------------------------------------------- /test/suite/cluster_manager/t/loop_create_and_delete_comps.test: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # 1. create cluster 4 | # 2. add two comps and delete one comp at random 5 | # 3.
check that pg can still read/write 6 | # 7 | 8 | --let $http_connection_name=loop_create_and_delete_comps 9 | --let $nickname=loop_create_and_delete_comps 10 | 11 | --source kunlun-test/suite/cluster_manager/include/pre_install_cluster.inc 12 | 13 | --let $comps=2 14 | --connection $http_connection_name 15 | --source kunlun-test/suite/cluster_manager/include/add_computer_without_assign.inc 16 | 17 | --connection default 18 | --let $rand_num=`SELECT FLOOR(1+RAND()*3)` 19 | --echo rand_num: $rand_num 20 | 21 | --let $comp_id=query_get_value(SELECT id FROM comp_nodes WHERE db_cluster_id=$cluster_id, id, $rand_num) 22 | --connection $http_connection_name 23 | --source kunlun-test/suite/cluster_manager/include/delete_computer_input_params.inc 24 | 25 | --connection default 26 | --let $comp_nums = `SELECT COUNT(*) FROM comp_nodes WHERE db_cluster_id=$cluster_id` 27 | --let $c_inc=1 28 | while($comp_nums > 0) 29 | { 30 | --let $cconn_name=cc_pg_conn.$c_inc 31 | --let $comp = computer.$comp_nums 32 | --pg_connect($cconn_name, $comp, $comp_user, $comp_pwd) 33 | --source kunlun-test/suite/cluster_manager/include/check_computer_state.inc 34 | --dec $comp_nums 35 | --inc $c_inc 36 | --disconnect $cconn_name 37 | } 38 | 39 | --connection $http_connection_name 40 | --source kunlun-test/suite/cluster_manager/include/delete_cluster_input_params.inc -------------------------------------------------------------------------------- /test/suite/cluster_manager/t/loop_create_and_delete_shards.test: -------------------------------------------------------------------------------- 1 | # 2 | # Description: 3 | # 1. create cluster 4 | # 2. add two shards and delete one shard at random 5 | # 3. check that pg can still read/write 6 | # 7 | 8 | --let $http_connection_name=loop_create_and_delete_shards 9 | --let $nickname=loop_create_and_delete_shards 10 | 11 | --source kunlun-test/suite/cluster_manager/include/pre_install_cluster.inc 12 | 13 | --let $shards=1 14 | --let $nodes=3 15 | --let $add_num=2 16 | 17 | while($add_num > 0) 18 | { 19 | --connection $http_connection_name 20 | --let $shard_conn_name=add_shard_.$add_num 21 | --source kunlun-test/suite/cluster_manager/include/add_shard_without_assign.inc 22 | --dec $add_num 23 | } 24 | 25 | --connection default 26 | --let $rand_num=`SELECT FLOOR(1+RAND()*3)` 27 | --echo rand_num: $rand_num 28 | --let $shard_id=query_get_value(SELECT id FROM shards WHERE db_cluster_id=$cluster_id, id, $rand_num) 29 | 30 | --connection $http_connection_name 31 | --source kunlun-test/suite/cluster_manager/include/delete_shard_input_params.inc 32 | 33 | --connection default 34 | --let $comp_nums = `SELECT COUNT(*) FROM comp_nodes WHERE db_cluster_id=$cluster_id` 35 | --let $c_inc=1 36 | while($comp_nums > 0) 37 | { 38 | --let $cconn_name=cc_pg_conn.$c_inc 39 | --let $comp = computer.$comp_nums 40 | --pg_connect($cconn_name, $comp, $comp_user, $comp_pwd) 41 | --source kunlun-test/suite/cluster_manager/include/check_computer_state.inc 42 | --dec $comp_nums 43 | --inc $c_inc 44 | --disconnect $cconn_name 45 | } 46 | 47 | --connection $http_connection_name 48 | --source kunlun-test/suite/cluster_manager/include/delete_cluster_input_params.inc -------------------------------------------------------------------------------- /test/suite/consfailover/README: -------------------------------------------------------------------------------- 1 | Test program description: 2 | Simulate repeated transfers between many accounts; while the transfers run, randomly kill/stop computer nodes and storage primary nodes. After running for a while, stop the transfers and check whether the total balance across all tables is still consistent. 3 | If a shard primary node fails, cluster_mgr detects it and triggers a failover after 20s (a configured value). 4 | 5 | Preparation: 6 | 1. Provision a test cluster, e.g. 2 shards and 4 computer nodes 7 | 
2. Connect to one computer node, create 50000 accounts and 50 range-partitioned tables, each table covering a range of 1000 8 | 3. Deposit 1000 into each account 9 | 10 | Run the test program 11 | -------------------------------------------------------------------------------- /test/t/action_1st.test: -------------------------------------------------------------------------------- 1 | --disable_warnings 2 | DROP TABLE IF EXISTS t1; 3 | SET @@sql_mode='NO_ENGINE_SUBSTITUTION'; 4 | --enable_warnings 5 | 6 | SET SQL_WARNINGS=1; 7 | 8 | CREATE TABLE t1 (a INT); 9 | INSERT INTO t1 VALUES (1); 10 | INSERT INTO t1 VALUES (2); 11 | 12 | DROP TABLE t1; 13 | 14 | --http_connect(cluster_mgr_http1, cluster_mgr) 15 | 16 | --http 17 | request_type: POST 18 | header:Content-Type:application/json 19 | body:{ 20 | "version": "1.0", 21 | "job_id":"", 22 | "job_type": "create_cluster", 23 | "timestamp" : "1435749309", 24 | "user_name":"kunlun_test", 25 | "paras":{ 26 | "nick_name":"my_nick_name", 27 | "ha_mode":"rbr", 28 | "shards":"2", 29 | "nodes":"3", 30 | "comps":"1", 31 | "max_storage_size":"20", 32 | "max_connections":"6", 33 | "cpu_cores":"8", 34 | "innodb_size":"1", 35 | "machinelist": 36 | [ 37 | {"hostaddr":"${node_mgr.1}"}, 38 | {"hostaddr":"${node_mgr.2}"}, 39 | {"hostaddr":"${node_mgr.3}"} 40 | ] 41 | } 42 | } 43 | EOF 44 | 45 | --source kunlun-test/include/wait_http_request_finish.inc 46 | --------------------------------------------------------------------------------
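To make the preparation steps in the consfailover README above concrete, here is a hedged sketch of the account seeding, written as a kunlun-test fragment with embedded SQL on a pg connection. The table and partition names, the reading of "50 range partition tables" as one partitioned table with 50 partitions, and the generate_series seeding are illustrative assumptions; the actual transfer program may lay the data out differently.
# hypothetical prep sketch: 50 range partitions of 1000 ids each, 50000 accounts, 1000 deposited per account
--pg_connect(consfailover_prep, computer.1, $comp_user, $comp_pwd)
--connection consfailover_prep
CREATE TABLE account (id INT PRIMARY KEY, balance INT) PARTITION BY RANGE (id);
--let $p=1
while($p <= 50)
{
# each partition covers a range of 1000 account ids
--let $lo=`SELECT ($p - 1) * 1000 + 1`
--let $hi=`SELECT $p * 1000 + 1`
eval CREATE TABLE account_p$p PARTITION OF account FOR VALUES FROM ($lo) TO ($hi);
--inc $p
}
INSERT INTO account SELECT g, 1000 FROM generate_series(1, 50000) AS g;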