├── .gitignore ├── README.md ├── archive ├── process_utf_es_result.py ├── rocksdb-overlap │ ├── ctc.yaml │ ├── i_generation.c │ ├── iochaos.yaml │ ├── oltp_common.lua │ └── sysbench-config ├── scan_table_kv.py ├── system-testing │ ├── README.md │ ├── SOURCEME.sh │ ├── dockerfiles │ │ └── compaction-filter-sysbench │ │ │ ├── Dockerfile │ │ │ ├── common.lua │ │ │ └── updates.lua │ ├── examples │ │ ├── tct.yaml │ │ └── trr.yaml │ ├── hack │ │ ├── bash_prompt.sh │ │ ├── lib.sh │ │ └── run_workload_sleep.sh │ ├── insecure_key │ ├── pipelines │ │ ├── pipeline--1pd1db3kv.yaml │ │ ├── pipeline--tpcc-prepare.yaml │ │ └── pipeline-systesting.yaml │ ├── rbac │ │ └── system_testing.yaml │ ├── tasks │ │ ├── task--lib--env-prepare.yaml │ │ ├── task--upgrade-test--env-ops.yaml │ │ ├── task--workload-compaction-filter-sysbench.yaml │ │ ├── task--workload-tpcc--prepare.yaml │ │ ├── task--workload-tpcc--restore.yaml │ │ ├── task-env-ops.yaml │ │ ├── task-sleep.yaml │ │ ├── task-teardown.yaml │ │ └── task-workload-run.yaml │ └── workload_script │ │ └── compaction_filter_sysbench.sh └── tilo │ ├── .gitignore │ ├── cases │ ├── __init__.py │ ├── test_7386.py │ ├── test_7444.py │ └── test_real_7386.py │ ├── poetry.lock │ ├── pyproject.toml │ ├── tests │ ├── __init__.py │ └── codec │ │ ├── __init__.py │ │ └── test_tikv.py │ └── tilo │ ├── __init__.py │ ├── clients.py │ └── codec │ ├── __init__.py │ ├── bytes_.py │ ├── excs.py │ ├── number.py │ ├── tidb.py │ └── tikv.py ├── bin ├── artifacts ├── case2pr ├── jira_to_gspread.py └── tictl ├── hack ├── ctc.yaml ├── partition-one-tikv.yaml ├── random-partition-one-every-hour.yaml ├── tikv-dockerfile ├── tiup-localhost-topo-1pd1kv1db.yaml └── tiup-localhost-topo-1pd3kv1db.yaml ├── ops └── ansible │ ├── .gitignore │ ├── README.md │ ├── ansible.cfg │ ├── create_user.yaml │ ├── deploy_ntp.yaml │ ├── deploy_pubkey.yaml │ ├── dir_permission.yaml │ ├── disk.yaml │ ├── hosts.ini.example │ ├── remove_service.yaml │ ├── show_numa_info.yaml │ ├── 
sysinfo.yaml │ ├── tuning_kernel_parameters.yaml │ └── update_password.yaml ├── testbed └── tikv-standard.yaml ├── tipocket-ctl ├── .gitignore ├── README.md ├── scripts │ ├── clean_resource.sh │ ├── env │ ├── slack-notify │ │ ├── Dockerfile │ │ ├── notify.py │ │ └── secret.yaml │ └── tail.sh ├── setup.py ├── specs │ ├── config-compaction-guard.toml │ ├── config-tidb-5.0.toml │ ├── config-tidb-log-warning.toml │ ├── config-tikv-5.0-rc.toml │ ├── config-tikv-5.0.toml │ └── config-tikv-pipelined-locking.toml └── tpctl │ ├── __init__.py │ ├── __main__.py │ ├── app.py │ ├── case.py │ ├── debug.py │ ├── deploy.py │ ├── dockerfile.py │ ├── scripts │ └── env_raw.sh │ ├── tidb_cluster.py │ ├── utils.py │ └── yaml_dump_tidbcluster.py └── txn-test └── main.go /.gitignore: -------------------------------------------------------------------------------- 1 | *.db 2 | 3 | *.pyc 4 | 5 | __pycache__ 6 | .ccls-cache 7 | 8 | .idea/ 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Testing TiDB efficiently 2 | 3 | ## Features 4 | 5 | * Tools 6 | * [artifacts](bin/artifacts) list TiDB/TiKV/Pd latest tarballs 7 | * [case2pr](bin/case2pr) found the PR that a case is added 8 | * [tipocket-ctl](tipocket-ctl/) is a command line tool for [tipocket](https://github.com/pingcap/tipocket) 9 | * [ansible](ops/ansible) ansible scripts that help initialize machines 10 | * Hack 11 | * [TiKV dockerfile](hack/tikv-dockerfile) is almost same as the official dockerfile 12 | * [tidb-operator-yaml](hack/ctx.yaml) is a tidb-cluster CR definition 13 | * Transaction testing 14 | * [txn-test](txn-test/) provides a demo to reproduce txn-related bug 15 | -------------------------------------------------------------------------------- /archive/process_utf_es_result.py: -------------------------------------------------------------------------------- 1 | import json 2 | import csv 3 
| import os 4 | from concurrent.futures import ThreadPoolExecutor, as_completed 5 | 6 | from jira import JIRA 7 | 8 | 9 | JIRA_URI = os.getenv('JIRA_URI') 10 | JIRA_USERNAME = os.getenv('JIRA_USERNAME') 11 | JIRA_PASSWORD = os.getenv('JIRA_PASSWORD') 12 | 13 | jira = JIRA(JIRA_URI, auth=(JIRA_USERNAME, JIRA_PASSWORD)) 14 | 15 | 16 | def parse_hit(hit): 17 | source = hit['_source'] 18 | 19 | case_id = source['name'] 20 | reason = source['reason'] 21 | url = source['annotations']['jenkins.build'] 22 | duration = source['finished_at'] - source['started_at'] 23 | 24 | return [case_id, reason, url, duration] 25 | 26 | 27 | def get_owner(case_id): 28 | issue = jira.issue(case_id) 29 | return issue.fields.assignee.displayName, issue.fields.reporter.displayName 30 | 31 | 32 | def main(): 33 | with open('/tmp/hits.json') as f: 34 | hits = json.load(f) 35 | 36 | rows = [] 37 | for hit in hits: 38 | rows.append(parse_hit(hit)) 39 | 40 | case_owner_mapping = {} 41 | 42 | with ThreadPoolExecutor(max_workers=5) as executor: 43 | future_to_case = {executor.submit(get_owner, row[0]): row[0] 44 | for row in rows} 45 | for future in as_completed(future_to_case): 46 | case_id = future_to_case[future] 47 | try: 48 | owner, qa_owner = future.result() 49 | except: # noqa 50 | print('WARN', case_id) 51 | else: 52 | case_owner_mapping[case_id] = (owner, qa_owner) 53 | 54 | with open('utf.csv', 'w') as csvfile: 55 | writer = csv.writer(csvfile, delimiter=',', lineterminator='\r\n', 56 | quoting=csv.QUOTE_ALL) 57 | writer.writerow(['case id', 'failed reason', 'url', 58 | 'duration', 'owner', 'qa owner']) 59 | for row in rows: 60 | newrow = row.copy() 61 | newrow.extend(case_owner_mapping.get(row[0], ('Unknown', 'Unknown'))) 62 | writer.writerow(newrow) 63 | 64 | 65 | if __name__ == '__main__': 66 | main() 67 | -------------------------------------------------------------------------------- /archive/rocksdb-overlap/ctc.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: pingcap.com/v1alpha1 2 | kind: TidbCluster 3 | metadata: 4 | name: ctc1 5 | spec: 6 | # configUpdateStrategy: RollingUpdate 7 | enablePVReclaim: false 8 | imagePullPolicy: Always 9 | pvReclaimPolicy: Delete 10 | timezone: Asia/Shanghai 11 | pd: 12 | image: pingcap/pd:v4.0.1 13 | replicas: 3 14 | limits: 15 | cpu: "8" 16 | memory: "8Gi" 17 | requests: 18 | storage: 10Gi 19 | # storageClassName: nvme 20 | config: {} 21 | tidb: 22 | image: pingcap/tidb:v4.0.1 23 | replicas: 1 24 | service: 25 | type: NodePort 26 | limits: 27 | cpu: "16" 28 | memory: "16Gi" 29 | config: {} 30 | tikv: 31 | image: pingcap/tikv:v4.0.1 32 | config: 33 | log-level: info 34 | rocksdb: 35 | defaultcf: 36 | block-size: "16KB" # default 16KB 37 | write-buffer-size: "4MB" # default 128MB 38 | target-file-size-base: "2MB" # default 8MB 39 | writecf: 40 | block-size: "32KB" 41 | write-buffer-size: "4MB" 42 | target-file-size-base: "4MB" 43 | replicas: 5 44 | requests: 45 | storage: 10Gi 46 | limits: 47 | cpu: "8" 48 | memory: "16Gi" 49 | # storageClassName: nvme 50 | 51 | --- 52 | 53 | apiVersion: pingcap.com/v1alpha1 54 | kind: TidbMonitor 55 | metadata: 56 | name: ctc1 57 | spec: 58 | clusters: 59 | - name: ctc1 60 | prometheus: 61 | baseImage: prom/prometheus 62 | version: v2.18.1 63 | grafana: 64 | baseImage: grafana/grafana 65 | version: 6.1.6 66 | service: 67 | type: NodePort 68 | initializer: 69 | baseImage: pingcap/tidb-monitor-initializer 70 | version: v4.0.2 71 | reloader: 72 | baseImage: pingcap/tidb-monitor-reloader 73 | version: v1.0.1 74 | imagePullPolicy: IfNotPresent 75 | -------------------------------------------------------------------------------- /archive/rocksdb-overlap/i_generation.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | int 
main (int argc, char **argv) { 11 | if (argc < 2) { 12 | printf("Usage: %s \n", argv[0]); 13 | return 1; 14 | } 15 | 16 | const char *filename = argv[1]; 17 | uint32_t generation = 666; 18 | struct stat file_stat; 19 | 20 | int fileno = open(filename, O_RDONLY); 21 | if (fileno < 0) { 22 | printf("Open file %s error", filename); 23 | return 1; 24 | } 25 | int ret = fstat(fileno, &file_stat); 26 | if (ret < 0) { 27 | printf("Stat file %s error", filename); 28 | return 1; 29 | } 30 | 31 | // get inode number and generation number 32 | if (ioctl(fileno, FS_IOC_GETVERSION, &generation)) { 33 | printf("Get generation number errno: %d\n", errno); 34 | } 35 | printf("inode number: %lu\n", file_stat.st_ino); 36 | printf("inode generation: %u\n", generation); 37 | 38 | close(fileno); 39 | 40 | return 0; 41 | } 42 | -------------------------------------------------------------------------------- /archive/rocksdb-overlap/iochaos.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: chaos-mesh.org/v1alpha1 2 | kind: IoChaos 3 | metadata: 4 | name: io-delay 5 | spec: 6 | action: latency 7 | mode: all 8 | selector: 9 | labelSelectors: 10 | app.kubernetes.io/instance: ctc1 11 | app.kubernetes.io/component: tikv 12 | volumePath: /var/lib/tikv 13 | path: "/var/lib/tikv/db/**/*" 14 | delay: "0ms" 15 | percent: 50 16 | -------------------------------------------------------------------------------- /archive/rocksdb-overlap/oltp_common.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) 2006-2018 Alexey Kopytov 2 | 3 | -- This program is free software; you can redistribute it and/or modify 4 | -- it under the terms of the GNU General Public License as published by 5 | -- the Free Software Foundation; either version 2 of the License, or 6 | -- (at your option) any later version. 
7 | 8 | -- This program is distributed in the hope that it will be useful, 9 | -- but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | -- GNU General Public License for more details. 12 | 13 | -- You should have received a copy of the GNU General Public License 14 | -- along with this program; if not, write to the Free Software 15 | -- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 16 | 17 | -- ----------------------------------------------------------------------------- 18 | -- Common code for OLTP benchmarks. 19 | -- ----------------------------------------------------------------------------- 20 | 21 | function init() 22 | assert(event ~= nil, 23 | "this script is meant to be included by other OLTP scripts and " .. 24 | "should not be called directly.") 25 | end 26 | 27 | if sysbench.cmdline.command == nil then 28 | error("Command is required. Supported commands: prepare, prewarm, run, " .. 
29 | "cleanup, help") 30 | end 31 | 32 | -- Command line options 33 | sysbench.cmdline.options = { 34 | table_size = 35 | {"Number of rows per table", 10000}, 36 | range_size = 37 | {"Range size for range SELECT queries", 100}, 38 | tables = 39 | {"Number of tables", 1}, 40 | point_selects = 41 | {"Number of point SELECT queries per transaction", 10}, 42 | simple_ranges = 43 | {"Number of simple range SELECT queries per transaction", 1}, 44 | sum_ranges = 45 | {"Number of SELECT SUM() queries per transaction", 1}, 46 | order_ranges = 47 | {"Number of SELECT ORDER BY queries per transaction", 1}, 48 | distinct_ranges = 49 | {"Number of SELECT DISTINCT queries per transaction", 1}, 50 | index_updates = 51 | {"Number of UPDATE index queries per transaction", 1}, 52 | non_index_updates = 53 | {"Number of UPDATE non-index queries per transaction", 1}, 54 | delete_inserts = 55 | {"Number of DELETE/INSERT combinations per transaction", 1}, 56 | range_selects = 57 | {"Enable/disable all range SELECT queries", true}, 58 | auto_inc = 59 | {"Use AUTO_INCREMENT column as Primary Key (for MySQL), " .. 60 | "or its alternatives in other DBMS. When disabled, use " .. 61 | "client-generated IDs", true}, 62 | skip_trx = 63 | {"Don't start explicit transactions and execute all queries " .. 64 | "in the AUTOCOMMIT mode", false}, 65 | secondary = 66 | {"Use a secondary index in place of the PRIMARY KEY", false}, 67 | create_secondary = 68 | {"Create a secondary index in addition to the PRIMARY KEY", true}, 69 | mysql_storage_engine = 70 | {"Storage engine, if MySQL is used", "innodb"}, 71 | pgsql_variant = 72 | {"Use this PostgreSQL variant when running with the " .. 73 | "PostgreSQL driver. The only currently supported " .. 74 | "variant is 'redshift'. When enabled, " .. 75 | "create_secondary is automatically disabled, and " .. 76 | "delete_inserts is set to 0"} 77 | } 78 | 79 | -- Prepare the dataset. This command supports parallel execution, i.e. 
will 80 | -- benefit from executing with --threads > 1 as long as --tables > 1 81 | function cmd_prepare() 82 | local drv = sysbench.sql.driver() 83 | local con = drv:connect() 84 | 85 | for i = sysbench.tid % sysbench.opt.threads + 1, sysbench.opt.tables, 86 | sysbench.opt.threads do 87 | create_table(drv, con, i) 88 | end 89 | end 90 | 91 | -- Preload the dataset into the server cache. This command supports parallel 92 | -- execution, i.e. will benefit from executing with --threads > 1 as long as 93 | -- --tables > 1 94 | -- 95 | -- PS. Currently, this command is only meaningful for MySQL/InnoDB benchmarks 96 | function cmd_prewarm() 97 | local drv = sysbench.sql.driver() 98 | local con = drv:connect() 99 | 100 | assert(drv:name() == "mysql", "prewarm is currently MySQL only") 101 | 102 | -- Do not create on disk tables for subsequent queries 103 | con:query("SET tmp_table_size=2*1024*1024*1024") 104 | con:query("SET max_heap_table_size=2*1024*1024*1024") 105 | 106 | for i = sysbench.tid % sysbench.opt.threads + 1, sysbench.opt.tables, 107 | sysbench.opt.threads do 108 | local t = "sbtest" .. i 109 | print("Prewarming table " .. t) 110 | con:query("ANALYZE TABLE sbtest" .. i) 111 | con:query(string.format( 112 | "SELECT AVG(id) FROM " .. 113 | "(SELECT * FROM %s FORCE KEY (PRIMARY) " .. 114 | "LIMIT %u) t", 115 | t, sysbench.opt.table_size)) 116 | con:query(string.format( 117 | "SELECT COUNT(*) FROM " .. 118 | "(SELECT * FROM %s WHERE k LIKE '%%0%%' LIMIT %u) t", 119 | t, sysbench.opt.table_size)) 120 | end 121 | end 122 | 123 | -- Implement parallel prepare and prewarm commands 124 | sysbench.cmdline.commands = { 125 | prepare = {cmd_prepare, sysbench.cmdline.PARALLEL_COMMAND}, 126 | prewarm = {cmd_prewarm, sysbench.cmdline.PARALLEL_COMMAND} 127 | } 128 | 129 | 130 | -- Template strings of random digits with 11-digit groups separated by dashes 131 | 132 | -- 10 groups, 119 characters 133 | local c_value_template = "###########-###########-###########-" .. 
134 | "###########-###########-###########-" .. 135 | "###########-###########-###########-" .. 136 | "###########" 137 | 138 | -- 5 groups, 59 characters 139 | local pad_value_template = "###########-###########-###########-" .. 140 | "###########-###########" 141 | 142 | function get_c_value() 143 | return sysbench.rand.string(c_value_template) 144 | end 145 | 146 | function get_pad_value() 147 | return sysbench.rand.string(pad_value_template) 148 | end 149 | 150 | function create_table(drv, con, table_num) 151 | local id_index_def, id_def 152 | local engine_def = "" 153 | local extra_table_options = "" 154 | local query 155 | 156 | if sysbench.opt.secondary then 157 | id_index_def = "KEY xid" 158 | else 159 | id_index_def = "PRIMARY KEY" 160 | end 161 | 162 | if drv:name() == "mysql" or drv:name() == "attachsql" or 163 | drv:name() == "drizzle" 164 | then 165 | if sysbench.opt.auto_inc then 166 | id_def = "INTEGER NOT NULL AUTO_INCREMENT" 167 | else 168 | id_def = "INTEGER NOT NULL" 169 | end 170 | engine_def = "/*! ENGINE = " .. sysbench.opt.mysql_storage_engine .. " */" 171 | extra_table_options = mysql_table_options or "" 172 | elseif drv:name() == "pgsql" 173 | then 174 | if not sysbench.opt.auto_inc then 175 | id_def = "INTEGER NOT NULL" 176 | elseif pgsql_variant == 'redshift' then 177 | id_def = "INTEGER IDENTITY(1,1)" 178 | else 179 | id_def = "SERIAL" 180 | end 181 | else 182 | error("Unsupported database driver:" .. 
drv:name()) 183 | end 184 | 185 | print(string.format("Creating table 'sbtest%d'...", table_num)) 186 | 187 | query = string.format([[ 188 | CREATE TABLE sbtest%d( 189 | id %s, 190 | k INTEGER DEFAULT '0' NOT NULL, 191 | c CHAR(120) DEFAULT '' NOT NULL, 192 | pad CHAR(60) DEFAULT '' NOT NULL, 193 | %s (id) 194 | ) %s %s]], 195 | table_num, id_def, id_index_def, engine_def, extra_table_options) 196 | 197 | con:query(query) 198 | 199 | if sysbench.opt.create_secondary then 200 | print(string.format("Creating a secondary index on 'sbtest%d'...", 201 | table_num)) 202 | con:query(string.format("CREATE INDEX k_%d ON sbtest%d(k)", 203 | table_num, table_num)) 204 | end 205 | 206 | if (sysbench.opt.table_size > 0) then 207 | print(string.format("Inserting %d records into 'sbtest%d'", 208 | sysbench.opt.table_size, table_num)) 209 | end 210 | 211 | if sysbench.opt.auto_inc then 212 | query = "INSERT INTO sbtest" .. table_num .. "(k, c, pad) VALUES" 213 | else 214 | query = "INSERT INTO sbtest" .. table_num .. "(id, k, c, pad) VALUES" 215 | end 216 | 217 | con:bulk_insert_init(query) 218 | 219 | local c_val 220 | local pad_val 221 | 222 | for i = 1, sysbench.opt.table_size do 223 | 224 | c_val = get_c_value() 225 | pad_val = get_pad_value() 226 | 227 | if (sysbench.opt.auto_inc) then 228 | query = string.format("(%d, '%s', '%s')", 229 | sb_rand(1, sysbench.opt.table_size), c_val, 230 | pad_val) 231 | else 232 | query = string.format("(%d, %d, '%s', '%s')", 233 | i, sb_rand(1, sysbench.opt.table_size), c_val, 234 | pad_val) 235 | end 236 | 237 | con:bulk_insert_next(query) 238 | end 239 | 240 | con:bulk_insert_done() 241 | end 242 | 243 | local t = sysbench.sql.type 244 | local stmt_defs = { 245 | point_selects = { 246 | "SELECT c FROM sbtest%u WHERE id=?", 247 | t.INT}, 248 | simple_ranges = { 249 | "SELECT c FROM sbtest%u WHERE id BETWEEN ? AND ?", 250 | t.INT, t.INT}, 251 | sum_ranges = { 252 | "SELECT SUM(k) FROM sbtest%u WHERE id BETWEEN ? 
AND ?", 253 | t.INT, t.INT}, 254 | order_ranges = { 255 | "SELECT c FROM sbtest%u WHERE id BETWEEN ? AND ? ORDER BY c", 256 | t.INT, t.INT}, 257 | distinct_ranges = { 258 | "SELECT DISTINCT c FROM sbtest%u WHERE id BETWEEN ? AND ? ORDER BY c", 259 | t.INT, t.INT}, 260 | index_updates = { 261 | "UPDATE sbtest%u SET k=k+1 WHERE id=?", 262 | t.INT}, 263 | non_index_updates = { 264 | "UPDATE sbtest%u SET c=? WHERE id=?", 265 | {t.CHAR, 120}, t.INT}, 266 | deletes = { 267 | "DELETE FROM sbtest%u WHERE id=?", 268 | t.INT}, 269 | inserts = { 270 | "INSERT INTO sbtest%u (id, k, c, pad) VALUES (?, ?, ?, ?)", 271 | t.INT, t.INT, {t.CHAR, 120}, {t.CHAR, 60}}, 272 | } 273 | 274 | function prepare_begin() 275 | stmt.begin = con:prepare("BEGIN") 276 | end 277 | 278 | function prepare_commit() 279 | stmt.commit = con:prepare("COMMIT") 280 | end 281 | 282 | function prepare_for_each_table(key) 283 | for t = 1, sysbench.opt.tables do 284 | stmt[t][key] = con:prepare(string.format(stmt_defs[key][1], t)) 285 | 286 | local nparam = #stmt_defs[key] - 1 287 | 288 | if nparam > 0 then 289 | param[t][key] = {} 290 | end 291 | 292 | for p = 1, nparam do 293 | local btype = stmt_defs[key][p+1] 294 | local len 295 | 296 | if type(btype) == "table" then 297 | len = btype[2] 298 | btype = btype[1] 299 | end 300 | if btype == sysbench.sql.type.VARCHAR or 301 | btype == sysbench.sql.type.CHAR then 302 | param[t][key][p] = stmt[t][key]:bind_create(btype, len) 303 | else 304 | param[t][key][p] = stmt[t][key]:bind_create(btype) 305 | end 306 | end 307 | 308 | if nparam > 0 then 309 | stmt[t][key]:bind_param(unpack(param[t][key])) 310 | end 311 | end 312 | end 313 | 314 | function prepare_point_selects() 315 | prepare_for_each_table("point_selects") 316 | end 317 | 318 | function prepare_simple_ranges() 319 | prepare_for_each_table("simple_ranges") 320 | end 321 | 322 | function prepare_sum_ranges() 323 | prepare_for_each_table("sum_ranges") 324 | end 325 | 326 | function prepare_order_ranges() 327 
| prepare_for_each_table("order_ranges") 328 | end 329 | 330 | function prepare_distinct_ranges() 331 | prepare_for_each_table("distinct_ranges") 332 | end 333 | 334 | function prepare_index_updates() 335 | prepare_for_each_table("index_updates") 336 | end 337 | 338 | function prepare_non_index_updates() 339 | prepare_for_each_table("non_index_updates") 340 | end 341 | 342 | function prepare_delete_inserts() 343 | prepare_for_each_table("deletes") 344 | prepare_for_each_table("inserts") 345 | end 346 | 347 | function thread_init() 348 | drv = sysbench.sql.driver() 349 | con = drv:connect() 350 | 351 | -- Create global nested tables for prepared statements and their 352 | -- parameters. We need a statement and a parameter set for each combination 353 | -- of connection/table/query 354 | stmt = {} 355 | param = {} 356 | 357 | for t = 1, sysbench.opt.tables do 358 | stmt[t] = {} 359 | param[t] = {} 360 | end 361 | 362 | -- This function is a 'callback' defined by individual benchmark scripts 363 | prepare_statements() 364 | end 365 | 366 | -- Close prepared statements 367 | function close_statements() 368 | for t = 1, sysbench.opt.tables do 369 | for k, s in pairs(stmt[t]) do 370 | stmt[t][k]:close() 371 | end 372 | end 373 | if (stmt.begin ~= nil) then 374 | stmt.begin:close() 375 | end 376 | if (stmt.commit ~= nil) then 377 | stmt.commit:close() 378 | end 379 | end 380 | 381 | function thread_done() 382 | close_statements() 383 | con:disconnect() 384 | end 385 | 386 | function cleanup() 387 | local drv = sysbench.sql.driver() 388 | local con = drv:connect() 389 | 390 | for i = 1, sysbench.opt.tables do 391 | print(string.format("Dropping table 'sbtest%d'...", i)) 392 | con:query("DROP TABLE IF EXISTS sbtest" .. 
i ) 393 | end 394 | end 395 | 396 | local function get_table_num() 397 | return sysbench.rand.uniform(1, sysbench.opt.tables) 398 | end 399 | 400 | local function get_id() 401 | return sysbench.rand.default(1, sysbench.opt.table_size) 402 | end 403 | 404 | function begin() 405 | stmt.begin:execute() 406 | end 407 | 408 | function commit() 409 | stmt.commit:execute() 410 | end 411 | 412 | function execute_point_selects() 413 | local tnum = get_table_num() 414 | local i 415 | 416 | for i = 1, sysbench.opt.point_selects do 417 | param[tnum].point_selects[1]:set(get_id()) 418 | 419 | stmt[tnum].point_selects:execute() 420 | end 421 | end 422 | 423 | local function execute_range(key) 424 | local tnum = get_table_num() 425 | 426 | for i = 1, sysbench.opt[key] do 427 | local id = get_id() 428 | 429 | param[tnum][key][1]:set(id) 430 | param[tnum][key][2]:set(id + sysbench.opt.range_size - 1) 431 | 432 | stmt[tnum][key]:execute() 433 | end 434 | end 435 | 436 | function execute_simple_ranges() 437 | execute_range("simple_ranges") 438 | end 439 | 440 | function execute_sum_ranges() 441 | execute_range("sum_ranges") 442 | end 443 | 444 | function execute_order_ranges() 445 | execute_range("order_ranges") 446 | end 447 | 448 | function execute_distinct_ranges() 449 | execute_range("distinct_ranges") 450 | end 451 | 452 | function execute_index_updates() 453 | local tnum = get_table_num() 454 | 455 | for i = 1, sysbench.opt.index_updates do 456 | param[tnum].index_updates[1]:set(get_id()) 457 | 458 | stmt[tnum].index_updates:execute() 459 | end 460 | end 461 | 462 | function execute_non_index_updates() 463 | local tnum = get_table_num() 464 | 465 | for i = 1, sysbench.opt.non_index_updates do 466 | param[tnum].non_index_updates[1]:set_rand_str(c_value_template) 467 | param[tnum].non_index_updates[2]:set(get_id()) 468 | 469 | stmt[tnum].non_index_updates:execute() 470 | end 471 | end 472 | 473 | function execute_delete_inserts() 474 | local tnum = get_table_num() 475 | 476 | for 
i = 1, sysbench.opt.delete_inserts do 477 | local id = get_id() 478 | local k = get_id() 479 | 480 | param[tnum].deletes[1]:set(id) 481 | 482 | param[tnum].inserts[1]:set(id) 483 | param[tnum].inserts[2]:set(k) 484 | param[tnum].inserts[3]:set_rand_str(c_value_template) 485 | param[tnum].inserts[4]:set_rand_str(pad_value_template) 486 | 487 | stmt[tnum].deletes:execute() 488 | stmt[tnum].inserts:execute() 489 | end 490 | end 491 | 492 | -- Re-prepare statements if we have reconnected, which is possible when some of 493 | -- the listed error codes are in the --mysql-ignore-errors list 494 | function sysbench.hooks.before_restart_event(errdesc) 495 | if errdesc.sql_errno == 2013 or -- CR_SERVER_LOST 496 | errdesc.sql_errno == 2055 or -- CR_SERVER_LOST_EXTENDED 497 | errdesc.sql_errno == 2006 or -- CR_SERVER_GONE_ERROR 498 | errdesc.sql_errno == 2011 -- CR_TCP_CONNECTION 499 | then 500 | close_statements() 501 | prepare_statements() 502 | end 503 | end 504 | -------------------------------------------------------------------------------- /archive/rocksdb-overlap/sysbench-config: -------------------------------------------------------------------------------- 1 | mysql-host=0.0.0.0 2 | mysql-port=31452 3 | mysql-user=root 4 | mysql-password='' 5 | mysql-db=sbtest 6 | time=600 7 | threads=4 8 | report-interval=10 9 | db-driver=mysql 10 | -------------------------------------------------------------------------------- /archive/scan_table_kv.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from tikv_client.asynchronous import RawClient 4 | from tilo.codec.bytes_ import encode_bytes 5 | from tilo.codec.number import encode_int 6 | 7 | 8 | async def scan_mvcc_delete_records(table_id): 9 | client = await RawClient.connect('127.0.0.1:2379') 10 | 11 | key_t = encode_int(b't', int(table_id)) 12 | start_key = encode_int(key_t + b'_i', 0) 13 | start_key = encode_bytes(b'', start_key) 14 | # _s > _r > _i 15 | end_key = 
encode_bytes(b'', key_t + b'_s') 16 | 17 | delete_count = 0 18 | 19 | def analysis(kvs): 20 | nonlocal delete_count 21 | total = 0 22 | for k, v in kvs.items(): 23 | print(len(v)) 24 | if v[:1] == b'D': 25 | total += 1 26 | delete_count += total 27 | print(f'analysis {len(kvs)} kvs, {total} deletes') 28 | 29 | 30 | while True: 31 | limit = 5000 32 | kvs = await client.scan(start_key, 33 | end=end_key, 34 | limit=limit, 35 | cf='write') 36 | analysis(kvs) 37 | if len(kvs) < limit: 38 | break 39 | else: 40 | start_key = max(kvs) 41 | 42 | print('delete count:', delete_count) 43 | 44 | 45 | if __name__ == '__main__': 46 | import sys 47 | 48 | loop = asyncio.get_event_loop() 49 | loop.run_until_complete(scan_mvcc_delete_records(int(sys.argv[1]))) 50 | -------------------------------------------------------------------------------- /archive/system-testing/README.md: -------------------------------------------------------------------------------- 1 | # 系统测试 2 | 3 | ## 关于 hack 脚本 4 | 5 | 当前的代码风格 6 | 7 | 1. 函数名以 `st::` 为前缀 8 | 2. 命令以 `st-` 为前缀 9 | 2. 10 | 2. 11 | 12 | # 操作手册 13 | 14 | ## 快速上手 15 | 16 | 快速部署一个 1pd-1db-3kv 的集群。 17 | 18 | 1. 设置系统测试的关键环境变量:`export ST_NAMESPACE=st--xxx`。表明你要在哪个 namespace 下进行系统测试, 19 | 集群资源,tekton 任务都会创建在这个 ns 下。 20 | 21 | 2. 运行 `source SOURCE.sh`,它会给常见命令设置 alias,其中关键的两个是 `k` 和 `tkn`。 22 | ``` 23 | alias k="kubectl -n $ns" 24 | alias tkn="tkn -n $ST_NAMESPACE" 25 | ``` 26 | 27 | 3. 创建 serviceaccount 账号和相关 binding 28 | ``` 29 | k apply -f rbac/ 30 | kubectl create clusterrolebinding "st-sa-binding-$ST_NAMESPACE" \ 31 | --clusterrole=system-testing --serviceaccount=$ST_NAMESPACE:system-testing 32 | ``` 33 | 34 | 4. 部署 tasks 和 pipelines 35 | ``` 36 | k apply -f pipelines/ -f tasks/ 37 | ``` 38 | 39 | 5. 
启动 task 和 pipeline。以较简单的 pipeline `st--1pd1db3kv` 为例 40 | ``` 41 | tkn-start-pipeline st--1pd1db3kv -p run-id=$ST_NAMESPACE 42 | ``` 43 | 44 | ## 执行特定的测试 45 | 46 | ### 执行 Compaction Filter 的 Sysbench 测试 47 | 48 | 先部署集群资源,假设资源部署在 `st--1pd1db3kv` 名字空间下。 49 | 50 | ``` 51 | tkn-start-task st--workload-compaction-filter-sysbench -p res-ns=st--1pd1db3kv \ 52 | -p tct-name=st--1pd1db3kv -p trr-name=st--1pd1db3kv \ 53 | -p workload-script-b64=$(base64 workload_script/compaction_filter_sysbench.sh) 54 | ``` 55 | -------------------------------------------------------------------------------- /archive/system-testing/SOURCEME.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}") && pwd) 4 | cd $ROOT 5 | 6 | source hack/lib.sh 7 | 8 | function st::setup_aliases() { 9 | local ns="$1" 10 | unalias k 2>%1 11 | unalias tkn 2>%1 12 | unalias naglfar 2>%1 13 | unalias tkn-start-task 2>%1 14 | unalias tkn-start-pipeline 2>%1 15 | 16 | alias tkn="tkn -n $ST_NAMESPACE" 17 | alias tkn-start-task="tkn -n $ST_NAMESPACE task start -s $ST_TEKTON_SA -w name=manifest,emptyDir=""" 18 | alias tkn-start-pipeline="tkn -n $ST_NAMESPACE pipeline start -s $ST_TEKTON_SA -w name=manifest,emptyDir=""" 19 | echo -e "Tekton tasks/pipelines all run in namespace \e[36m$ST_NAMESPACE\e[0m" 20 | 21 | alias k="kubectl -n $ns" 22 | alias naglfar="naglfar -n $ns" 23 | echo "You now have these aliases with -n option:" 24 | echo -e "\t\e[36mk=kubectl,naglfar=naglfar\e[0m" 25 | } 26 | 27 | # Command 28 | function st-use-ns() { 29 | if [ -z "$1" ]; then 30 | echo "Usage: st-use-ns NAMESPACE" 31 | false 32 | else 33 | export ST_NAMESPACE="$1" 34 | echo -e "System testing resources namespace is \e[36m$1\e[0m" 35 | st::setup_aliases "$ST_NAMESPACE" 36 | if [ $? 
-eq 0 ]; then 37 | echo -e "You can also try \e[34msource hack/bash_prompt.sh\e[0m" 38 | fi 39 | fi 40 | } 41 | 42 | ST_TEKTON_SA="system-testing" 43 | 44 | st-use-ns "$ST_NAMESPACE" 45 | -------------------------------------------------------------------------------- /archive/system-testing/dockerfiles/compaction-filter-sysbench/Dockerfile: -------------------------------------------------------------------------------- 1 | # https://github.com/PingCAP-QE/bench-toolset/blob/master/Dockerfile 2 | FROM hub.pingcap.net/mahjonp/bench-toolset 3 | RUN mkdir -p /data/apps/compaction-filter-sysbench/ 4 | WORKDIR /data/apps/compaction-filter-sysbench/ 5 | 6 | ADD common.lua ./ 7 | ADD updates.lua ./ 8 | 9 | # Output: hub.pingcap.net/system-testing/compaction-filter-sysbench:210304 10 | -------------------------------------------------------------------------------- /archive/system-testing/dockerfiles/compaction-filter-sysbench/common.lua: -------------------------------------------------------------------------------- 1 | function init() end 2 | 3 | if sysbench.cmdline.command == nil then 4 | error("Command is required. Supported commands: prepare, run, help") 5 | end 6 | 7 | sysbench.cmdline.options = { 8 | table_size = {"Number of rows per table", 10000}, 9 | tables = {"Number of tables", 1}, 10 | modifies = {"Row to be updated in one transaction", 20} 11 | } 12 | 13 | function cmd_prepare() 14 | local drv = sysbench.sql.driver() 15 | local con = drv:connect() 16 | local threads = sysbench.opt.threads 17 | for i = sysbench.tid % threads + 1, sysbench.opt.tables, threads do 18 | create_table(drv, con, i) 19 | end 20 | end 21 | 22 | sysbench.cmdline.commands = { 23 | prepare = {cmd_prepare, sysbench.cmdline.PARALLEL_COMMAND} 24 | } 25 | 26 | -- 20 groups, 249 characters 27 | local c_value_template = "###########-###########-###########-" .. 28 | "###########-###########-###########-" .. 29 | "###########-###########-###########-" .. 
30 | "###########-###########-###########-" .. 31 | "###########-###########-###########-" .. 32 | "###########-###########-###########-" .. 33 | "###########-###########" 34 | 35 | -- 4 group, 47 characters 36 | local pad_value_template = "###########-###########-###########-###########" 37 | 38 | function get_c_value() return sysbench.rand.string(c_value_template) end 39 | 40 | function get_pad_value() return sysbench.rand.string(pad_value_template) end 41 | 42 | function create_table(drv, con, table_num) 43 | local id_def = "BIGINT NOT NULL" 44 | local id_index_def = "PRIMARY KEY" 45 | local engine_def = "" 46 | local extra_table_options = "" 47 | 48 | print(string.format("Creating table 'sbtest%d'...", table_num)) 49 | local query = string.format([[ 50 | CREATE TABLE IF NOT EXISTS sbtest%d( 51 | id %s, 52 | k BIGINT DEFAULT '0' NOT NULL, 53 | c CHAR(255) DEFAULT '' NOT NULL, 54 | pad CHAR(60) DEFAULT '' NOT NULL, 55 | id_suffix %s, 56 | PRIMARY KEY (id), 57 | UNIQUE KEY (id_suffix), 58 | KEY (k) 59 | ) %s %s]], table_num, id_def, id_def, engine_def, 60 | extra_table_options) 61 | con:query(query) 62 | 63 | print(string.format("Inserting %d records into 'sbtest%d'", 64 | sysbench.opt.table_size, table_num)) 65 | 66 | query = "INSERT INTO sbtest" .. table_num .. 
67 | "(id, k, c, pad, id_suffix) VALUES" 68 | con:bulk_insert_init(query) 69 | for i = 1, sysbench.opt.table_size do 70 | local c_val = "" 71 | if sysbench.rand.uniform(1, 6) % 6 == 3 then 72 | c_val = get_c_value() 73 | end 74 | local pad_val = get_pad_value() 75 | query = string.format("(%d, %d, '%s', '%s', %d)", i, sb_rand(1, 255), 76 | c_val, pad_val, i) 77 | con:bulk_insert_next(query) 78 | end 79 | con:bulk_insert_done() 80 | end 81 | 82 | local t = sysbench.sql.type 83 | local stmt_defs = { 84 | delete = {"DELETE FROM sbtest%u WHERE id = ?", t.INT}, 85 | insert = { 86 | "INSERT INTO sbtest%u (id, k, c, pad, id_suffix) VALUES (?, ?, ?, ?, ?)", 87 | t.INT, t.INT, {t.CHAR, 255}, {t.CHAR, 60}, t.INT 88 | }, 89 | update = {"UPDATE sbtest%u SET k = k + ? WHERE id = ?", t.INT, t.INT} 90 | } 91 | 92 | function prepare_begin() stmt.begin = con:prepare("BEGIN") end 93 | 94 | function prepare_commit() stmt.commit = con:prepare("COMMIT") end 95 | 96 | function prepare_for_each_table(key) 97 | for t = 1, sysbench.opt.tables do 98 | stmt[t][key] = con:prepare(string.format(stmt_defs[key][1], t)) 99 | 100 | local nparam = #stmt_defs[key] - 1 101 | if nparam > 0 then param[t][key] = {} end 102 | for p = 1, nparam do 103 | local btype = stmt_defs[key][p + 1] 104 | local len 105 | if type(btype) == "table" then 106 | len = btype[2] 107 | btype = btype[1] 108 | end 109 | if btype == sysbench.sql.type.VARCHAR or btype == 110 | sysbench.sql.type.CHAR then 111 | param[t][key][p] = stmt[t][key]:bind_create(btype, len) 112 | else 113 | param[t][key][p] = stmt[t][key]:bind_create(btype) 114 | end 115 | end 116 | if nparam > 0 then stmt[t][key]:bind_param(unpack(param[t][key])) end 117 | end 118 | end 119 | 120 | function thread_init() 121 | drv = sysbench.sql.driver() 122 | con = drv:connect() 123 | 124 | stmt = {} 125 | param = {} 126 | for t = 1, sysbench.opt.tables do 127 | stmt[t] = {} 128 | param[t] = {} 129 | end 130 | prepare_statements() 131 | end 132 | 133 | function 
close_statements() 134 | for t = 1, sysbench.opt.tables do 135 | for k, s in pairs(stmt[t]) do stmt[t][k]:close() end 136 | end 137 | if (stmt.begin ~= nil) then stmt.begin:close() end 138 | if (stmt.commit ~= nil) then stmt.commit:close() end 139 | end 140 | 141 | function thread_done() 142 | close_statements() 143 | con:disconnect() 144 | end 145 | 146 | function get_table_num() return sysbench.rand.uniform(1, sysbench.opt.tables) end 147 | 148 | function get_id() return sysbench.rand.uniform(1, sysbench.opt.table_size) end 149 | 150 | function begin() stmt.begin:execute() end 151 | 152 | function commit() stmt.commit:execute() end 153 | 154 | function sysbench.hooks.before_restart_event(errdesc) 155 | if errdesc.sql_errno == 2013 or -- CR_SERVER_LOST 156 | errdesc.sql_errno == 2055 or -- CR_SERVER_LOST_EXTENDED 157 | errdesc.sql_errno == 2006 or -- CR_SERVER_GONE_ERROR 158 | errdesc.sql_errno == 2011 -- CR_TCP_CONNECTION 159 | then 160 | close_statements() 161 | prepare_statements() 162 | end 163 | end 164 | -------------------------------------------------------------------------------- /archive/system-testing/dockerfiles/compaction-filter-sysbench/updates.lua: -------------------------------------------------------------------------------- 1 | require("common") 2 | 3 | function sleep(n) os.execute("sleep " .. 
n) end 4 | 5 | function get_random_zero_sum_seq(len) 6 | local sum = 0 7 | local t = {} 8 | for i = 1, len / 2 do 9 | local x = sysbench.rand.uniform(1, 255) 10 | table.insert(t, x) 11 | table.insert(t, -x) 12 | end 13 | if #t == len - 1 then table.insert(t, 0) end 14 | return t 15 | end 16 | 17 | function prepare_statements() 18 | prepare_begin() 19 | prepare_commit() 20 | prepare_for_each_table("delete") 21 | prepare_for_each_table("insert") 22 | prepare_for_each_table("update") 23 | end 24 | 25 | function too_many_processlist(con) 26 | local rs = con:query("show processlist") 27 | local busy_count = 0 28 | for i = 1, rs.nrows do 29 | local command = unpack(rs:fetch_row(), 5, 5) 30 | if command ~= "Sleep" then busy_count = busy_count + 1 end 31 | end 32 | rs:free() 33 | return busy_count >= 20 34 | end 35 | 36 | function get_counters(con, tid) 37 | local sql = "select sum(k), count(id) from sbtest" .. tid 38 | local rs = con:query(sql) 39 | local sum_k_s, count_id_s = unpack(rs:fetch_row(), 1, 2) 40 | rs:free() 41 | local sum_k = tonumber(sum_k_s) 42 | local count_id = tonumber(count_id_s) 43 | local sql = string.format("select count(k) from sbtest%u use index(k)", tid) 44 | local rs = con:query(sql) 45 | local count_k_s = unpack(rs:fetch_row(), 1, 1) 46 | rs:free() 47 | local count_k = tonumber(count_k_s) 48 | return sum_k, count_id, count_k 49 | end 50 | 51 | function thread_init() 52 | drv = sysbench.sql.driver() 53 | con = drv:connect() 54 | stmt = {} 55 | param = {} 56 | for t = 1, sysbench.opt.tables do 57 | stmt[t] = {} 58 | param[t] = {} 59 | end 60 | prepare_statements() 61 | 62 | local tid = sysbench.tid % sysbench.opt.threads + 1 63 | if tid <= sysbench.opt.tables then 64 | while too_many_processlist(con) do sleep(1) end 65 | sum_k, count_id, count_k = get_counters(con, tid) 66 | print("sbtest" .. tid .. ", sum(k): " .. sum_k .. ", count(id): " .. 67 | count_id .. ", count(k): " .. 
count_k) 68 | end 69 | end 70 | 71 | function thread_done() 72 | local tid = sysbench.tid % sysbench.opt.threads + 1 73 | if tid <= sysbench.opt.tables then 74 | while too_many_processlist(con) do sleep(1) end 75 | local sum_k_1, count_id_1, count_k_1 = get_counters(con, tid) 76 | if sum_k_1 ~= sum_k or count_id_1 ~= count_id or count_k_1 ~= count_k then 77 | print("corrupt sbtest" .. tid .. ", sum(k): " .. sum_k .. 78 | ", count(id): " .. count_id .. ", count(k): " .. count_k) 79 | os.exit(-1) 80 | end 81 | end 82 | close_statements() 83 | con:disconnect() 84 | end 85 | 86 | function event() 87 | begin() 88 | 89 | local tnum = get_table_num() 90 | local id_suffix = get_id() 91 | local rs = con:query(string.format( 92 | "SELECT id,k,id_suffix FROM sbtest%u WHERE id_suffix BETWEEN %d AND %d for update", 93 | tnum, id_suffix, id_suffix + sysbench.opt.modifies)) 94 | local update_list = {} 95 | local del_ins_list = {} 96 | for i = 1, rs.nrows do 97 | local id, k, suffix = unpack(rs:fetch_row(), 1, rs.nfields) 98 | id = tonumber(id) 99 | k = tonumber(k) 100 | suffix = tonumber(suffix) 101 | 102 | if sysbench.rand.uniform(1, 2) % 2 == 1 then 103 | local t = {} 104 | t["id"] = id 105 | t["k"] = k 106 | t["suffix"] = suffix 107 | table.insert(del_ins_list, t) 108 | else 109 | table.insert(update_list, id) 110 | end 111 | end 112 | rs:free() 113 | 114 | for i = 1, #del_ins_list do 115 | local id = del_ins_list[i]["id"] 116 | param[tnum].delete[1]:set(id) 117 | stmt[tnum].delete:execute() 118 | param[tnum].insert[1]:set(id + sysbench.opt.table_size) 119 | param[tnum].insert[2]:set(del_ins_list[i]["k"]) 120 | if sysbench.rand.uniform(1, 6) % 6 == 3 then 121 | param[tnum].insert[3]:set(get_c_value()) 122 | else 123 | param[tnum].insert[3]:set("") 124 | end 125 | param[tnum].insert[4]:set(get_pad_value()) 126 | param[tnum].insert[5]:set(del_ins_list[i]["suffix"]) 127 | stmt[tnum].insert:execute() 128 | end 129 | 130 | local zero_sum_seq = get_random_zero_sum_seq(#update_list) 
131 | for i = 1, #update_list do 132 | param[tnum].update[1]:set(zero_sum_seq[i]) 133 | param[tnum].update[2]:set(update_list[i]) 134 | stmt[tnum].update:execute() 135 | end 136 | 137 | commit() 138 | end 139 | -------------------------------------------------------------------------------- /archive/system-testing/examples/tct.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: naglfar.pingcap.com/v1 2 | kind: TestClusterTopology 3 | metadata: 4 | name: qupeng 5 | spec: 6 | resourceRequest: qupeng 7 | tidbCluster: 8 | global: 9 | deployDir: "/disks1/deploy" 10 | dataDir: "/disks1/data" 11 | version: 12 | version: nightly 13 | control: ctl-pd-db-monitor 14 | serverConfigs: 15 | pd: |- 16 | replication.location-labels: ["host"] 17 | tikv: |- 18 | gc.enable-compaction-filter: true 19 | raftstore.apply-pool-size: 3 20 | tikv: 21 | - host: kv1 22 | port: 20160 23 | statusPort: 20180 24 | deployDir: /disk1/deploy/tikv-20160 25 | dataDir: /disk1/data/tikv-20160 26 | config: | 27 | server.labels: { host: "host1" } 28 | - host: kv1 29 | port: 20161 30 | statusPort: 20181 31 | deployDir: /disk1/deploy/tikv-20161 32 | dataDir: /disk1/data/tikv-20161 33 | config: | 34 | server.labels: { host: "host2" } 35 | - host: kv2 36 | port: 20160 37 | statusPort: 20180 38 | deployDir: /disk1/deploy/tikv-20160 39 | dataDir: /disk1/data/tikv-20160 40 | config: | 41 | server.labels: { host: "host3" } 42 | - host: kv2 43 | port: 20161 44 | statusPort: 20181 45 | deployDir: /disk1/deploy/tikv-20161 46 | dataDir: /disk1/data/tikv-20161 47 | config: | 48 | server.labels: { host: "host4" } 49 | tidb: 50 | - host: ctl-pd-db-monitor 51 | pd: 52 | - host: ctl-pd-db-monitor 53 | monitor: 54 | - host: ctl-pd-db-monitor 55 | grafana: 56 | - host: ctl-pd-db-monitor 57 | port: 9000 58 | -------------------------------------------------------------------------------- /archive/system-testing/examples/trr.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: naglfar.pingcap.com/v1 2 | kind: TestResourceRequest 3 | metadata: 4 | name: qupeng 5 | spec: 6 | items: 7 | - name: ctl-pd-db-monitor 8 | spec: 9 | memory: 32GB 10 | cores: 16 11 | - name: kv1 12 | spec: 13 | memory: 32GB 14 | cores: 16 15 | disks: 16 | disk1: 17 | kind: nvme 18 | size: 3TB 19 | mountPath: /disk1 20 | - name: kv2 21 | spec: 22 | memory: 32GB 23 | cores: 16 24 | disks: 25 | disk1: 26 | kind: nvme 27 | size: 3TB 28 | mountPath: /disk1 29 | -------------------------------------------------------------------------------- /archive/system-testing/hack/bash_prompt.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script show the system testing namespace in the prompt PS1 4 | # 5 | # Usage: source hack/bash_prompt.sh 6 | 7 | function __get_st_ns { 8 | if [ -z "$ST_PROMPT_COMMAND" ]; then 9 | echo "" 10 | else 11 | if [ -z "$ST_NAMESPACE" ]; then 12 | echo "" 13 | else 14 | echo "($ST_NAMESPACE) " 15 | fi 16 | fi 17 | } 18 | 19 | function __st_prompt_command { 20 | PS1="`__get_st_ns`${PS1}" 21 | } 22 | 23 | function st-prompt-activate { 24 | export ST_PROMPT_COMMAND="on" 25 | echo "Run 'st-prompt-deactivate' to deactivate." 
26 | } 27 | 28 | 29 | function st-prompt-deactivate { 30 | unset ST_PROMPT_COMMAND 31 | } 32 | 33 | 34 | st-prompt-activate 35 | 36 | if [[ $PROMPT_COMMAND != *"__st_prompt_command"* ]]; then 37 | export PROMPT_COMMAND="$PROMPT_COMMAND;__st_prompt_command" 38 | fi 39 | -------------------------------------------------------------------------------- /archive/system-testing/hack/lib.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | -------------------------------------------------------------------------------- /archive/system-testing/hack/run_workload_sleep.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ -z "$1" ]; then 4 | echo "Usage: $0 " 5 | exit 1 6 | fi 7 | 8 | run_id=$1 9 | kubectl -n $ST_NAMESPACE get tct $run_id > /dev/null \ 10 | || (echo "tct $run_id not found." && exit 1) 11 | kubectl -n $ST_NAMESPACE get tr workload > /dev/null \ 12 | || (echo "Node workload not found." && exit 1) 13 | 14 | cat < "$(workspaces.manifest.path)/test_resources.yaml" 25 | $(params.trr-manifest) 26 | EOF 27 | echo "generating resources manifest...done" 28 | cat "$(workspaces.manifest.path)/test_resources.yaml" 29 | 30 | echo "generating cluster manifest..." 
31 | cat < "$(workspaces.manifest.path)/test_cluster.yaml" 32 | $(params.tct-manifest) 33 | EOF 34 | echo "generating cluster manifest...done" 35 | cat "$(workspaces.manifest.path)/test_cluster.yaml" 36 | - name: create-resources-and-cluster 37 | image: hub.pingcap.net/qa/kubetools:20200730 38 | script: | 39 | #!/usr/bin/env bash 40 | set -ex 41 | kubectl apply -f "$(workspaces.manifest.path)/test_resources.yaml" 42 | kubectl apply -f "$(workspaces.manifest.path)/test_cluster.yaml" 43 | - name: wait-until-cluster-ready 44 | image: hub.pingcap.net/qa/kubetools:20200730 45 | script: | 46 | #!/usr/bin/env bash 47 | while true 48 | do 49 | state=`kubectl get tct "$(params.tct-name)" -ojsonpath='{.status.state}' || echo unknown` 50 | echo "current resource state: $state" 51 | if [ "ready" = "$state" ]; then 52 | break 53 | fi 54 | echo "test resources isn't ready now, wait another 10s..." 55 | sleep 10 56 | done 57 | -------------------------------------------------------------------------------- /archive/system-testing/tasks/task--upgrade-test--env-ops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1beta1 2 | kind: Task 3 | metadata: 4 | name: systesting-env-ops 5 | spec: 6 | params: 7 | - name: run-id 8 | type: string 9 | workspaces: 10 | - name: manifest 11 | mountPath: /adhoc-manifests 12 | steps: 13 | - name: generate-manifest 14 | image: hub.pingcap.net/qa/kubetools:20200730 15 | script: | 16 | #!/usr/bin/env bash 17 | echo "generating ops-resources manifest..." 
18 | cat < "$(workspaces.manifest.path)/ops-resources.yaml" 19 | apiVersion: naglfar.pingcap.com/v1 20 | kind: TestClusterTopology 21 | metadata: 22 | name: $(params.run-id)-ops 23 | spec: 24 | resourceRequest: $(params.run-id) 25 | tidbCluster: 26 | global: 27 | deployDir: "/disks1/deploy" 28 | dataDir: "/disks1/data" 29 | version: 30 | version: v5.0.0-rc 31 | control: ctl 32 | tikv: 33 | - host: kv1 34 | - host: kv2 35 | - host: kv3 36 | - host: kv4 37 | tidb: 38 | - host: db-pd-ctl 39 | pd: 40 | - host: db-pd-ctl 41 | monitor: 42 | - host: db-pd-ctl 43 | grafana: 44 | - host: db-pd-ctl 45 | port: 9000 46 | EOF 47 | - name: apply-manifest 48 | image: hub.pingcap.net/qa/kubetools:20200730 49 | script: | 50 | #!/usr/bin/env bash 51 | kubectl apply -f "$(workspaces.manifest.path)/ops-resources.yaml" 52 | -------------------------------------------------------------------------------- /archive/system-testing/tasks/task--workload-compaction-filter-sysbench.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1beta1 2 | kind: Task 3 | metadata: 4 | name: st--workload-compaction-filter-sysbench 5 | spec: 6 | params: 7 | # test resouces info, including tidb cluster and workload 8 | - name: res-ns 9 | type: string 10 | 11 | # tidb cluster 12 | - name: tct-name 13 | type: string 14 | 15 | # workload info 16 | - name: trr-name 17 | type: string 18 | - name: workload-script-b64 19 | type: string 20 | default: "" 21 | 22 | workspaces: 23 | - name: manifest 24 | mountPath: /adhoc-manifests 25 | 26 | steps: 27 | - name: generate-run-manifest 28 | image: hub.pingcap.net/qa/kubetools:20200730 29 | script: | 30 | #!/usr/bin/env bash 31 | echo "generating worload manifest..." 
32 | cat < "$(workspaces.manifest.path)/run.yaml" 33 | apiVersion: naglfar.pingcap.com/v1 34 | kind: TestWorkload 35 | metadata: 36 | name: "$(context.task.name)" 37 | spec: 38 | clusterTopologies: 39 | - name: "$(params.tct-name)" 40 | aliasName: cluster 41 | workloads: 42 | - name: workload-1 43 | dockerContainer: 44 | resourceRequest: 45 | name: "$(params.trr-name)" 46 | node: workload 47 | image: hub.pingcap.net/system-testing/compaction-filter-sysbench:210304 48 | imagePullPolicy: Always 49 | command: 50 | - /bin/bash 51 | - -c 52 | - | 53 | echo "Generating script.sh" 54 | echo "$(params.workload-script-b64)" | base64 -d > script.sh 55 | cat script.sh 56 | echo "Run script.sh" 57 | bash script.sh 58 | EOF 59 | echo "generating worload manifest...done" 60 | cat "$(workspaces.manifest.path)/run.yaml" 61 | - name: workload-tail-f 62 | image: hub.pingcap.net/qa/kubetools:20200730 63 | script: | 64 | #!/usr/bin/env bash 65 | set -xe 66 | shopt -s expand_aliases 67 | twName="$(context.task.name)" 68 | namespace="$(params.res-ns)" 69 | echo "ensure test-workload($twName) does not exists" 70 | alias kubectl="kubectl -n $namespace" 71 | exists=`kubectl get tw $twName >/dev/null && echo "yes" || echo "no"` 72 | if [ "$exists" == "yes" ]; then 73 | echo "test workload($twName) already exists, delete it." 74 | kubectl delete tw $twName 75 | fi 76 | 77 | echo "create test-workload($twName)..." 
78 | kubectl apply -f "$(workspaces.manifest.path)/run.yaml" -n $namespace 79 | while true 80 | do 81 | state=`kubectl get tw "$twName" -ojsonpath='{.status.state}' || echo pending` 82 | echo "current workload state: $state" 83 | if [ "succeeded" == "$state" ]; then 84 | break 85 | elif [ "failed" == "$state" ]; then 86 | break 87 | elif [ "running" == "$state" ]; then 88 | break 89 | fi 90 | echo "workload wait another 5s" 91 | sleep 5 92 | done 93 | curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/PingCAP-QE/Naglfar/master/scripts/kubectl-naglfar-installer.sh | sh 94 | ~/.Naglfar/bin/naglfar logs $twName -n $namespace --follow 95 | kubectl delete tw $twName -n $namespace 96 | -------------------------------------------------------------------------------- /archive/system-testing/tasks/task--workload-tpcc--prepare.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1beta1 2 | kind: Task 3 | metadata: 4 | name: st--workload-tpcc--prepare 5 | spec: 6 | params: 7 | - name: workload-name 8 | type: string 9 | 10 | # dependencies 11 | - name: trr-name 12 | type: string 13 | - name: tct-name 14 | type: string 15 | 16 | # tpcc parameters 17 | - name: threads 18 | type: string 19 | - name: warehouses 20 | type: string 21 | workspaces: 22 | - name: manifest 23 | mountPath: /adhoc-manifests 24 | 25 | steps: 26 | - name: generate-restore-manifest 27 | image: hub.pingcap.net/qa/kubetools:20200730 28 | script: | 29 | #!/usr/bin/env bash 30 | echo "generating worload manifest..." 
31 | cat < "$(workspaces.manifest.path)/restore.yaml" 32 | apiVersion: naglfar.pingcap.com/v1 33 | kind: TestWorkload 34 | metadata: 35 | name: "$(params.workload-name)" 36 | spec: 37 | clusterTopologies: 38 | - name: "$(params.tct-name)" 39 | aliasName: cluster 40 | workloads: 41 | - name: "$(params.workload-name)" 42 | dockerContainer: 43 | resourceRequest: 44 | name: "$(params.trr-name)" 45 | node: workload 46 | image: hub.pingcap.net/mahjonp/bench-toolset 47 | imagePullPolicy: Always 48 | command: 49 | - /bin/bash 50 | - -c 51 | - | 52 | set -x 53 | tidbHost=\`echo \$cluster_tidb | awk -F ":" '{print \$1}'\` 54 | tidbPort=\`echo \$cluster_tidb | awk -F ":" '{print \$2}'\` 55 | isTpccExist=\`mysql -uroot -P "\$tidbPort" -h "\$tidbHost" -e "use tpcc; show tables;" | grep customer > /dev/null && echo "yes" || echo "no"\` 56 | if [ "\$isTpccExist" == "no" ]; then 57 | mysql -uroot -P4000 -h "\$tidbHost" -e "create database tpcc;" 58 | echo "\`date\` prepare..." 59 | go-tpc -H \$tidbHost -P \$tidbPort -D tpcc tpcc prepare -T \$(params.threads) --warehouses \$(params.warehouses) 60 | echo "\`date\` prepare...done" 61 | else 62 | echo "database tpcc already exists" 63 | fi 64 | EOF 65 | echo "generating worload manifest...done" 66 | cat "$(workspaces.manifest.path)/restore.yaml" 67 | - name: workload-tail-f 68 | image: hub.pingcap.net/qa/kubetools:20200730 69 | script: | 70 | #!/usr/bin/env bash 71 | set -xe 72 | twName="$(params.workload-name)" 73 | echo "ensure test-workload($twName) does not exists" 74 | exists=`kubectl get tw $twName >/dev/null && echo "yes" || echo "no"` 75 | if [ "$exists" == "yes" ]; then 76 | echo "test workload($twName) already exists, delete it." 77 | kubectl delete tw $twName 78 | fi 79 | 80 | echo "create test-workload($twName)..." 
81 | kubectl apply -f "$(workspaces.manifest.path)/restore.yaml" 82 | while true 83 | do 84 | state=`kubectl get tw "$twName" -ojsonpath='{.status.state}' || echo pending` 85 | echo "current workload state: $state" 86 | if [ "succeeded" == "$state" ]; then 87 | break 88 | elif [ "failed" == "$state" ]; then 89 | break 90 | elif [ "running" == "$state" ]; then 91 | break 92 | fi 93 | echo "workload wait another 5s" 94 | sleep 5 95 | done 96 | curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/PingCAP-QE/Naglfar/master/scripts/kubectl-naglfar-installer.sh | sh 97 | ~/.Naglfar/bin/naglfar logs $twName -n $(context.taskRun.namespace) --follow 98 | kubectl delete tw $twName 99 | -------------------------------------------------------------------------------- /archive/system-testing/tasks/task--workload-tpcc--restore.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1beta1 2 | kind: Task 3 | metadata: 4 | name: st--workload-tpcc--restore 5 | spec: 6 | 7 | params: 8 | - name: run-id 9 | type: string 10 | - name: tpcc-data-uri 11 | type: string 12 | default: "s3://benchmark/tpcc-10-nightly" 13 | workspaces: 14 | - name: manifest 15 | mountPath: /adhoc-manifests 16 | 17 | steps: 18 | - name: generate-restore-manifest 19 | image: hub.pingcap.net/qa/kubetools:20200730 20 | script: | 21 | #!/usr/bin/env bash 22 | echo "generating worload manifest..." 
23 | cat < "$(workspaces.manifest.path)/restore.yaml" 24 | apiVersion: naglfar.pingcap.com/v1 25 | kind: TestWorkload 26 | metadata: 27 | name: "$(params.run-id)" 28 | spec: 29 | clusterTopologies: 30 | - name: "$(params.run-id)" 31 | aliasName: cluster 32 | workloads: 33 | - name: "$(params.run-id)" 34 | dockerContainer: 35 | resourceRequest: 36 | name: "$(params.run-id)" 37 | node: workload 38 | image: hub.pingcap.net/mahjonp/bench-toolset 39 | imagePullPolicy: Always 40 | command: 41 | - /bin/bash 42 | - -c 43 | - | 44 | set -x 45 | export AWS_ACCESS_KEY_ID=minioadmin 46 | export AWS_SECRET_ACCESS_KEY=minioadmin 47 | tidbHost=\`echo \$cluster_tidb0 | awk -F ":" '{print \$1}'\` 48 | pdHost=\`echo \$cluster_pd0 | awk -F ":" '{print \$1}'\` 49 | isTpccExist=\`mysql -uroot -P4000 -h "\$tidbHost" -e "use test; show tables;" | grep customer > /dev/null && echo "yes" || echo "no"\` 50 | if [ "\$isTpccExist" == "no" ]; then 51 | # mysql -uroot -P4000 -h "\$tidbHost" -e "create database tpcc;" 52 | echo "\`date\` restore..." 
53 | 54 | # go-tpc -H \$tidbHost -P4000 -D tpcc tpcc prepare --warehouses 10 55 | 56 | # sql="restore database * from '$(params.tpcc-data-uri)/?endpoint=http://minio.pingcap.net:9000&access-key=minioadmin&secret-access-key=minioadmin&force-path-style=true';" 57 | # mysql -uroot -h \$tidbHost -P4000 -e "\$sql" 58 | 59 | br -V 60 | br restore full --pd="\$pdHost:2379" --storage "$(params.tpcc-data-uri)/" \ 61 | --s3.endpoint http://minio.pingcap.net:9000 --send-credentials-to-tikv=true 62 | 63 | ls /tmp/br.log* 64 | cat /tmp/br.log* 65 | 66 | echo "\`date\` restore...done" 67 | else 68 | echo "database tpcc already exists" 69 | fi 70 | 71 | EOF 72 | echo "generating worload manifest...done" 73 | cat "$(workspaces.manifest.path)/restore.yaml" 74 | - name: workload-tail-f 75 | image: hub.pingcap.net/qa/kubetools:20200730 76 | script: | 77 | #!/usr/bin/env bash 78 | set -xe 79 | twName="$(params.run-id)" 80 | echo "ensure test-workload($twName) does not exists" 81 | exists=`kubectl get tw $twName >/dev/null && echo "yes" || echo "no"` 82 | if [ "$exists" == "yes" ]; then 83 | echo "test workload($twName) already exists, delete it." 84 | kubectl delete tw $twName 85 | fi 86 | 87 | echo "create test-workload($twName)..." 
88 | kubectl apply -f "$(workspaces.manifest.path)/restore.yaml" 89 | while true 90 | do 91 | state=`kubectl get tw "$twName" -ojsonpath='{.status.state}' || echo pending` 92 | echo "current workload state: $state" 93 | if [ "succeeded" == "$state" ]; then 94 | break 95 | elif [ "failed" == "$state" ]; then 96 | break 97 | elif [ "running" == "$state" ]; then 98 | break 99 | fi 100 | echo "workload wait another 5s" 101 | sleep 5 102 | done 103 | curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/PingCAP-QE/Naglfar/master/scripts/kubectl-naglfar-installer.sh | sh 104 | ~/.Naglfar/bin/naglfar logs $twName -n $(context.taskRun.namespace) --follow 105 | kubectl delete tw $twName 106 | -------------------------------------------------------------------------------- /archive/system-testing/tasks/task-env-ops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1beta1 2 | kind: Task 3 | metadata: 4 | name: systesting-env-ops 5 | spec: 6 | params: 7 | - name: run-id 8 | type: string 9 | workspaces: 10 | - name: manifest 11 | mountPath: /adhoc-manifests 12 | steps: 13 | - name: generate-manifest 14 | image: hub.pingcap.net/qa/kubetools:20200730 15 | script: | 16 | #!/usr/bin/env bash 17 | echo "generating ops-resources manifest..." 
18 | cat < "$(workspaces.manifest.path)/ops-resources.yaml" 19 | apiVersion: naglfar.pingcap.com/v1 20 | kind: TestClusterTopology 21 | metadata: 22 | name: $(params.run-id)-ops 23 | spec: 24 | resourceRequest: $(params.run-id) 25 | tidbCluster: 26 | global: 27 | deployDir: "/disks1/deploy" 28 | dataDir: "/disks1/data" 29 | version: 30 | version: nightly 31 | control: ctl 32 | tikv: 33 | - host: kv1 34 | - host: kv2 35 | - host: kv3 36 | - host: kv4 37 | tidb: 38 | - host: db 39 | pd: 40 | - host: pd 41 | monitor: 42 | - host: ctl 43 | grafana: 44 | - host: ctl 45 | port: 9000 46 | EOF 47 | - name: apply-manifest 48 | image: hub.pingcap.net/qa/kubetools:20200730 49 | script: | 50 | #!/usr/bin/env bash 51 | kubectl apply -f "$(workspaces.manifest.path)/ops-resources.yaml" 52 | -------------------------------------------------------------------------------- /archive/system-testing/tasks/task-sleep.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1beta1 2 | kind: Task 3 | metadata: 4 | name: systesting-sleep 5 | spec: 6 | params: 7 | - name: duration 8 | type: string 9 | default: "300" 10 | steps: 11 | - name: sleep-5m 12 | image: hub.pingcap.net/qa/kubetools:20200730 13 | script: | 14 | #!/usr/bin/env bash 15 | echo "start sleep for $(params.duration)..." 
16 | sleep $(params.duration) 17 | echo "sleep...done" 18 | -------------------------------------------------------------------------------- /archive/system-testing/tasks/task-teardown.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1beta1 2 | kind: Task 3 | metadata: 4 | name: systesting-teardown 5 | 6 | spec: 7 | params: 8 | - name: run-id 9 | type: string 10 | steps: 11 | - name: teardown-all 12 | image: hub.pingcap.net/qa/kubetools:20200730 13 | script: | 14 | #!/usr/bin/env bash 15 | kubectl delete tct $(params.run-id) 16 | kubectl delete tw $(params.run-id) 17 | kubectl delete trr $(params.run-id) 18 | -------------------------------------------------------------------------------- /archive/system-testing/tasks/task-workload-run.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1beta1 2 | kind: Task 3 | metadata: 4 | name: st--workload-tpcc--run 5 | spec: 6 | params: 7 | - name: run-id 8 | type: string 9 | - name: db-name 10 | type: string 11 | default: test 12 | workspaces: 13 | - name: manifest 14 | mountPath: /adhoc-manifests 15 | steps: 16 | - name: generate-run-manifest 17 | image: hub.pingcap.net/qa/kubetools:20200730 18 | script: | 19 | #!/usr/bin/env bash 20 | echo "generating worload manifest..." 
21 | file="$(workspaces.manifest.path)/run.yaml" 22 | cat < $file 23 | apiVersion: naglfar.pingcap.com/v1 24 | kind: TestWorkload 25 | metadata: 26 | name: "$(params.run-id)-workload-run" 27 | spec: 28 | clusterTopologies: 29 | - name: "$(params.run-id)" 30 | aliasName: cluster 31 | workloads: 32 | - name: "$(params.run-id)" 33 | dockerContainer: 34 | resourceRequest: 35 | name: "$(params.run-id)" 36 | node: workload 37 | image: hub.pingcap.net/mahjonp/bench-toolset 38 | imagePullPolicy: IfNotPresent 39 | command: 40 | - /bin/bash 41 | - -c 42 | - | 43 | set -x 44 | tidbHost=\`echo \$cluster_tidb | awk -F ":" '{print \$1}'\` 45 | tidbPort=\`echo \$cluster_tidb | awk -F ":" '{print \$2}'\` 46 | go-tpc -H \$tidbHost -P \$tidbPort -D \$(params.db-name) tpcc run --time 300m 47 | EOF 48 | echo "generating workload-run manifest...done" 49 | cat "$file" 50 | - name: workload-tail-f 51 | image: hub.pingcap.net/qa/kubetools:20200730 52 | script: | 53 | #!/usr/bin/env bash 54 | set -xe 55 | twName="$(params.run-id)-workload-run" 56 | echo "ensure test-workload($twName) does not exists" 57 | exists=`kubectl get tw $twName >/dev/null && echo "yes" || echo "no"` 58 | if [ "$exists" == "yes" ]; then 59 | echo "test workload($twName) already exists, delete it." 60 | kubectl delete tw $twName 61 | fi 62 | 63 | echo "create test-workload($twName)..." 
64 | kubectl apply -f "$(workspaces.manifest.path)/run.yaml" 65 | while true 66 | do 67 | state=`kubectl get tw "$twName" -ojsonpath='{.status.state}' || echo pending` 68 | echo "current workload state: $state" 69 | if [ "succeeded" == "$state" ]; then 70 | break 71 | elif [ "failed" == "$state" ]; then 72 | break 73 | elif [ "running" == "$state" ]; then 74 | break 75 | fi 76 | echo "workload wait another 5s" 77 | sleep 5 78 | done 79 | curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/PingCAP-QE/Naglfar/master/scripts/kubectl-naglfar-installer.sh | sh 80 | ~/.Naglfar/bin/naglfar logs $twName -n $(context.taskRun.namespace) --follow 81 | kubectl delete tw $twName 82 | -------------------------------------------------------------------------------- /archive/system-testing/workload_script/compaction_filter_sysbench.sh: -------------------------------------------------------------------------------- 1 | set -x 2 | echo "hello, tmp.sh" 3 | tidbHost=`echo $cluster_tidb0 | awk -F ":" '{print $1}'` 4 | tidbPort=`echo $cluster_tidb0 | awk -F ":" '{print $2}'` 5 | isDBExist=`mysql -uroot -P "$tidbPort" -h "$tidbHost" -e "use sbtest; show tables;" | grep sbtest > /dev/null && echo "yes" || echo "no"` 6 | if [ "$isDBExist" == "no" ]; then 7 | mysql -uroot -P4000 -h "$tidbHost" -e "create database sbtest;" 8 | echo "`date` prepare..." 
9 | sysbench --mysql-host="$tidbHost" --mysql-port="$tidbPort" --mysql-user=root --tables=16 --table-size=10 --threads=8 --time=60 updates prepare 10 | echo "`date` prepare...done" 11 | else 12 | echo "database sysbench already exists" 13 | fi 14 | sysbench --mysql-host="$tidbHost" --mysql-port="$tidbPort" --mysql-user=root --tables=16 --table-size=10 --threads=8 --time=60 updates run 15 | -------------------------------------------------------------------------------- /archive/tilo/.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | pytest_cache/ 3 | __pycache__/ 4 | .venv -------------------------------------------------------------------------------- /archive/tilo/cases/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cosven/tidb-testing/8e1ae96a45a85de11d905801bbf7914dc27c7e98/archive/tilo/cases/__init__.py -------------------------------------------------------------------------------- /archive/tilo/cases/test_7386.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import structlog 4 | 5 | from tilo.clients import PdClient, PlaygroundClient, new_sql_conn 6 | from tilo.clients import wait_till_true 7 | 8 | log = structlog.get_logger() 9 | 10 | 11 | def test_issue_7386(): 12 | log.msg('test 7386') 13 | 14 | pd = PdClient() 15 | pg = PlaygroundClient() 16 | conn = new_sql_conn() 17 | 18 | # try to disable auto region-merge 19 | pd.config_set('disable-remove-extra-replica', 'true') 20 | pd.config_set('patrol-region-interval', '50000ms') 21 | pd.apply_placement_rule({'group_id': 'pd', 22 | 'id': 'one-learner', 23 | 'start_key': '', 24 | 'end_key': '', 25 | 'role': 'learner', 26 | 'count': 1}) 27 | 28 | with conn.cursor() as cur: 29 | cur.execute("drop table if exists t;") 30 | cur.execute("create table t (a int primary key);") 31 | cur.execute("split table t between (0) and 
(10000) regions 2;") 32 | cur.execute("show table t regions;") 33 | rows = cur.fetchall() 34 | assert len(rows) == 2 35 | region_id = rows[1]['REGION_ID'] 36 | target_region_id = rows[0]['REGION_ID'] 37 | log.msg(f'region id: {region_id}, target region id: {target_region_id}') 38 | 39 | # the original peers count is 3 40 | region = pd.get_region(region_id) 41 | default_peers_count = len(region['peers']) 42 | log.msg(f'default peer count {default_peers_count}') 43 | 44 | # get a free store to add learner 45 | stores = pd.list_stores() 46 | store_ids = [store['store']['id'] for store in stores 47 | if 'labels' not in store['store']] 48 | log.msg(store_ids) 49 | 50 | target_store = None 51 | for peer in region['peers']: 52 | if peer.get('is_learner') is True: 53 | target_store = peer['store_id'] 54 | 55 | if target_store is None: 56 | raise Exception('no learner found') 57 | # used_stores = [peer['store_id'] for peer in region['peers']] 58 | # target_store = [store_id for store_id in store_ids 59 | # if store_id not in used_stores][0] 60 | 61 | # add learner 62 | # log.msg('add learner and peer for regions') 63 | # ok = pd.add_learner(region_id, target_store) 64 | # assert ok, 'add learner failed' 65 | # ok = pd.add_peer(target_region_id, target_store) 66 | # assert ok, 'add peer failed' 67 | 68 | # wait till learner is added 69 | # log.msg('wait till region peers count == 2') 70 | # wait_till_true()(lambda: len(pd.get_region(region_id)['peers']) == default_peers_count + 1) 71 | # wait_till_true()(lambda: len(pd.get_region(target_region_id)['peers']) == default_peers_count + 1) 72 | 73 | log.msg('sleep for 2s to wait learner apply snapshot') 74 | time.sleep(2) 75 | 76 | # partition target store 77 | target_store_port = None 78 | target_store_pid = None 79 | for store in stores: 80 | store_meta = store['store'] 81 | if store_meta['id'] == target_store: 82 | # port will be the proxy port, for example: 30160 83 | port = store_meta['address'].split(':')[1] 84 | # HACK: we 
convert 30160 to 20160 85 | target_store_port = '2' + port[1:] 86 | instances = pg.list_instances() 87 | for inst in instances: 88 | if inst.port == target_store_port: 89 | target_store_pid = inst.pid 90 | pg.partition(target_store_pid) 91 | 92 | pd.merge_region(region_id, target_region_id) 93 | 94 | input(f'the region id is {region_id}: ') 95 | 96 | pg.unpartition(target_store_pid) 97 | conn.close() 98 | 99 | # TODO: use tikv-ctl to check the region 100 | -------------------------------------------------------------------------------- /archive/tilo/cases/test_7444.py: -------------------------------------------------------------------------------- 1 | from tilo.clients import PdClient, PlaygroundClient, new_sql_conn 2 | 3 | 4 | def test_pull_7444(): 5 | pd = PdClient() 6 | conn = new_sql_conn() 7 | pg = PlaygroundClient() 8 | 9 | # try to disable auto region-merge 10 | pd.config_set('disable-remove-extra-replica', 'true') 11 | pd.config_set('patrol-region-interval', '50000ms') 12 | 13 | with conn.cursor() as cur: 14 | cur.execute("drop table if exists t;") 15 | cur.execute("create table t (a int primary key);") 16 | cur.execute("split table t between (0) and (10000) regions 3;") 17 | cur.execute("show table t regions;") 18 | rows = cur.fetchall() 19 | 20 | regions = [] 21 | for row in rows: 22 | regions.append((row['REGION_ID'], row['START_KEY'])) 23 | regions = sorted(regions, key=lambda v: v[1]) 24 | 25 | # insert data into three regions 26 | cur.execute("insert into t values (1111), (4444), (7777);") 27 | cur.execute("select * from t;") 28 | print(cur.fetchall()) 29 | 30 | print('three regions have been created') 31 | 32 | assert len(regions) == 3 33 | left, center, right = regions[0][0], regions[1][0], regions[2][0] 34 | 35 | instances = pg.list_instances() 36 | stores = pd.list_stores() 37 | tikvs = {} # {port: store_id} 38 | for store in stores: 39 | store_meta = store['store'] 40 | # port will be the proxy port, for example: 30160 41 | port = 
store_meta['address'].split(':')[1] 42 | # HACK: we convert 30160 to 20160 43 | port = '2' + port[1:] 44 | tikvs[port] = store_meta['id'] 45 | 46 | # [(store_id, pid, port), ] 47 | pg_stores = [] 48 | for instance in instances: 49 | port = str(instance.port) 50 | store_id = tikvs.get(port) 51 | if store_id is not None: 52 | pg_stores.append((store_id, instance.pid, port)) 53 | 54 | print(pg_stores) 55 | print('transfer leader', left, pg_stores[0][0]) 56 | print('transfer leader', right, pg_stores[0][0]) 57 | print('transfer leader', center, pg_stores[1][0]) 58 | # transfer left&right to pg_stores 0 59 | pd.transfer_leader(left, pg_stores[0][0]) 60 | pd.transfer_leader(right, pg_stores[0][0]) 61 | # transfer center to pg_stores 1 62 | pd.transfer_leader(center, pg_stores[1][0]) 63 | 64 | # wait for the leader to transfere 65 | time.sleep(1) 66 | log.msg('wait for leader transfer') 67 | wait_till_true()(lambda: pd.get_region(left)['leader']['store_id'] == pg_stores[0][0]) 68 | wait_till_true()(lambda: pd.get_region(right)['leader']['store_id'] == pg_stores[0][0]) 69 | wait_till_true()(lambda: pd.get_region(center)['leader']['store_id'] == pg_stores[1][0]) 70 | print('leader transfer finished') 71 | # partition pg_stores 2 72 | log.msg(f'partition store {pg_stores[2][0]}') 73 | assert pg.partition(pg_stores[2][1]) is True 74 | 75 | # merge left&right to center, and wait 76 | print('merge region begin') 77 | while True: 78 | if pd.merge_region(left, center): 79 | break 80 | time.sleep(1) 81 | while True: 82 | if pd.merge_region(right, center): 83 | break 84 | time.sleep(1) 85 | print('merge region finished') 86 | 87 | with conn.cursor() as cur: 88 | while True: 89 | time.sleep(1) 90 | cur.execute("show table t regions") 91 | rows = cur.fetchall() 92 | 93 | # left and right region are merged into center region 94 | if len(rows) == 1: 95 | assert rows[0]['REGION_ID'] == center 96 | break 97 | 98 | # insert data to center region 99 | with conn.cursor() as cur: 100 | 
cur.execute("insert into t values (2222), (5555), (8888);") 101 | 102 | # manual check log 103 | print('please check log', pg_stores[2], center) 104 | input() 105 | 106 | # recover pg_stores 2 network 107 | log.msg(f'unpartition store {pg_stores[2][0]}') 108 | assert pg.unpartition(pg_stores[2][1]) is True 109 | assert pg.unpartition(pg_stores[1][1]) is True 110 | 111 | pd.transfer_leader(center, pg_stores[2][0]) 112 | wait_till_true()(lambda: pd.get_region(center)['leader']['store_id'] == pg_stores[2][0]) 113 | 114 | with conn.cursor() as cur: 115 | cur.execute("insert into t values (3333), (6666), (9999);") 116 | cur.execute("select * from t;") 117 | print(cur.fetchall()) 118 | -------------------------------------------------------------------------------- /archive/tilo/cases/test_real_7386.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import structlog 4 | 5 | from tilo.clients import PdClient, PlaygroundClient, new_sql_conn 6 | from tilo.clients import wait_till_true 7 | 8 | log = structlog.get_logger() 9 | 10 | 11 | def test_real_issue_7386(): 12 | log.msg('test real 7386') 13 | 14 | pd = PdClient() 15 | pg = PlaygroundClient() 16 | conn = new_sql_conn() 17 | 18 | # try to disable auto region-merge 19 | pd.config_set('disable-remove-extra-replica', 'true') 20 | pd.config_set('patrol-region-interval', '50000ms') 21 | 22 | with conn.cursor() as cur: 23 | cur.execute("drop table if exists t;") 24 | cur.execute("create table t (a int primary key);") 25 | cur.execute("split table t between (0) and (10000) regions 2;") 26 | cur.execute("show table t regions;") 27 | rows = cur.fetchall() 28 | assert len(rows) == 2 29 | region_id = rows[1]['REGION_ID'] 30 | target_region_id = rows[0]['REGION_ID'] 31 | log.msg(f'region id: {region_id}, target region id: {target_region_id}') 32 | 33 | region = pd.get_region(region_id) 34 | default_peers_count = len(region['peers']) 35 | log.msg(f'default peer count 
{default_peers_count}') 36 | 37 | # get a free store to add learner 38 | stores = pd.list_stores() 39 | store_ids = [store['store']['id'] for store in stores 40 | if 'labels' not in store['store']] 41 | used_stores = [peer['store_id'] for peer in region['peers']] 42 | target_store = [store_id for store_id in store_ids 43 | if store_id not in used_stores][0] 44 | log.msg(f'{store_ids}, {target_store}') 45 | 46 | # add learner 47 | log.msg('add learner and peer for regions') 48 | ok = pd.add_learner(region_id, target_store) 49 | assert ok, 'add learner failed' 50 | #ok = pd.add_peer(target_region_id, target_store) 51 | #assert ok, 'add peer failed' 52 | 53 | # wait till learner is added 54 | log.msg('wait till region peers count == 2') 55 | wait_till_true()(lambda: len(pd.get_region(region_id)['peers']) == default_peers_count + 1) 56 | # wait_till_true()(lambda: len(pd.get_region(target_region_id)['peers']) == default_peers_count + 1) 57 | 58 | log.msg('sleep for 2s to wait learner apply snapshot') 59 | time.sleep(2) 60 | 61 | # partition target store 62 | target_store_port = None 63 | target_store_pid = None 64 | for store in stores: 65 | store_meta = store['store'] 66 | if store_meta['id'] == target_store: 67 | # port will be the proxy port, for example: 30160 68 | port = store_meta['address'].split(':')[1] 69 | # HACK: we convert 30160 to 20160 70 | target_store_port = '2' + port[1:] 71 | instances = pg.list_instances() 72 | for inst in instances: 73 | if inst.port == target_store_port: 74 | target_store_pid = inst.pid 75 | pg.partition(target_store_pid) 76 | 77 | pd.remove_peer(region_id, target_store) 78 | log.msg('sleep for 1s to wait remove peer') 79 | time.sleep(1) 80 | 81 | pd.merge_region(region_id, target_region_id) 82 | 83 | input(f'the region id is {region_id}: ') 84 | 85 | pg.unpartition(target_store_pid) 86 | conn.close() 87 | 88 | # TODO: use tikv-ctl to check the region 89 | 
-------------------------------------------------------------------------------- /archive/tilo/poetry.lock: -------------------------------------------------------------------------------- 1 | [[package]] 2 | name = "certifi" 3 | version = "2020.11.8" 4 | description = "Python package for providing Mozilla's CA Bundle." 5 | category = "main" 6 | optional = false 7 | python-versions = "*" 8 | 9 | [[package]] 10 | name = "chardet" 11 | version = "3.0.4" 12 | description = "Universal encoding detector for Python 2 and 3" 13 | category = "main" 14 | optional = false 15 | python-versions = "*" 16 | 17 | [[package]] 18 | name = "idna" 19 | version = "2.10" 20 | description = "Internationalized Domain Names in Applications (IDNA)" 21 | category = "main" 22 | optional = false 23 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 24 | 25 | [[package]] 26 | name = "pymysql" 27 | version = "0.10.1" 28 | description = "Pure Python MySQL Driver" 29 | category = "main" 30 | optional = false 31 | python-versions = "*" 32 | 33 | [package.extras] 34 | ed25519 = ["PyNaCl (>=1.4.0)"] 35 | rsa = ["cryptography"] 36 | 37 | [[package]] 38 | name = "requests" 39 | version = "2.25.0" 40 | description = "Python HTTP for Humans." 
41 | category = "main" 42 | optional = false 43 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" 44 | 45 | [package.dependencies] 46 | certifi = ">=2017.4.17" 47 | chardet = ">=3.0.2,<4" 48 | idna = ">=2.5,<3" 49 | urllib3 = ">=1.21.1,<1.27" 50 | 51 | [package.extras] 52 | security = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)"] 53 | socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"] 54 | 55 | [[package]] 56 | name = "six" 57 | version = "1.15.0" 58 | description = "Python 2 and 3 compatibility utilities" 59 | category = "main" 60 | optional = false 61 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" 62 | 63 | [[package]] 64 | name = "structlog" 65 | version = "20.1.0" 66 | description = "Structured Logging for Python" 67 | category = "main" 68 | optional = false 69 | python-versions = "*" 70 | 71 | [package.dependencies] 72 | six = "*" 73 | 74 | [package.extras] 75 | azure-pipelines = ["coverage", "freezegun (>=0.2.8)", "pretend", "pytest (>=3.3.0)", "simplejson", "pytest-azurepipelines", "python-rapidjson", "pytest-asyncio"] 76 | dev = ["coverage", "freezegun (>=0.2.8)", "pretend", "pytest (>=3.3.0)", "simplejson", "sphinx", "twisted", "pre-commit", "python-rapidjson", "pytest-asyncio"] 77 | docs = ["sphinx", "twisted"] 78 | tests = ["coverage", "freezegun (>=0.2.8)", "pretend", "pytest (>=3.3.0)", "simplejson", "python-rapidjson", "pytest-asyncio"] 79 | 80 | [[package]] 81 | name = "urllib3" 82 | version = "1.26.2" 83 | description = "HTTP library with thread-safe connection pooling, file post, and more." 
84 | category = "main" 85 | optional = false 86 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" 87 | 88 | [package.extras] 89 | brotli = ["brotlipy (>=0.6.0)"] 90 | secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] 91 | socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] 92 | 93 | [metadata] 94 | lock-version = "1.1" 95 | python-versions = "^3.6" 96 | content-hash = "222808040ba3f5ab0a717f91a996992df06ced848fa3262a0031598c0788c62e" 97 | 98 | [metadata.files] 99 | certifi = [ 100 | {file = "certifi-2020.11.8-py2.py3-none-any.whl", hash = "sha256:1f422849db327d534e3d0c5f02a263458c3955ec0aae4ff09b95f195c59f4edd"}, 101 | {file = "certifi-2020.11.8.tar.gz", hash = "sha256:f05def092c44fbf25834a51509ef6e631dc19765ab8a57b4e7ab85531f0a9cf4"}, 102 | ] 103 | chardet = [ 104 | {file = "chardet-3.0.4-py2.py3-none-any.whl", hash = "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"}, 105 | {file = "chardet-3.0.4.tar.gz", hash = "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae"}, 106 | ] 107 | idna = [ 108 | {file = "idna-2.10-py2.py3-none-any.whl", hash = "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0"}, 109 | {file = "idna-2.10.tar.gz", hash = "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6"}, 110 | ] 111 | pymysql = [ 112 | {file = "PyMySQL-0.10.1-py2.py3-none-any.whl", hash = "sha256:44f47128dda8676e021c8d2dbb49a82be9e4ab158b9f03e897152a3a287c69ea"}, 113 | {file = "PyMySQL-0.10.1.tar.gz", hash = "sha256:263040d2779a3b84930f7ac9da5132be0fefcd6f453a885756656103f8ee1fdd"}, 114 | ] 115 | requests = [ 116 | {file = "requests-2.25.0-py2.py3-none-any.whl", hash = "sha256:e786fa28d8c9154e6a4de5d46a1d921b8749f8b74e28bde23768e5e16eece998"}, 117 | {file = "requests-2.25.0.tar.gz", hash = "sha256:7f1a0b932f4a60a1a65caa4263921bb7d9ee911957e0ae4a23a6dd08185ad5f8"}, 118 | ] 119 | six = [ 120 | {file = 
"six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"}, 121 | {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"}, 122 | ] 123 | structlog = [ 124 | {file = "structlog-20.1.0-py2.py3-none-any.whl", hash = "sha256:8a672be150547a93d90a7d74229a29e765be05bd156a35cdcc527ebf68e9af92"}, 125 | {file = "structlog-20.1.0.tar.gz", hash = "sha256:7a48375db6274ed1d0ae6123c486472aa1d0890b08d314d2b016f3aa7f35990b"}, 126 | ] 127 | urllib3 = [ 128 | {file = "urllib3-1.26.2-py2.py3-none-any.whl", hash = "sha256:d8ff90d979214d7b4f8ce956e80f4028fc6860e4431f731ea4a8c08f23f99473"}, 129 | {file = "urllib3-1.26.2.tar.gz", hash = "sha256:19188f96923873c92ccb987120ec4acaa12f0461fa9ce5d3d0772bc965a39e08"}, 130 | ] 131 | -------------------------------------------------------------------------------- /archive/tilo/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "tilo" 3 | version = "0.1.0" 4 | description = "" 5 | authors = ["cosven "] 6 | 7 | [tool.poetry.dependencies] 8 | python = "^3.6" 9 | requests = "^2.25.0" 10 | structlog = "^20.1.0" 11 | PyMySQL = "^0.10.1" 12 | 13 | [tool.poetry.dev-dependencies] 14 | 15 | [build-system] 16 | requires = ["poetry-core>=1.0.0"] 17 | build-backend = "poetry.core.masonry.api" 18 | -------------------------------------------------------------------------------- /archive/tilo/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cosven/tidb-testing/8e1ae96a45a85de11d905801bbf7914dc27c7e98/archive/tilo/tests/__init__.py -------------------------------------------------------------------------------- /archive/tilo/tests/codec/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cosven/tidb-testing/8e1ae96a45a85de11d905801bbf7914dc27c7e98/archive/tilo/tests/codec/__init__.py -------------------------------------------------------------------------------- /archive/tilo/tests/codec/test_tikv.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from tilo.codec.tikv import decode_write_key 4 | 5 | 6 | @pytest.fixture 7 | def key1(): 8 | return (b"t\x80\x00\x00\x00\x00\x00\x00\xff\x0f_" 9 | b"r\x80\x00\x00\x00\x00\xff\x00\x02\n\x00" 10 | b"\x00\x00\x00\x00\xfa\xfa'\xe3\x94\xb1\xf7\xff\xf9") 11 | 12 | 13 | def test_decode_write_key(key1): 14 | user_key, ts = decode_write_key(key1) 15 | print(ts >> 18) 16 | print(ts - (ts >> 18 << 18)) 17 | -------------------------------------------------------------------------------- /archive/tilo/tilo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cosven/tidb-testing/8e1ae96a45a85de11d905801bbf7914dc27c7e98/archive/tilo/tilo/__init__.py -------------------------------------------------------------------------------- /archive/tilo/tilo/clients.py: -------------------------------------------------------------------------------- 1 | import time 2 | from collections import namedtuple 3 | from urllib.parse import urlparse 4 | 5 | import requests 6 | import pymysql 7 | import structlog 8 | 9 | 10 | log = structlog.get_logger() 11 | 12 | 13 | def new_sql_conn(db_uri='mysql://root@127.0.0.1:4000'): 14 | """ 15 | 1. random choose one connection info 16 | 2. 
create a connection and use test database 17 | """ 18 | result = urlparse(db_uri) 19 | return pymysql.connect(host=result.hostname, 20 | port=result.port, 21 | user=result.username, 22 | password=result.password or '', 23 | 24 | db='test', 25 | charset='utf8mb4', 26 | cursorclass=pymysql.cursors.DictCursor) 27 | 28 | 29 | def wait_till_true(interval=1): 30 | def wrapper(func, *args, **kwargs): 31 | while True: 32 | if func(*args, **kwargs) is True: 33 | break 34 | time.sleep(interval) 35 | return wrapper 36 | 37 | 38 | class PdClient: 39 | def __init__(self, url='http://127.0.0.1:2379'): 40 | self._url = url 41 | self._pd_api = f'{url}/pd/api/v1' 42 | 43 | def add_learner(self, region_id, store_id): 44 | js = { 45 | 'name': 'add-learner', 46 | 'region_id': region_id, 47 | 'store_id': store_id, 48 | } 49 | return self._add_operator(js) 50 | 51 | def add_peer(self, region_id, store_id): 52 | js = { 53 | 'name': 'add-peer', 54 | 'region_id': region_id, 55 | 'store_id': store_id, 56 | } 57 | return self._add_operator(js) 58 | 59 | def remove_peer(self, region_id, store_id): 60 | js = { 61 | 'name': 'remove-peer', 62 | 'region_id': region_id, 63 | 'store_id': store_id, 64 | } 65 | return self._add_operator(js) 66 | 67 | def merge_region(self, source_region_id, target_region_id): 68 | js = { 69 | 'name': 'merge-region', 70 | 'source_region_id': source_region_id, 71 | 'target_region_id': target_region_id, 72 | } 73 | return self._add_operator(js) 74 | 75 | def transfer_leader(self, region_id, store_id): 76 | js = { 77 | 'name': 'transfer-leader', 78 | 'region_id': region_id, 79 | 'to_store_id': store_id, 80 | } 81 | return self._add_operator(js) 82 | 83 | def _add_operator(self, js): 84 | log.msg('add opereator', **js) 85 | resp = requests.post(f'{self._pd_api}/operators', json=js) 86 | if resp.status_code == 200: 87 | return True 88 | log.debug(f'failed: {resp.text}', **js) 89 | return False 90 | 91 | def config_set(self, key, value): 92 | js = { 93 | key: value 94 | } 
95 | resp = requests.post(f'{self._pd_api}/config', json=js) 96 | return resp.status_code == 200 97 | 98 | def get_region(self, region_id): 99 | resp = requests.get(f'{self._pd_api}/region/id/{region_id}') 100 | js = resp.json() 101 | return js 102 | 103 | def list_stores(self): 104 | resp = requests.get(f'{self._pd_api}/stores') 105 | js = resp.json() 106 | return js['stores'] 107 | 108 | def apply_placement_rule(self, js): 109 | log.msg(f'apply placement rule', rule=js) 110 | resp = requests.post(f'{self._pd_api}/config/rule', json=js) 111 | if resp.status_code == 200: 112 | return True 113 | log.debug(f'failed: {resp.text}') 114 | return False 115 | 116 | 117 | PlaygroundInstance = namedtuple('PlaygroundInstance', ['pid', 'role', 'uptime', 'port']) 118 | 119 | 120 | class PlaygroundClient: 121 | def __init__(self, url='http://127.0.0.1:9527'): 122 | self._url = url 123 | 124 | def list_instances(self): 125 | resp = self._send_command('display') 126 | instances = [] 127 | for line in resp.text.split('\n')[2:]: 128 | parts = line.split() 129 | if len(parts) == 4: 130 | instance = PlaygroundInstance(*parts) 131 | instances.append(instance) 132 | return instances 133 | 134 | def partition(self, pid): 135 | resp = self._send_command('partition', pid) 136 | return resp.status_code == 200 137 | 138 | def unpartition(self, pid): 139 | resp = self._send_command('unpartition', pid) 140 | return resp.status_code == 200 141 | 142 | def _send_command(self, cmd_type, pid=None): 143 | js = { 144 | 'CommandType': cmd_type, 145 | } 146 | if pid is not None: 147 | js['PID'] = int(pid) 148 | return requests.post(f'{self._url}/command', json=js) 149 | -------------------------------------------------------------------------------- /archive/tilo/tilo/codec/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cosven/tidb-testing/8e1ae96a45a85de11d905801bbf7914dc27c7e98/archive/tilo/tilo/codec/__init__.py 
-------------------------------------------------------------------------------- /archive/tilo/tilo/codec/bytes_.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 4 | ENC_GROUP_SIZE = 8 5 | ENC_MARKER = b'\xFF' 6 | ENC_PAD = b'\x00' 7 | PADS = b'\x00' * ENC_GROUP_SIZE 8 | 9 | 10 | def encode_bytes(b: bytes, data: bytes) -> bytes: 11 | d_len = len(data) 12 | 13 | i = 0 14 | ba = bytearray(b) 15 | while i <= d_len: 16 | remain = d_len - i 17 | pad_count = 0 18 | if remain >= ENC_GROUP_SIZE: 19 | ba.extend(data[i:i+ENC_GROUP_SIZE]) 20 | else: 21 | pad_count = ENC_GROUP_SIZE - remain 22 | ba.extend(data[i:]) 23 | ba.extend(PADS[:pad_count]) 24 | num = int.from_bytes(ENC_MARKER, sys.byteorder, signed=False) 25 | marker = (num - pad_count).to_bytes(1, sys.byteorder, signed=False) 26 | ba.extend(marker) 27 | 28 | i += ENC_GROUP_SIZE 29 | return ba 30 | -------------------------------------------------------------------------------- /archive/tilo/tilo/codec/excs.py: -------------------------------------------------------------------------------- 1 | class CodecError: 2 | pass 3 | 4 | 5 | class DecodeError(CodecError): 6 | pass 7 | 8 | 9 | class InsufficientBytesError(DecodeError): 10 | """ 11 | insufficient bytes to decode value 12 | """ 13 | -------------------------------------------------------------------------------- /archive/tilo/tilo/codec/number.py: -------------------------------------------------------------------------------- 1 | from .excs import InsufficientBytesError 2 | 3 | 4 | i64_sign_mask = 0x8000000000000000 5 | uint64_mask = 0xffffffffffffffff 6 | 7 | 8 | def i64_to_u64(v: int) -> int: 9 | """ 10 | >>> i64_to_u64(-2) 11 | 18446744073709551614 12 | >>> i64_to_u64(0) 13 | 0 14 | >>> i64_to_u64(2) 15 | 2 16 | """ 17 | if v < 0: 18 | return (~(-v) + 1) & uint64_mask 19 | return v 20 | 21 | 22 | def encode_int_to_cmp_uint(v: int) -> int: 23 | """ 24 | :param v: int64 25 | :return: uint64 26 | 27 | >>> 
encode_int_to_cmp_uint(-1) 28 | 9223372036854775807 29 | """ 30 | return i64_to_u64(v) ^ i64_sign_mask 31 | 32 | 33 | def encode_int(b: bytes, v: int) -> bytes: 34 | """ 35 | :param v: int64 36 | 37 | >>> import binascii 38 | >>> binascii.hexlify(encode_int(bytearray(), 1)) 39 | b'8000000000000001' 40 | """ 41 | u = encode_int_to_cmp_uint(v) 42 | return b + u.to_bytes(8, byteorder='big', signed=False) 43 | 44 | 45 | def decode_uint_desc(b: bytes) -> (bytes, int): 46 | """decodes value encoded by encode_int before 47 | 48 | It returns the leftover un-decoded slice, decoded value if no error. 49 | """ 50 | if len(b) < 8: 51 | raise InsufficientBytesError 52 | data = b[:8] 53 | v = int.from_bytes(data, byteorder='big', signed=False) 54 | return b[8:], (~v & 0xffffffffffffffff) 55 | -------------------------------------------------------------------------------- /archive/tilo/tilo/codec/tidb.py: -------------------------------------------------------------------------------- 1 | from .number import encode_int 2 | from .bytes_ import encode_bytes 3 | 4 | 5 | def encode_int_row_key(table_id: int, row_id: int) -> bytes: 6 | """ 7 | 8 | >>> encode_int_row_key(11, 10).hex() 9 | '7480000000000000ff0b5f728000000000ff00000a0000000000fa' 10 | """ 11 | result = bytearray() 12 | result.extend(b't') 13 | result.extend(encode_int(b'', table_id)) 14 | result.extend(b'_r') 15 | result.extend(encode_int(b'', row_id)) 16 | return encode_bytes(b'', result) 17 | -------------------------------------------------------------------------------- /archive/tilo/tilo/codec/tikv.py: -------------------------------------------------------------------------------- 1 | from .number import decode_uint_desc 2 | 3 | 4 | def decode_write_key(key: bytes): 5 | ts_len = 8 6 | if len(key) < ts_len: 7 | raise InsufficientBytesError 8 | ts_bytes = key[-ts_len:] 9 | user_key = key[:-ts_len] 10 | _, ts = decode_uint_desc(user_key) 11 | return user_key, ts 12 | 
-------------------------------------------------------------------------------- /bin/artifacts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Install 5 | ------- 6 | 7 | pip3 install aiohttp pyyaml requests click 8 | """ 9 | 10 | import asyncio 11 | import sys 12 | import os 13 | from functools import wraps 14 | 15 | import aiohttp 16 | import click 17 | import requests 18 | import yaml 19 | 20 | 21 | with open(os.path.expanduser('~/.config/gh/config.yml')) as f: 22 | data = yaml.safe_load(f) 23 | token = data['hosts']['github.com']['oauth_token'] 24 | 25 | 26 | headers={"Authorization": f"token {token}"} 27 | 28 | 29 | class new_session: 30 | def __init__(self): 31 | self._s = aiohttp.ClientSession(headers=headers) 32 | 33 | async def __aenter__(self): 34 | return self._s 35 | 36 | async def __aexit__(self, exc_type, exc_value, traceback): 37 | await self._s.close() 38 | 39 | 40 | def coro(f): 41 | @wraps(f) 42 | def wrapper(*args, **kwargs): 43 | loop = asyncio.get_event_loop() 44 | return loop.run_until_complete(f(*args, **kwargs)) 45 | return wrapper 46 | 47 | 48 | def cook_repo_with_defaults(repo): 49 | if '/' not in repo: 50 | owner = 'pingcap' 51 | if repo == 'tikv': 52 | owner = 'tikv' 53 | return f'{owner}/{repo}' 54 | return repo 55 | 56 | 57 | def cook_binary_url(component, sha1): 58 | tar_name = f'{component}-server' 59 | return f'http://fileserver.pingcap.net/download/builds/pingcap/'\ 60 | f'{component}/pr/{sha1}/centos7/{tar_name}.tar.gz' 61 | 62 | 63 | async def list_repo_latest_commits( 64 | s, repo, sha='master', limit=None, since=None, timeout=5): 65 | """ 66 | yield (sha, msg_title) 67 | """ 68 | repo = cook_repo_with_defaults(repo) 69 | params = {'sha': sha} 70 | if since is not None: 71 | params['since'] = since 72 | url = f'https://api.github.com/repos/{repo}/commits' 73 | async with s.get(url, timeout=timeout, params=params) as resp: 74 | commits = await 
resp.json() 75 | if limit is not None: 76 | commits = commits[:limit] 77 | for commit in commits: 78 | yield commit 79 | 80 | 81 | async def get_repo_commit(s, repo, sha1, timeout=2): 82 | """ 83 | return (msg_title, author_name, date) 84 | """ 85 | repo = cook_repo_with_defaults(repo) 86 | url = f'https://api.github.com/repos/{repo}/commits/{sha1}' 87 | async with s.get(url, timeout=timeout) as resp: 88 | commit = await resp.json() 89 | commit = commit['commit'] 90 | author = commit['author'] 91 | author_name = author['name'] 92 | date = author['date'] 93 | msg_title = commit['message'].split('\n\n')[0] 94 | return (msg_title, author_name, date) 95 | 96 | 97 | async def get_latest_binary_sha1(s, 98 | repo, branch='master', timeout=2): 99 | url = f'http://fileserver.pingcap.net/download/refs/pingcap/{repo}/{branch}/sha1' 100 | async with s.get(url) as resp: 101 | return await resp.text() 102 | 103 | 104 | @click.command() 105 | @click.argument('repo', default='tidb') 106 | @click.argument('sha', default='master') 107 | @click.option('-v', '--verbose', count=True) 108 | @coro 109 | async def cli(repo, sha, verbose): 110 | """show artifacts of REPO/SHA 111 | \b 112 | Usage examples: 113 | * tirelease artifacts tidb release-4.0 114 | """ 115 | click.echo(f'{repo}#{sha}', nl=False) 116 | 117 | loop = asyncio.get_event_loop() 118 | is_branch = len(sha) != 40 119 | async with new_session() as s: 120 | if is_branch: 121 | click.echo() 122 | latest_binary_sha1 = await get_latest_binary_sha1(s, repo, sha) 123 | latest_binary_sha1 = latest_binary_sha1.strip() 124 | async for commit in list_repo_latest_commits(s, repo, sha, limit=5): 125 | sha1, _ = commit['sha'], commit['commit']['message'].split('\n\n')[0] 126 | if sha1 == latest_binary_sha1: 127 | sha1_styled = click.style(sha1, fg='green') 128 | else: 129 | sha1_styled = sha1 130 | click.echo(f' {sha1_styled}') 131 | binary_url = cook_binary_url(repo, latest_binary_sha1) 132 | click.echo(f'url: {binary_url}') 133 | else: 
134 |             task = loop.create_task(get_repo_commit(s, repo, sha))
135 |             binary_url = cook_binary_url(repo, sha)
136 |             resp = await s.head(binary_url)
137 |             ok = resp.status == 200
138 |             if ok:
139 |                 status = click.style('ok', fg='green')
140 |             else:
141 |                 status = click.style('not found', fg='red')
142 |             click.echo(f' ...{status}')
143 | 
144 |             msg_title, author_name, date = await task
145 |             click.echo(f' {msg_title}')
146 |             click.echo(f' {author_name} - {date}')
147 |             if ok:
148 |                 click.echo(f'{binary_url}')
149 |             else:
150 |                 sys.exit(1)
151 | 
152 | 
153 | if __name__ == '__main__':
154 |     cli()
155 | 
--------------------------------------------------------------------------------
/bin/case2pr:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | [[ -z $1 ]] && echo "Usage: $0 " && exit 1
4 | # BUGFIX: use { ...; } brace grouping so `exit` terminates the script;
5 | # a ( ... ) subshell only exits the subshell and the script keeps running.
6 | gh --version >/dev/null 2>&1 || { echo "please install github-cli"; exit 1; }
7 | 
8 | case=$1
9 | 
10 | # if grep failed, just exit
11 | git grep -n $case || { echo "case not found"; exit 1; }
12 | 
13 | info=`git grep -n $case | head -1 | cut -d':' -f 1,2`
14 | filename=`echo $info | cut -d':' -f 1`
15 | lineno=`echo $info | cut -d':' -f 2`
16 | 
17 | # show blame info
18 | git blame -L $lineno,+1 -- $filename
19 | 
20 | commit=`git blame -L $lineno,+1 -- $filename | cut -d' ' -f 1`
21 | 
22 | # show tags which contains this commit
23 | git tag --contains $commit
24 | 
25 | # show commit message
26 | git log --format=%B -n 1 $commit | head -n 1
27 | 
28 | title=`git log --format=%B -n 1 $commit | head -n 1`
29 | 
30 | pr=`echo $title | sed "s/.*(#\([0-9]*\))/\1/"`
31 | echo "gh pr view $pr"
32 | 
33 | gh pr view $pr | grep "url:"
34 | 
--------------------------------------------------------------------------------
/bin/jira_to_gspread.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | 
3 | import logging
4 | import os
5 | from datetime import datetime
6 | 
7 | import gspread
8 | 
import jira


# Setup logger
logging.basicConfig(
    format='[%(asctime)s %(name)s:%(lineno)d] [%(levelname)s]: %(message)s',
    level=logging.INFO)
logger = logging.getLogger()

try:
    JIRA_ENDPOINT = os.environ['JIRA_ENDPOINT']
    JIRA_USERNAME = os.environ['JIRA_USERNAME']
    JIRA_PASSWORD = os.environ['JIRA_PASSWORD']
except KeyError:
    raise SystemExit('Please set proper env vars.')

DEFAULT_FROM_DATE = datetime.strptime('2021-04-12', '%Y-%m-%d')

# Column layout of the tracking sheet.
# NOTE(review): rows are read back and unpacked as (key, status, versions);
# the order declared here ('version' before 'status') looks swapped relative
# to that — confirm against the actual sheet before relying on this list.
SPREAD_COLUMNS = [
    'oncall_id',
    'version',
    'status'
]


def list_oncall_issues(from_date=DEFAULT_FROM_DATE, from_issue='', max_results=1000):
    """Fetch oncall issues created since *from_date* from JIRA.

    Args:
        from_date: datetime lower bound on issue creation.
        from_issue: unused; kept for backward compatibility.
        max_results: JQL search page cap.

    Returns:
        List of 5-tuples ``(key, status, versions, summary, root_cause)``
        where ``versions`` is a comma-joined string of affected versions.
    """
    jira_cli = jira.JIRA('https://internal.pingcap.net/jira',
                         basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
    jql_tpl = 'project=oncall and created>={from_date} order by created ASC'
    jql_ctx = {'from_date': from_date.strftime(format='%Y-%m-%d')}
    jql = jql_tpl.format(**jql_ctx)
    logger.info(f'jql is: {jql}')
    issues = jira_cli.search_issues(jql, maxResults=max_results)
    results = []
    for issue in issues:
        versions = issue.fields.versions
        version_names = [version.name for version in versions]
        results.append((issue.key,
                        issue.fields.status.name,
                        ','.join(version_names),
                        issue.fields.summary,
                        issue.fields.customfield_10321 or '',  # root cause
                        ))
    return results


def update_spreadsheet(issue_iterator):
    """Sync oncall issues into the tracking Google spreadsheet.

    Existing rows (matched by oncall key in column A) get status/versions
    (columns B:C) refreshed in place; unseen issues are appended in one
    batch with the summary rendered as a HYPERLINK to JIRA.
    """
    gc = gspread.oauth()
    sh = gc.open_by_key('1yEEcY2cXzcljgt5EWE6VTdlmmcox3QHg4ETR5IkZtwM')
    worksheet = sh.get_worksheet(0)

    # Map oncall key -> (1-based sheet row id, (key, status, versions)).
    oncall_row_mapping = {}
    recorded_oncall_ids = worksheet.col_values(1)
    rows = worksheet.get(f'A2:C{len(recorded_oncall_ids) + 1}')
    for i, row in enumerate(rows):
        key, status, versions = row
        oncall_row_mapping[key] = (i + 2, (key, status, versions))

    # Update or add issues
    next_row_id = len(rows) + 2
    issues_to_be_added = []
    for issue in issue_iterator:
        key, *_ = issue
        if key in oncall_row_mapping:
            row_id, row = oncall_row_mapping[key]
            # Only compare issue status and versions
            if row[1:3] != issue[1:3]:
                # Bug fix: write to the matched row (row_id) — the old code
                # wrote to next_row_id, i.e. past the end of the table — and
                # write exactly the two changed columns B:C (the old range
                # B:D spanned three columns for two values).
                worksheet.update(f'B{row_id}:C{row_id}', [list(issue[1:3])])
                logger.info(f'row:{row_id} {row[0]} is updated.')
            else:
                logger.info(f'row:{row_id} {row[0]} is unchanged.')
        else:
            issues_to_be_added.append(list(issue))
    if issues_to_be_added:
        for issue in issues_to_be_added:
            key, _, _, summary, _ = issue
            if JIRA_ENDPOINT.endswith('/'):
                url = f'{JIRA_ENDPOINT}browse/{key}'
            else:
                url = f'{JIRA_ENDPOINT}/browse/{key}'
            issue_link = f'=HYPERLINK("{url}","{summary}")'
            issue[3] = issue_link  # summary
        # Bug fix: rows next_row_id .. next_row_id+len-1 inclusive; the old
        # range was one row too long.
        last_row = next_row_id + len(issues_to_be_added) - 1
        table_range = f'A{next_row_id}:E{last_row}'
        # Add new issues in batch
        worksheet.append_rows(issues_to_be_added,
                              value_input_option='USER_ENTERED',
                              table_range=table_range)
        logger.info(f'{table_range} is added.')


def main():
    issues = list_oncall_issues(max_results=1000)
    update_spreadsheet(issues)


if __name__ == '__main__':
    main()
#############################
# utils
#############################

def coro(f):
    """Adapt an async click command so click can invoke it synchronously.

    Creates the module-global aiohttp session, runs the wrapped coroutine
    to completion on the event loop, and always closes the session.
    """
    @wraps(f)
    def runner(*args, **kwargs):
        async def driver():
            global http
            http = aiohttp.ClientSession()
            try:
                await f(*args, **kwargs)
            finally:
                await http.close()
        event_loop = asyncio.get_event_loop()
        return event_loop.run_until_complete(driver())
    return runner


class new_http_session:
    """Async context manager that owns a fresh aiohttp ClientSession."""

    def __init__(self, *args, **kwargs):
        # The session is created eagerly and closed on context exit.
        self._s = aiohttp.ClientSession(*args, **kwargs)

    async def __aenter__(self):
        return self._s

    async def __aexit__(self, exc_type, exc_value, traceback):
        await self._s.close()


@click.group()
def cli():
    pass


@cli.command()
@click.argument('pd', default='http://127.0.0.1:2379')
@coro
async def list_tikvs(pd):
    """list tikvs status"""
    columns = [
        'id',
        'addr',
        'state',
        'uptime',
        'region_size'
    ]
    pd_api = f'{pd}/pd/api/v1'
    async with http.get(f'{pd_api}/stores') as resp:
        payload = await resp.json()
    # One table row per store: metadata fields first, then live status.
    table = [
        (
            store['store']['id'],
            store['store']['address'],
            store['store']['state_name'],
            store['status']['uptime'],
            store['status']['region_size'],
        )
        for store in payload['stores']
    ]

    click.echo(tabulate(table, columns))


if __name__ == '__main__':
    cli()
cpu: "8" 16 | memory: "8Gi" 17 | requests: 18 | storage: 10Gi 19 | config: {} 20 | tidb: 21 | image: pingcap/tidb:nightly 22 | replicas: 1 23 | service: 24 | type: NodePort 25 | limits: 26 | cpu: "16" 27 | memory: "16Gi" 28 | config: {} 29 | tiflash: 30 | image: pingcap/tiflash:nightly 31 | replicas: 1 32 | storageClaims: 33 | - resources: 34 | requests: 35 | storage: 5Gi 36 | limits: 37 | cpu: "3" 38 | memory: "8Gi" 39 | tikv: 40 | image: pingcap/tikv:nightly 41 | config: 42 | log-level: info 43 | coprocessor: 44 | region-max-size: "16MB" 45 | region-split-size: "10MB" 46 | region-max-keys: 144000 47 | region-split-keys: 96000 48 | raftstore: 49 | hibernate-regions: true 50 | replicas: 4 51 | requests: 52 | storage: 50Gi 53 | limits: 54 | cpu: "16" 55 | memory: "32Gi" 56 | 57 | --- 58 | 59 | apiVersion: pingcap.com/v1alpha1 60 | kind: TidbMonitor 61 | metadata: 62 | name: ctc 63 | spec: 64 | clusters: 65 | - name: ctc 66 | prometheus: 67 | baseImage: prom/prometheus 68 | version: v2.18.1 69 | grafana: 70 | baseImage: grafana/grafana 71 | version: 6.1.6 72 | service: 73 | type: NodePort 74 | initializer: 75 | baseImage: pingcap/tidb-monitor-initializer 76 | version: v4.0.2 77 | reloader: 78 | baseImage: pingcap/tidb-monitor-reloader 79 | version: v1.0.1 80 | imagePullPolicy: IfNotPresent 81 | -------------------------------------------------------------------------------- /hack/partition-one-tikv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: chaos-mesh.org/v1alpha1 2 | kind: NetworkChaos 3 | metadata: 4 | name: partition-one-tikv 5 | spec: 6 | action: partition 7 | mode: all 8 | selector: 9 | pods: 10 | cosven-debug-5544d: 11 | - tc-tikv-2 12 | direction: both 13 | target: 14 | mode: all 15 | selector: 16 | pods: 17 | cosven-debug-5544d: 18 | - tc-tikv-0 19 | - tc-tikv-1 20 | -------------------------------------------------------------------------------- /hack/random-partition-one-every-hour.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: chaos-mesh.org/v1alpha1 2 | kind: Schedule 3 | metadata: 4 | name: random-partition-tikv-every-hour 5 | spec: 6 | schedule: '@hourly' 7 | concurrencyPolicy: 'Forbid' 8 | type: 'NetworkChaos' 9 | networkChaos: 10 | action: partition 11 | mode: one 12 | selector: 13 | labelSelectors: 14 | app.kubernetes.io/component: tikv 15 | duration: '15s' 16 | direction: both 17 | target: 18 | mode: all 19 | selector: 20 | labelSelectors: 21 | app.kubernetes.io/component: tikv 22 | -------------------------------------------------------------------------------- /hack/tikv-dockerfile: -------------------------------------------------------------------------------- 1 | FROM pingcap/alpine-glibc 2 | 3 | COPY tikv-server /tikv-server 4 | COPY tikv-ctl /tikv-ctl 5 | 6 | EXPOSE 20160 20180 7 | 8 | ENTRYPOINT ["/tikv-server"] 9 | -------------------------------------------------------------------------------- /hack/tiup-localhost-topo-1pd1kv1db.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | user: "tidb" 3 | ssh_port: 22 4 | deploy_dir: "/data0/t111/deploy" 5 | data_dir: "/data0/t111/data" 6 | arch: "amd64" 7 | 8 | monitored: 9 | node_exporter_port: 19100 10 | blackbox_exporter_port: 19115 11 | 12 | server_configs: 13 | tikv: 14 | raftstore.apply-pool-size: 1 15 | 16 | pd_servers: 17 | - host: 0.0.0.0 18 | client_port: 12379 19 | peer_port: 12380 20 | 21 | tidb_servers: 22 | - host: 0.0.0.0 23 | port: 10160 24 | status_port: 10180 25 | 26 | tikv_servers: 27 | - host: 0.0.0.0 28 | port: 14000 29 | status_port: 11080 30 | 31 | monitoring_servers: 32 | - host: 0.0.0.0 33 | port: 19090 34 | 35 | grafana_servers: 36 | - host: 0.0.0.0 37 | port: 13000 38 | -------------------------------------------------------------------------------- /hack/tiup-localhost-topo-1pd3kv1db.yaml: 
-------------------------------------------------------------------------------- 1 | global: 2 | user: "root" 3 | ssh_port: 22 4 | deploy_dir: "/home/data0/t111/deploy" 5 | data_dir: "/home/data0/t111/data" 6 | arch: "amd64" 7 | 8 | monitored: 9 | node_exporter_port: 19100 10 | blackbox_exporter_port: 19115 11 | 12 | server_configs: 13 | tikv: 14 | raftstore.apply-pool-size: 1 15 | pd: 16 | replication.location-labels: ["host"] 17 | 18 | pd_servers: 19 | - host: 172.16.4.164 20 | client_port: 12379 21 | peer_port: 12380 22 | 23 | tidb_servers: 24 | - host: 172.16.4.164 25 | port: 10160 26 | status_port: 10180 27 | 28 | tikv_servers: 29 | - host: 172.16.4.164 30 | port: 14000 31 | status_port: 11080 32 | config: 33 | server.labels: 34 | host: h1 35 | - host: 172.16.4.164 36 | port: 14001 37 | status_port: 11081 38 | config: 39 | server.labels: 40 | host: h1 41 | - host: 172.16.4.164 42 | port: 14002 43 | status_port: 11082 44 | config: 45 | server.labels: 46 | host: h1 47 | 48 | monitoring_servers: 49 | - host: 172.16.4.164 50 | port: 19090 51 | 52 | grafana_servers: 53 | - host: 172.16.4.164 54 | port: 13000 55 | -------------------------------------------------------------------------------- /ops/ansible/.gitignore: -------------------------------------------------------------------------------- 1 | *.retry 2 | hosts.ini 3 | -------------------------------------------------------------------------------- /ops/ansible/README.md: -------------------------------------------------------------------------------- 1 | # Ansible 运维脚本 2 | 3 | ## 集群部署流程 4 | 5 | 0. 检查各机器的信息,可以使用 playbook `sysinfo.yaml` 6 | 1. 检查是否有残留的 tidb 相关进程(参考本 README) 7 | 2. 清理残留的 service `remove_service.yaml` 8 | 2. 挂载磁盘,可以使用 playbook `disk.yaml` 9 | 3. 检查 numa 信息,可以使用 playbook `numa.yaml` 10 | 4. 部署 ntp 服务 11 | 5. 调节 CPU 节能策略(参考本 README) 12 | 6. 修改机器密码,可以使用 playbook `update_password.yaml` 13 | 7. 新建部署目录,在部署目录中按照官方文档部署集群。 14 | 部署目录建议为 `/data-{env}`,env 为 `production` 或者 `testing`。 15 | 8. 
部署前使用 `tiup cluster check` 命令检查机器及操作系统配置 16 | 17 | ## 一些便利的命令 18 | 19 | ### 查看是否有残留的 tidb 相关进程 20 | 21 | ```sh 22 | ansible -i hosts.ini servers -m shell -a "systemctl status | grep -E 'tikv|tidb|grafana|export|prome'" 23 | ``` 24 | 25 | ### 调节 CPU 节能模式 26 | 27 | ```sh 28 | ``` 29 | -------------------------------------------------------------------------------- /ops/ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | display_skipped_hosts = false 3 | -------------------------------------------------------------------------------- /ops/ansible/create_user.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: servers 4 | tasks: 5 | - name: Create new user 'db' 6 | user: 7 | name: "db" 8 | group: "db" 9 | # password is `db`. 10 | # python3 -c "from passlib.hash import sha512_crypt; import getpass; print(sha512_crypt.using(rounds=5000).hash(getpass.getpass()))" 11 | password: "$6$Ru.USsaE/mp5zO9E$3VAUCCHBE.t45mrqnEOX9ciwLFPHuC9KE05BvuvkmvwIoCoz/SDLPAor/2G48L2p7X7Fokl9Ezib0Ofezs/Th0" 12 | create_home: true 13 | generate_ssh_key: yes 14 | ssh_key_bits: 2048 15 | ssh_key_file: ~/.ssh/db_id_rsa 16 | - name: Create new user 'admin' 17 | user: 18 | name: "admin" 19 | # password is `admin`. 
20 | group: "db" 21 | password: "$6$rxj6OsdF4Cfd8MDJ$rqx5z6DXAaB6Svz./V/t8wzZNQRyo6uBhPAxHBCR6E8mFRzYJGgJQM1dmie8MLXtFkkSBgTnToEmQkdj657aB1" 22 | create_home: true 23 | generate_ssh_key: yes 24 | ssh_key_bits: 2048 25 | ssh_key_file: ~/.ssh/admin_id_rsa 26 | -------------------------------------------------------------------------------- /ops/ansible/deploy_ntp.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: servers 4 | tasks: 5 | - name: Install ntp 6 | yum: 7 | name: ntp 8 | state: present 9 | 10 | - name: Install ntpdate 11 | yum: 12 | name: ntpdate 13 | state: present 14 | 15 | - name: Stop ntpd 16 | service: 17 | name: ntpd 18 | state: stopped 19 | 20 | - name: Ajust time with pool.ntp.org 21 | shell: ntpdate pool.ntp.org 22 | 23 | - name: Start ntpd 24 | service: 25 | name: ntpd 26 | state: started 27 | -------------------------------------------------------------------------------- /ops/ansible/deploy_pubkey.yaml: -------------------------------------------------------------------------------- 1 | # ansible-playbook -i hosts.ini deploy_pubkey.yaml -k --extra-vars="pubkey='$(cat ~/.ssh/id_rsa.pub)'" 2 | 3 | - hosts: all 4 | tasks: 5 | - name: make direcotry 6 | file: 7 | path: "/root/.ssh" 8 | state: directory 9 | - name: create file authorized_keys if not exist 10 | file: 11 | path: "/root/.ssh/authorized_keys" 12 | state: touch 13 | - name: ensure pubkey exists 14 | lineinfile: 15 | path: "/root/.ssh/authorized_keys" 16 | line: "{{ pubkey }}" 17 | -------------------------------------------------------------------------------- /ops/ansible/dir_permission.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: servers 4 | tasks: 5 | - name: Set /data permission 6 | file: 7 | path: '{{ item.path }}' 8 | owner: db 9 | group: db 10 | loop: 11 | - { path: "/data1"} 12 | - { path: "/data2"} 13 | - { path: "/app1"} 14 | - { path: "/app2"} 15 | 
-------------------------------------------------------------------------------- /ops/ansible/disk.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Some machines have not mount the disks 4 | - hosts: servers 5 | vars: 6 | disks: 7 | #/dev/nvme0n1: /data1 8 | #/dev/nvme1n1: /data2 9 | /dev/sdb: /app1 10 | /dev/sdc: /app2 11 | 12 | vars_prompt: 13 | - name: re_mount_disk 14 | prompt: "!!! Are you sure that you want to re-mount the disk (yes/no)?" 15 | private: no 16 | 17 | tasks: 18 | - name: Unmount disk 19 | mount: 20 | path: "{{ item.value }}" 21 | state: absent 22 | with_dict: "{{ disks }}" 23 | when: re_mount_disk == "yes" 24 | 25 | - name: Format disk 26 | filesystem: 27 | fstype: ext4 28 | dev: "{{ item.key }}" 29 | with_dict: "{{ disks }}" 30 | when: re_mount_disk == "yes" 31 | 32 | - name: Mount disk 33 | mount: 34 | path: "{{ item.value }}" 35 | src: "{{ item.key }}" 36 | fstype: ext4 37 | opts: "defaults,nodelalloc,noatime" 38 | state: mounted 39 | with_dict: "{{ disks }}" 40 | when: re_mount_disk == "yes" 41 | -------------------------------------------------------------------------------- /ops/ansible/hosts.ini.example: -------------------------------------------------------------------------------- 1 | [servers] 2 | 1.1.1.1 ansible_user="root" ansible_password="root" 3 | -------------------------------------------------------------------------------- /ops/ansible/remove_service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Note that the tasks output a lot of logs, you can ignore them 4 | # and check the final result. 
5 | - hosts: servers 6 | tasks: 7 | - name: Check if services are deployed 8 | shell: systemctl list-unit-files | grep "{{ item }}" 9 | register: services_present 10 | ignore_errors: true 11 | no_log: true 12 | with_items: 13 | - tikv-21161 14 | - node_exporter-9103 15 | 16 | #- debug: 17 | # var: services_present 18 | 19 | - name: Disable services 20 | systemd: 21 | state: stopped 22 | enabled: no 23 | name: "{{ item.item }}" 24 | with_items: "{{ services_present.results }}" 25 | when: item.rc == 0 26 | - name: Remove service files 27 | file: 28 | state: absent 29 | path: "/etc/systemd/system/{{ item }}.service" 30 | with_items: "{{ services_present.results }}" 31 | when: item.rc == 0 32 | 33 | - name: Reload systemd 34 | systemd: 35 | daemon_reload: yes 36 | -------------------------------------------------------------------------------- /ops/ansible/show_numa_info.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: servers 3 | tasks: 4 | - name: Install numactl 5 | yum: 6 | name: numactl 7 | state: present 8 | 9 | - name: Run numactl -s 10 | shell: numactl -H | head -1 11 | register: numactl 12 | 13 | - name: Print 14 | debug: var=numactl.stdout_lines 15 | -------------------------------------------------------------------------------- /ops/ansible/sysinfo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: servers 3 | tasks: 4 | - name: hostname 5 | vars: 6 | msg: | 7 | All Interface List: {{ ansible_interfaces }} 8 | All IP: {{ ansible_all_ipv4_addresses }} 9 | Gateway: {{ ansible_default_ipv4.gateway }} 10 | Distribution: {{ ansible_distribution }} 11 | Release: {{ ansible_distribution_release }} 12 | Distribution Version: {{ ansible_distribution_version }} 13 | Kernel: {{ ansible_kernel }} 14 | Architecture: {{ ansible_architecture }} 15 | Memory: {{ ansible_memtotal_mb }} 16 | CPU: {{ ansible_processor_cores }} 17 | Devices: {{ ansible_mounts | 
json_query('[].device') }} 18 | debug: 19 | msg: "{{ msg.split('\n') }}" 20 | -------------------------------------------------------------------------------- /ops/ansible/tuning_kernel_parameters.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: servers 3 | tasks: 4 | - name: Setting absent kernel params 5 | sysctl: 6 | name: "{{ item.name }}" 7 | value: "{{ item.value }}" 8 | sysctl_set: yes 9 | ignoreerrors: yes 10 | state: absent 11 | with_items: 12 | - { name: 'net.ipv4.tcp_tw_recycle', value: 0 } 13 | 14 | - name: Setting present kernel params 15 | sysctl: name="{{ item.name }}" value="{{ item.value }}" ignoreerrors=yes state=present 16 | with_items: 17 | - { name: 'net.core.somaxconn', value: 32768 } 18 | - { name: 'vm.swappiness', value: 0 } 19 | - { name: 'net.ipv4.tcp_syncookies', value: 0 } 20 | - { name: 'fs.file-max', value: 1000000 } 21 | - { name: 'fs.aio-max-nr', value: 1048576 } 22 | 23 | - name: update /etc/security/limits.conf 24 | vars: 25 | deploy_user: tidb 26 | blockinfile: 27 | dest: /etc/security/limits.conf 28 | insertbefore: '# End of file' 29 | #block: | 30 | # {{ deploy_user }} soft nofile 1000000 31 | # {{ deploy_user }} hard nofile 1000000 32 | # {{ deploy_user }} soft stack 10240 33 | block: | 34 | * soft nofile 1000000 35 | * hard nofile 1000000 36 | * soft stack 10240 37 | 38 | - name: Disable swap 39 | command: swapoff -a 40 | when: ansible_swaptotal_mb > 0 41 | 42 | - name: disable firewalld 43 | service: name=firewalld enabled=no state=stopped 44 | -------------------------------------------------------------------------------- /ops/ansible/update_password.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: servers 4 | gather_facts: false 5 | tasks: 6 | - name: Change root passwd 7 | user: name=root password={{ "xxx" | password_hash('sha512') }} update_password=always 8 | 
-------------------------------------------------------------------------------- /testbed/tikv-standard.yaml: -------------------------------------------------------------------------------- 1 | generateName: cosven-debug- 2 | reclaimPolicy: 3 | gcStrategy: 4 | onFailure: 5 | artifact: 6 | monitor: true 7 | log: true 8 | reserveEnv: 9 | durationOnWorkingHour: 6h 10 | items: 11 | - name: tc 12 | type: TIDB_CLUSTER 13 | spec: 14 | version: v6.1.0 15 | pd: 16 | replicas: 1 17 | requests: 18 | cpu: 2000m 19 | memory: 4Gi 20 | limits: 21 | cpu: 4000m 22 | memory: 8Gi 23 | baseImage: hub.pingcap.net/qa/pd 24 | storageClassName: "fast-disks" 25 | tikv: 26 | replicas: 3 27 | maxReplicas: 4 28 | requests: 29 | cpu: 8000m 30 | memory: 16Gi 31 | storage: 1500Gi 32 | limits: 33 | cpu: 8000m 34 | memory: 16Gi 35 | storage: 1500Gi 36 | baseImage: hub.pingcap.net/cosven/tikv 37 | version: v6.1.0-debug-1 38 | storageClassName: "fast-disks" 39 | tidb: 40 | replicas: 2 41 | maxReplicas: 2 42 | requests: 43 | cpu: 8000m 44 | memory: 16Gi 45 | limits: 46 | cpu: 8000m 47 | memory: 16Gi 48 | baseImage: hub.pingcap.net/qa/tidb 49 | - name: tools 50 | type: WORKLOAD_NODE 51 | spec: 52 | container: 53 | name: tools 54 | image: hub.pingcap.net/perf_testing/bench-toolset 55 | command: 56 | - tail 57 | - "-f" 58 | - "/dev/null" 59 | resources: 60 | requests: 61 | cpu: 12000m 62 | memory: 16Gi 63 | -------------------------------------------------------------------------------- /tipocket-ctl/.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | .venv/ 3 | dist/ 4 | tpctl.egg-info/ 5 | \#*\# 6 | .\#* 7 | 8 | # tpctl generated files 9 | tpctl-*.yaml 10 | -------------------------------------------------------------------------------- /tipocket-ctl/README.md: -------------------------------------------------------------------------------- 1 | # Tipocket-ctl 2 | 3 | Most of the tipocket test cases are supposed to run on K8s and tipocket use 
[argo](https://github.com/argoproj/argo) 4 | workflow to schedule them. Test case developers need to write several yaml files to 5 | describe the *argo workflow*. However, most of the test case developer are not familiar 6 | with K8s, let alone argo. It is pretty hard for us to write and maintain those yaml files. 7 | 8 | Tipocket-ctl is designed to auto-generate argo workflow yaml files and provide step-by-step 9 | guide for developers to **debug** and **run** tipocket test case on K8s. 10 | 11 | ## Installation 12 | 13 | ```sh 14 | # Please ensure that you have python3.6+ installed 15 | pip3 install 'git+https://github.com/cosven/tidb-testing.git#egg=tpctl&subdirectory=tipocket-ctl' 16 | 17 | # Development install 18 | git clone git@github.com:cosven/tidb-testing.git 19 | cd tidb-testing/tipocket-ctl 20 | pip3 install -e ./ 21 | 22 | # Help 23 | tpctl --help 24 | ``` 25 | 26 | ## Usage 27 | 28 | ```sh 29 | tpctl deploy --run-time='5m' --subscriber '@slack_id' -- bin/resolve-lock -enable-green-gc=false 30 | ``` 31 | 32 | The command output looks like the following: 33 | ``` 34 | Case name is resolve-lock 35 | Generating command for running case... 36 | /bin/resolve-lock -enable-green-gc=false -run-time="5m" -round="1" -client="5" -nemesis="" -purge="false" -delNS="false" -namespace="tpctl-resolve-lock-universal" -hub="docker.io" -repository="pingcap" -image-version="nightly" -tikv-image="" -tidb-image="" -pd-image="" -tikv-config="" -tidb-config="" -pd-config="" -tikv-replicas="5" -tidb-replicas="1" -pd-replicas="1" -storage-class="local-storage" -loki-addr="" -loki-username="" -loki-password="" 37 | Generating argo workflow tpctl-resolve-lock-universal.yaml... 
#!/bin/bash
# Delete the TidbCluster CR and its PVCs for a tpctl-created namespace.
# By tpctl convention the namespace and the tc instance share one name,
# passed as the single argument.

shopt -s expand_aliases
source .env  # provides the `k` (kubectl) alias

if [[ -z "$1" ]]; then
    # Bug fix: the usage line printed "$1" (the empty argument) instead of
    # the script name "$0".
    # NOTE(review): the original argument placeholder was lost; assumed to
    # be the namespace/instance name.
    echo "Usage: $0 <namespace>"
    exit 1
fi

ns="$1"
instance="$1"

k -n "$ns" delete tc "$instance"
k -n "$ns" delete pvc -l app.kubernetes.io/instance="$instance"
@click.argument('status') 30 | @click.option('--kv', multiple=True) # simple kv 31 | @click.option('--b64encodedkvs', default='') # complex kv 32 | def send_message(channel, case, status, kv, b64encodedkvs): 33 | status = Status(status) 34 | client = WebClient(token=os.getenv('SLACK_BOT_TOKEN')) 35 | fields = [] 36 | fields.append(('status', f'{status.value}')) 37 | fields.append(('time', f'{datetime.now()}')) 38 | 39 | if status is Status.running: 40 | color = '#268bd2' # blue, copied from solorized theme 41 | title = f"Test case `{case}` {status.value} 🙏" 42 | else: 43 | if status is Status.failed: 44 | color = 'danger' 45 | else: 46 | color = 'good' 47 | title = f"Test case `{case}` {status.value} --- {status.value}" 48 | 49 | if kv: 50 | for each in kv: 51 | key, value = each.split('=') 52 | fields.append((key, value)) 53 | 54 | kv_pairs_str = b64decode(b64encodedkvs).decode('utf-8') or '{}' 55 | kv_pairs = json.loads(kv_pairs_str) 56 | for key, value in kv_pairs.items(): 57 | fields.append((key, value)) 58 | 59 | channels = channel.split(',') 60 | for channel_ in channels: 61 | try: 62 | client.chat_postMessage( 63 | channel=channel_, 64 | attachments=[{ 65 | "mrkdwn_in": ["text", "title"], 66 | "color": color, 67 | "title": title, 68 | "text": "", 69 | "fields": [{'title': name, 70 | 'value': value, 71 | 'short': name in ('status', 'time')} 72 | for name, value in fields], 73 | }] 74 | ) 75 | except: # noqa 76 | logging.exception('send message failed') 77 | 78 | 79 | if __name__ == '__main__': 80 | send_message() 81 | -------------------------------------------------------------------------------- /tipocket-ctl/scripts/slack-notify/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: tipocket-slack-token 5 | data: 6 | token: xxx 7 | -------------------------------------------------------------------------------- /tipocket-ctl/scripts/tail.sh: 
#!/usr/bin/env python3

from setuptools import setup

setup(
    name='tpctl',
    version='0.1.dev0',
    description='run tipocket testcase easier',
    author='Cosven',
    author_email='yinshaowen241@gmail.com',
    packages=[
        'tpctl',
    ],
    # BUG FIX: the original dict literal used the key '' twice, so Python
    # silently kept only the second entry and 'data/*' was never packaged.
    # Both patterns now live under a single key.
    package_data={
        '': ['data/*', 'scripts/env_raw.sh'],
    },
    python_requires=">=3.6",
    # Point at the actual subdirectory name (was the stale 'casectl').
    url='https://github.com/cosven/tidb-testing/tipocket-ctl',
    keywords=['tidb', 'testing'],
    classifiers=[
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3 :: Only',
        "Operating System :: MacOS",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX :: Linux",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)"
    ],
    install_requires=[
        'click',
        'pyyaml',
        'click_option_group',
    ],
    extras_require={
    },
    tests_require=[],
    entry_points={
        'console_scripts': [
            "tpctl=tpctl.__main__:main",
        ]
    },
)
    def gen_workflow(self):
        """Build and return the Argo Workflow manifest (as a plain dict).

        The workflow has a 'main' entrypoint running the case template and
        an 'on-exit' handler. When ``self.notify_users`` is non-empty, a
        notify step is prepended to main and pass/fail notify steps are
        added to on-exit; otherwise the notify template is omitted entirely.
        """
        main_steps = []
        on_exit_steps = []
        if self.notify_users:
            users = self.notify_users
            # First main step: announce that the case started running.
            notify_step = self.gen_notify_step('notify-start', 'running')
            main_steps.append([notify_step])

            # on-exit runs both steps in parallel; the `when` clauses make
            # them mutually exclusive on {{workflow.status}}.
            notify_failed_step = self.gen_notify_step(
                'notify-end-failed', 'failed', r'{{workflow.status}} != Succeeded')
            notify_passed_step = self.gen_notify_step(
                'notify-end-passed', 'passed', r'{{workflow.status}} == Succeeded')
            on_exit_steps.append([notify_failed_step, notify_passed_step])
        else:
            users = []
        # The case itself always runs, after any notify-start step.
        main_steps.append([self.gen_case_step()])
        workflow = {
            'metadata': {
                # generateName lets Argo append a random suffix per run.
                'generateName': self.name + '-',
                'namespace': 'argo',
            },
            'spec': {
                'entrypoint': 'main',
                'onExit': 'on-exit',
                'templates': [
                    {'name': 'main', 'steps': main_steps},
                    {'name': 'on-exit', 'steps': on_exit_steps},
                    # The notify template only exists when there are users.
                    *([self.gen_notify_template(users)] if users else []),
                    self.gen_case_template(),
                ],
            },
        }
        return workflow
91 | ) 92 | kvs = { 93 | 'cmd': self.case.cmd, 94 | 'tidb-cluster': dump(self.tidb_cluster.to_json()), 95 | 'help': help_msg 96 | } 97 | if self.description: 98 | kvs['description'] = self.description 99 | 100 | b64encodedkvs = encode(json.dumps(kvs)) 101 | return { 102 | 'name': 'notify', 103 | 'inputs': { 104 | 'parameters': [ 105 | {'name': 'status', 'default': 'passed'}, 106 | ] 107 | }, 108 | 'container': { 109 | 'name': 'notify-py', 110 | 'image': 'hub.pingcap.net/cosven/notify:2021-01-05', 111 | 'imagePullpolicy': 'Always', 112 | 'args': [ 113 | ','.join(users), 114 | self.case.name, 115 | r'{{inputs.parameters.status}}', 116 | '--kv', 117 | r'argo-workflow-id={{workflow.name}}', 118 | '--b64encodedkvs', 119 | b64encodedkvs 120 | ], 121 | 'env': [ 122 | { 123 | 'name': 'SLACK_BOT_TOKEN', 124 | 'valueFrom': { 125 | 'secretKeyRef': { 126 | 'name': 'tipocket-slack-token', 127 | 'key': 'token' 128 | } 129 | } 130 | } 131 | ] 132 | } 133 | } 134 | 135 | def gen_case_template(self): 136 | return { 137 | 'name': self._get_case_template_name(), 138 | 'metadata': { 139 | 'labels': { 140 | } 141 | }, 142 | 'container': { 143 | 'name': self.case.name, 144 | 'image': self.image, 145 | 'imagePullpolicy': 'Always', 146 | 'command': ['sh', '-c', self.case.cmd] 147 | } 148 | } 149 | 150 | def _get_case_template_name(self): 151 | return self.case.name 152 | 153 | def gen_cron_workflow(self, cron_params): 154 | workflow = self.gen_workflow() 155 | # use fixed name for cron workflow 156 | metadata = workflow['metadata'] 157 | metadata.pop('generateName') 158 | metadata['name'] = self.name 159 | workflow['kind'] = 'CronWorkflow' 160 | workflow['spec'] = {'workflowSpec': workflow['spec']} 161 | for k, v in cron_params.items(): 162 | workflow['spec'][k] = v 163 | return workflow 164 | -------------------------------------------------------------------------------- /tipocket-ctl/tpctl/debug.py: -------------------------------------------------------------------------------- 1 | 
import os 2 | import pathlib 3 | 4 | import click 5 | 6 | HELP_STRING = """ 7 | generate debug environment into .env file. 8 | 9 | Run `source .env` to get commands. 10 | 11 | \b 12 | You can find argument {deploy-id} in slack notification. eg: 13 | ============ Slack Notification ====================== 14 | argo workflow 15 | tpctl-hello-test-tpctl-q948q 16 | ^^^ {DEPLOY ID} ^^^^^^^^^^^^ 17 | ======================================================= 18 | """ 19 | 20 | 21 | class DebugToolBox: 22 | """ 23 | Prototype of a certain debug task. 24 | Use it to generate debug/ dir with commands in debug/.env 25 | """ 26 | 27 | def __init__(self, deploy_id, debug_parent="/tmp/"): 28 | self.deploy_id = deploy_id 29 | self.debug_parent = pathlib.Path(debug_parent) 30 | self.debug_dir = pathlib.Path(debug_parent) / deploy_id 31 | 32 | def generate_all(self): 33 | if not os.path.exists(self.debug_dir): 34 | os.mkdir(self.debug_dir) 35 | with open(pathlib.Path(self.debug_dir) / ".env", 'wt') as f: 36 | f.write(self.script()) 37 | 38 | def script(self): 39 | variables = f'DEPLOY_ID={self.deploy_id}' 40 | with open(pathlib.Path(__file__).parent.absolute() / './scripts/env_raw.sh', 'rt') as f: 41 | functions = ''.join(f.readlines()) 42 | return variables + '\n' + functions 43 | 44 | def print_help(self): 45 | click.echo( 46 | 'Generate .env in {}\n'.format(self.debug_dir) + 47 | 'Run to get debug commands:\n' + 48 | click.style('cd {}\n'.format(self.debug_dir), fg='green') + 49 | click.style('source .env', fg='green') 50 | ) 51 | 52 | 53 | @click.command(help=HELP_STRING) 54 | @click.argument('deploy-id') 55 | def debug(**params): 56 | """ 57 | Dependency: argo and kubectl are installed and properly configured in current machine. 
58 | """ 59 | toolbox = DebugToolBox(params['deploy_id']) 60 | toolbox.generate_all() 61 | toolbox.print_help() 62 | -------------------------------------------------------------------------------- /tipocket-ctl/tpctl/deploy.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import shlex 3 | import sys 4 | 5 | import click 6 | import yaml 7 | from click_option_group import optgroup 8 | 9 | from tpctl.case import BinaryCase, ArgoCase 10 | from tpctl.tidb_cluster import ComponentName, ComponentSpec, TidbClusterSpec 11 | 12 | 13 | # RESOURCES_DIR = 'tpctl-build/resources' 14 | COMPONENTS = ['tikv', 'tidb', 'pd'] 15 | 16 | # Those options won't be passed to tipocket case. 17 | IGNORE_OPTION_LIST = [ 18 | 'image', 19 | 'subscriber', 20 | 'feature', 21 | 'cron', 22 | 'cron_schedule', 23 | 'description', 24 | ] 25 | 26 | # Those options would be passed to tipocket case, 27 | # except those in IGNORE_OPTION_LIST. 28 | COMMON_OPTIONS = ( 29 | # !!! remember to update params.IGNORE_OPTION_LIST when 30 | # a parameter is modified 31 | optgroup.group('Test case deploy options'), 32 | optgroup.option('--subscriber', multiple=True), 33 | optgroup.option('--feature', default='universal'), 34 | optgroup.option('--image', default="hub.pingcap.net/qa/tipocket"), 35 | optgroup.option('--description', default=''), 36 | optgroup.option('--cron/--not-cron', default=False), 37 | optgroup.option('--cron-schedule', default='30 17 * * *'), 38 | 39 | optgroup.group('Test case common options'), 40 | optgroup.option('--prepare-sql', default=''), 41 | # HELP: We can add `failpoint.tidb` as option since click can't recognize 42 | # the option name when there is a dot. 
43 | # optgroup.option('--failpoint.tidb', 'failpoint.tidb', default='',), 44 | optgroup.option('--round', default='1'), 45 | optgroup.option('--client', default='5'), 46 | optgroup.option('--run-time', default='10m'), 47 | optgroup.option('--wait-duration', default='10m'), 48 | optgroup.option('--nemesis', default=''), 49 | optgroup.option('--purge/--no-purge', default=True), 50 | optgroup.option('--delns/--no-delns', 'delNS', default=True), 51 | 52 | optgroup.group('TiDB cluster options'), 53 | optgroup.option('--namespace', default=''), 54 | optgroup.option('--hub', default='docker.io'), 55 | optgroup.option('--repository', default='pingcap'), 56 | optgroup.option('--image-version', default='nightly'), 57 | *[optgroup.option(f'--{component}-image', default='') 58 | for component in COMPONENTS], 59 | *[optgroup.option(f'--{component}-config', default='', type=click.Path()) 60 | for component in COMPONENTS], 61 | optgroup.option('--tikv-replicas', default='5'), 62 | optgroup.option('--tidb-replicas', default='1'), 63 | optgroup.option('--pd-replicas', default='1'), 64 | 65 | optgroup.group('K8s options', 66 | help='different K8s cluster has different values'), 67 | optgroup.option('--storage-class', default='local-path', 68 | show_default=True), 69 | 70 | # optgroup.group('Test case logging options', 71 | # help='usually, you need not to change this'), 72 | # set loki settings to empty since loki does not work well currently 73 | # optgroup.option('--loki-addr', default=''), # http://gateway.loki.svc' 74 | # optgroup.option('--loki-username', default=''), # loki 75 | # optgroup.option('--loki-password', default=''), # admin 76 | ) 77 | 78 | 79 | def testcase_common_options(func): 80 | for option in reversed(COMMON_OPTIONS): 81 | func = option(func) 82 | return func 83 | 84 | 85 | def get_case_params(params): 86 | """ 87 | validate params and generate params for test case 88 | """ 89 | # convert parameters to the format that tipocket test case recognize 90 | 
@click.command(context_settings=dict(ignore_unknown_options=True))
@testcase_common_options
@click.argument('--', nargs=-1, required=True, type=click.UNPROCESSED)
def deploy(**params):
    """Deploy(debug/run) tipocket case on K8s

    \b
    Several usage examples:
    * tpctl deploy --subscriber '@slack_id' -- bin/bank2
    * tpctl deploy --image='myhub.io/tom/tipocket:case' --subscriber '@slack_id' -- bin/case -xxx
    * tpctl deploy --image='{your_tipocket_image}' --subscriber '@slack_id' -- bin/case -xxx
    * tpctl deploy --run-time='5m' --subscriber '@slack_id' -- bin/resolve-lock -enable-green-gc=false

    Note: case specific options(like `enable-green-gc`) should be followed
    by `--`, and the common options (like `run-time`) should be specified in
    command options.
    """
    # click stores the '--' argument under the key '__'; everything after
    # `--` on the command line lands here untouched.
    case_cmd_args = params.pop('__')
    assert case_cmd_args and case_cmd_args[0].startswith('bin/')

    # Generate deploy id
    case_name = case_cmd_args[0].split('/')[1]
    is_cron = params['cron'] is True
    feature = params['feature']
    deploy_id = f'tpctl-{case_name}-{feature}'
    if is_cron:
        deploy_id += '-cron'
    click.echo(f'Case name is {click.style(case_name, fg="blue")}')
    # Generate case
    case_params = get_case_params(params)
    # Set namespace to deploy_id by default
    if not case_params['namespace']:
        case_params['namespace'] = deploy_id
    # Check case options
    # Reject common options smuggled after `--`: they would otherwise be
    # passed twice (once from case_params, once verbatim).
    for arg in case_cmd_args:
        if arg.startswith('-'):
            option_name = arg.split('=')[0][1:]
            if option_name not in case_params:
                continue
            click.secho(f"You should specify option '{option_name}' before --",
                        fg='red')
            sys.exit(1)
    # Input: tpctl deploy -- bin/cdc-bank -failpoint.tidb="set a=1"
    # Output: /bin/cdc-bank '-failpoint.tidb=set a=1' ...
    case_cmd = ' '.join(shlex.quote(arg) for arg in case_cmd_args)
    case_cmd = f'/{case_cmd}'
    for key, value in case_params.items():
        case_cmd += f' -{key}="{value}"'
    case = BinaryCase(case_name, case_cmd)
    click.echo('Generating command for running case...')
    click.secho(case_cmd, fg='blue')

    # generate argo workflow yaml
    argo_workflow_filepath = f'/tmp/{deploy_id}.yaml'
    image = params['image']
    tidb_cluster = get_tidb_cluster_spec_from_params(params)
    # None (not an empty tuple) tells ArgoCase to skip notify steps entirely.
    subscribers = params['subscriber'] or None
    argo_case = ArgoCase(deploy_id, case, image,
                         tidb_cluster,
                         description=params['description'],
                         notify_users=subscribers)
    click.echo(f'Generating argo workflow {click.style(argo_workflow_filepath, fg="blue")}...')
    with open(argo_workflow_filepath, 'w') as f:
        if is_cron:
            workflow_dict = argo_case.gen_cron_workflow({
                'schedule': params['cron_schedule'],
                'concurrencyPolicy': 'Forbid',
                'startingDeadlineSeconds': 0,
                'timezone': 'Asia/Shanghai',
            })
        else:
            workflow_dict = argo_case.gen_workflow()
        yaml.dump(workflow_dict, f)

    # show hints
    if is_cron:
        deploy_cmd = f'argo cron create {argo_workflow_filepath}'
    else:
        deploy_cmd = f'argo submit {argo_workflow_filepath}'
    click.echo('Run following commands to deploy the case')
    click.secho(deploy_cmd, fg='green')
-------------------------------------------------------------------------------- 1 | # WARNING: All commands connect to `tidb` container of tidb pods by default. 2 | # As tidb pod has two containers 3 | OUTPUT_DIR=output 4 | CASE_POD_ID=$(argo get -n argo $DEPLOY_ID -o json \ 5 | | jq -r '.status.nodes[] | select(.type == "Pod" and .templateName != "notify") | .id') 6 | CLUSTER_NAMESPACE=$(argo get -n argo $DEPLOY_ID -o json \ 7 | | jq -r '.spec.templates[-1].container.command[2]' \ 8 | | grep -oP '\-namespace=".*?"' \ 9 | | grep -o '".*"' \ 10 | | sed -e 's/^"//' -e 's/"$//') 11 | 12 | function get_grafana_port { 13 | echo $(kubectl -n $CLUSTER_NAMESPACE get svc -o json \ 14 | | jq -r '.items[].spec.ports[] 15 | | select(.name == "http-grafana") 16 | | .nodePort') 17 | } 18 | 19 | function t_get_grafana_addr { 20 | grafana_port=$(get_grafana_port) 21 | host_ip=$(hostname) 22 | if [ "$grafana_port" == "" ]; then 23 | echo "Grafana is not ready yet, please run t_get_grafana_addr later." 24 | else 25 | echo "Grafana is on http://$host_ip:$grafana_port, username and password are both admin." 26 | fi 27 | } 28 | 29 | echo "available commands:" 30 | echo "- t_log_case" 31 | echo "- t_ls_pod" 32 | echo "- t_log_pod {pod-name}" 33 | echo "- t_ssh_pod {pod-name}" 34 | echo "- t_get_grafana_addr" 35 | echo "" 36 | echo "logs are stored in ./$OUTPUT_DIR/" 37 | t_get_grafana_addr # Print grafana port. 
38 | 39 | 40 | mkdir -p $OUTPUT_DIR 41 | 42 | function is_name_tidb { 43 | if [[ "$1" =~ .+-tidb-[0-9]+$ ]]; then 44 | return 0 45 | else 46 | return 1 47 | fi 48 | } 49 | 50 | function output_filepath { 51 | echo "$OUTPUT_DIR/$1" 52 | } 53 | 54 | function get_argo_steps { 55 | sed -n '/STEP */,$p' <(argo get -n argo $DEPLOY_ID) 56 | } 57 | 58 | function t_log_case { 59 | OP=$(output_filepath log_case) 60 | kubectl logs --namespace argo -c main $CASE_POD_ID > $OP 61 | echo "log stored in $OP" 62 | } 63 | 64 | function t_ls_pod { 65 | kubectl -n $CLUSTER_NAMESPACE get pods 66 | } 67 | 68 | function t_log_pod { 69 | if [ "$1" == "" ]; then 70 | echo "{pod-name} not set" 71 | echo "Usage: t_log_pod {pod-name}" 72 | return 1 73 | fi 74 | 75 | OP=$(output_filepath $1) 76 | if is_name_tidb "$1"; then 77 | kubectl logs --namespace $CLUSTER_NAMESPACE $1 -c tidb > $OP 78 | else 79 | kubectl logs --namespace $CLUSTER_NAMESPACE $1 > $OP 80 | fi 81 | echo "log stored in $OP" 82 | } 83 | 84 | function t_ssh_pod { 85 | if [ "$1" == "" ]; then 86 | echo "{pod-name} not set" 87 | echo "Usage: t_ssh_pod {pod-name}" 88 | return 1 89 | fi 90 | 91 | if is_name_tidb "$1"; then 92 | kubectl -n $CLUSTER_NAMESPACE exec --stdin --tty $1 -c tidb -- /bin/sh 93 | else 94 | kubectl -n $CLUSTER_NAMESPACE exec --stdin --tty $1 -- /bin/sh 95 | fi 96 | } 97 | 98 | -------------------------------------------------------------------------------- /tipocket-ctl/tpctl/tidb_cluster.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class ComponentName(Enum): 5 | tidb = 'tidb' 6 | pd = 'pd' 7 | tikv = 'tikv' 8 | 9 | @classmethod 10 | def list_names(cls): 11 | names = [member.value for _, member in cls.__members__.items()] 12 | return names 13 | 14 | 15 | class ComponentSpec: 16 | def __init__(self, name, image, replicas, config): 17 | self.name = name 18 | self.image = image 19 | self.replicas = replicas 20 | self.config = config 21 
| 22 | def to_json(self): 23 | return { 24 | 'name': self.name, 25 | 'image': self.image, 26 | 'repliacs': self.replicas, 27 | 'config': self.config 28 | } 29 | 30 | 31 | class TidbClusterSpec: 32 | def __init__(self, pd_spec, tikv_spec, tidb_spec): 33 | self.pd_spec = pd_spec 34 | self.tikv_spec = tikv_spec 35 | self.tidb_spec = tidb_spec 36 | 37 | @classmethod 38 | def create_from_components(cls, component_specs): 39 | tidb = pd = tikv = None 40 | for component_spec in component_specs: 41 | name = ComponentName(component_spec.name) 42 | if name == ComponentName.tidb: 43 | tidb = component_spec 44 | elif name == ComponentName.pd: 45 | pd = component_spec 46 | elif name == ComponentName.tikv: 47 | tikv = component_spec 48 | return TidbClusterSpec(tidb_spec=tidb, 49 | pd_spec=pd, 50 | tikv_spec=tikv) 51 | 52 | def to_json(self): 53 | return { 54 | 'pd': self.pd_spec.to_json(), 55 | 'tikv': self.tikv_spec.to_json(), 56 | 'tidb': self.tidb_spec.to_json(), 57 | } 58 | -------------------------------------------------------------------------------- /tipocket-ctl/tpctl/utils.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cosven/tidb-testing/8e1ae96a45a85de11d905801bbf7914dc27c7e98/tipocket-ctl/tpctl/utils.py -------------------------------------------------------------------------------- /tipocket-ctl/tpctl/yaml_dump_tidbcluster.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | 3 | 4 | class Config(str): 5 | pass 6 | 7 | 8 | def literal_str_representer(dumper, data): 9 | return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|') 10 | 11 | 12 | yaml.add_representer(Config, literal_str_representer) 13 | 14 | 15 | def dump(tidbcluster_dict): 16 | for component in tidbcluster_dict: 17 | config = tidbcluster_dict[component]['config'] 18 | if config: 19 | # strip every line, otherwise pyyaml can't dump with block style 20 | # 
https://github.com/yaml/pyyaml/issues/121 21 | lines = [] 22 | for line in config.split('\n'): 23 | lines.append(line.strip()) 24 | config = '\n'.join(lines) 25 | 26 | tidbcluster_dict[component]['config'] = Config(config) 27 | s = yaml.dump(tidbcluster_dict) 28 | return s 29 | -------------------------------------------------------------------------------- /txn-test/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | _ "github.com/go-sql-driver/mysql" 7 | "strings" 8 | "time" 9 | ) 10 | 11 | func MustExec(s *sql.DB, q string) (result sql.Result) { 12 | // FIXME: 13 | for _, stmt := range strings.Split(q, ";") { 14 | stmt = strings.TrimSpace(stmt) 15 | if stmt == "" { 16 | continue 17 | } 18 | res, err := s.Exec(stmt) 19 | if err != nil { 20 | panic(err) 21 | } 22 | result = res 23 | } 24 | return result 25 | } 26 | 27 | func table1(s *sql.DB) { 28 | MustExec(s, ` 29 | drop table if exists t1; 30 | create table t1 (i int, j int, k int, unique index unq_j (j)); 31 | insert into t1 values (1, 1, 1), (2, 2, 2); 32 | `) 33 | } 34 | 35 | func table2(s *sql.DB) { 36 | MustExec(s, ` 37 | drop table if exists t1; 38 | create table t1 (i int key, j int, k int, unique index unq_j (j)); 39 | insert into t1 values (1, 1, 1), (2, 2, 2); 40 | `) 41 | } 42 | 43 | func main() { 44 | DSN := "root:@tcp(172.16.4.82:30488)/test" 45 | // DSN := "root:@tcp(127.0.0.1:3306)/test" 46 | s1, err := sql.Open("mysql", DSN) 47 | if err != nil { 48 | panic("connect failed") 49 | } 50 | defer s1.Close() 51 | s2, err := sql.Open("mysql", DSN) 52 | if err != nil { 53 | panic("connect failed") 54 | } 55 | defer s2.Close() 56 | 57 | table1(s1) 58 | //table2(s1) 59 | 60 | MustExec(s1, ` 61 | begin ; 62 | insert into t1 values (3, 3, 3); 63 | `) 64 | start := time.Now().Unix() 65 | fmt.Printf("start s2: %d\n", start) 66 | go MustExec(s2, ` 67 | set innodb_lock_wait_timeout=5; 68 | begin ; 69 | 
update t1 set k = 33 where j = 3; 70 | `) 71 | time.Sleep(1) 72 | fmt.Printf("wait time: %d\n", time.Now().Unix()-start) 73 | MustExec(s1, "commit;") 74 | MustExec(s2, "commit;") 75 | } 76 | --------------------------------------------------------------------------------