├── .coveragerc ├── .flake8 ├── .github └── workflows │ └── test.yml ├── .gitignore ├── .gitmodules ├── .luacheckrc ├── .tarantoolctl ├── Makefile ├── README.md ├── bin └── luatest ├── dispatcher.py ├── lib ├── __init__.py ├── admin_connection.py ├── app_server.py ├── box_connection.py ├── colorer.py ├── connpool.py ├── error.py ├── inspector.py ├── luatest_server.py ├── options.py ├── preprocessor.py ├── pytap13.py ├── sampler.py ├── server.py ├── server_mixins.py ├── tarantool_connection.py ├── tarantool_server.py ├── test.py ├── test_suite.py ├── unittest_server.py ├── utils.py └── worker.py ├── listeners.py ├── requirements-test.txt ├── requirements.txt ├── tarantoolctl ├── test-run.py ├── test ├── instances │ └── default.lua ├── luatest_helpers │ └── server.lua ├── test-app │ ├── cfg.test.lua │ └── suite.ini ├── test-luatest │ ├── smoke_check_test.lua │ └── suite.ini ├── test-run.py ├── test-tarantool │ ├── box.lua │ ├── call.result │ ├── call.test.py │ ├── engine.cfg │ ├── iproto.result │ ├── iproto.test.py │ ├── replica.lua │ ├── set_language.result │ ├── set_language.test.sql │ ├── setopt_delimeter.result │ ├── setopt_delimeter.test.lua │ ├── suite.ini │ ├── worker_hang_when_gc_triggered_inside_colorer.result │ └── worker_hang_when_gc_triggered_inside_colorer.test.lua ├── test-unit │ ├── broken_unicode.result │ ├── broken_unicode.test │ └── suite.ini └── unittest │ ├── 00000000000000000003.snap │ ├── box-cc0544b6afd1.lua │ ├── hang.result │ ├── hang.test.lua │ ├── replica-7f4d4895ff58.lua │ ├── suite.ini │ ├── test_lib_utils.py │ └── test_tarantool_server.py └── test_run.lua /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | concurrency = 4 | gevent 5 | multiprocessing 6 | 7 | [report] 8 | precision = 1 9 | include = 10 | ./* 11 | omit = 12 | lib/tarantool-python/* 13 | lib/msgpack-python/* 14 | test/* 15 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 100 3 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test-run 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | if: ( github.event_name == 'push' || 8 | github.event.pull_request.head.repo.full_name != github.repository ) && 9 | ( github.repository == 'tarantool/test-run' ) 10 | 11 | runs-on: ubuntu-latest 12 | 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] 17 | tarantool-version: ['2.10', '2.11'] 18 | 19 | steps: 20 | - uses: actions/checkout@v3 21 | with: 22 | fetch-depth: 1 23 | submodules: recursive 24 | - name: set up Tarantool ${{ matrix.tarantool-version }} 25 | uses: tarantool/setup-tarantool@v2 26 | with: 27 | tarantool-version: ${{ matrix.tarantool-version }} 28 | - name: set up Python ${{ matrix.python-version }} 29 | uses: actions/setup-python@v4 30 | with: 31 | python-version: ${{ matrix.python-version }} 32 | - name: display python version 33 | run: python -c "import sys; print(sys.version)" 34 | - name: setup tt 35 | run: | 36 | curl -L https://tarantool.io/release/2/installer.sh | sudo bash 37 | sudo apt install -y tt 38 | tt version 39 | - name: setup dependencies 40 | run: | 41 | tt rocks install luatest 42 | tt rocks install luacheck 43 | - name: setup 
python dependencies 44 | run: | 45 | pip install -r requirements.txt 46 | pip install -r requirements-test.txt 47 | - name: run static analysis 48 | run: | 49 | make lint 50 | - name: run unit testing 51 | run: | 52 | # Flush the variable to don't spoil job summary with tests, 53 | # which are expected to fail. 54 | GITHUB_STEP_SUMMARY= make test_unittest 55 | - name: run integration testing 56 | run: | 57 | make test_integration 58 | - name: code coverage 59 | if: ${{ matrix.python-version == '3.8' && matrix.tarantool-version == '2.10' }} 60 | run: | 61 | pip install coveralls==3.* 62 | make coverage 63 | - name: upload coverage data to coveralls.io 64 | if: ${{ matrix.python-version == '3.8' && matrix.tarantool-version == '2.10' }} 65 | run: coveralls --service=github 66 | env: 67 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 68 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # General 2 | *~ 3 | .*.sw[a-z] 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | env/ 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | .idea/ 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .coverage 43 | .coverage.* 44 | .cache 45 | .hypothesis/ 46 | nosetests.xml 47 | coverage.xml 48 | *,cover 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Sphinx documentation 55 | docs/_build/ 56 | 57 | # PyBuilder 58 | target/ 59 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "lib/msgpack-python"] 2 | path = lib/msgpack-python 3 | url = https://github.com/msgpack/msgpack-python.git 4 | [submodule "lib/tarantool-python"] 5 | path = lib/tarantool-python 6 | url = https://github.com/tarantool/tarantool-python.git 7 | -------------------------------------------------------------------------------- /.luacheckrc: -------------------------------------------------------------------------------- 1 | globals = {"box", "_TARANTOOL", "tonumber64", "os", "test_run"} 2 | ignore = { 3 | -- Accessing an undefined field of a global variable . 4 | "143/debug", 5 | -- Accessing an undefined field of a global variable . 6 | "143/string", 7 | -- Accessing an undefined field of a global variable . 8 | "143/table", 9 | -- Unused argument . 10 | "212/self", 11 | -- Redefining a local variable. 12 | "411", 13 | -- Redefining an argument. 14 | "412", 15 | -- Shadowing a local variable. 16 | "421", 17 | -- Shadowing an upvalue. 18 | "431", 19 | -- Shadowing an upvalue argument. 
20 | "432", 21 | } 22 | 23 | include_files = { 24 | "**/*.lua", 25 | } 26 | 27 | exclude_files = { 28 | "lib/tarantool-python", 29 | "test/test-tarantool/*.test.lua", 30 | ".rocks/**/*.lua", 31 | } 32 | -------------------------------------------------------------------------------- /.tarantoolctl: -------------------------------------------------------------------------------- 1 | -- Options for test-run tarantoolctl 2 | 3 | -- Note: tonumber(nil) is nil. 4 | local workdir = os.getenv('TEST_WORKDIR') 5 | local replication_sync_timeout = tonumber(os.getenv('REPLICATION_SYNC_TIMEOUT')) 6 | 7 | default_cfg = { 8 | pid_file = workdir, 9 | wal_dir = workdir, 10 | memtx_dir = workdir, 11 | vinyl_dir = workdir, 12 | log = workdir, 13 | background = false, 14 | replication_sync_timeout = replication_sync_timeout, 15 | } 16 | 17 | instance_dir = workdir 18 | 19 | -- vim: set ft=lua : 20 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | export PATH := .rocks/bin:$(PATH) 2 | 3 | MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) 4 | PROJECT_DIR := $(patsubst %/,%,$(dir $(MAKEFILE_PATH))) 5 | TEST_RUN_EXTRA_PARAMS?= 6 | PYTHON?=python 7 | 8 | default: 9 | false 10 | 11 | lint: flake8 luacheck 12 | 13 | flake8: 14 | $(PYTHON) -m flake8 *.py lib/*.py 15 | 16 | luacheck: 17 | luacheck --config .luacheckrc . 18 | 19 | test_integration: 20 | PYTHONPATH=$(PROJECT_DIR) $(PYTHON) test/test-run.py --force --exclude unittest $(TEST_RUN_EXTRA_PARAMS) 21 | 22 | test_unittest: 23 | $(PYTHON) -m unittest discover test/unittest/ 24 | 25 | test: test_unittest test_integration 26 | 27 | coverage: 28 | PYTHON="coverage run" make -f $(MAKEFILE_PATH) test 29 | coverage combine $(PROJECT_DIR) $(PROJECT_DIR)/test 30 | coverage report 31 | 32 | clean: 33 | coverage erase 34 | 35 | .PHONY: lint flake8 luacheck test test_integration test_unittest 36 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Tarantool Functional testing framework 2 | 3 | [![Coverage Status](https://coveralls.io/repos/github/tarantool/test-run/badge.svg)](https://coveralls.io/github/tarantool/test-run) 4 | 5 | ### Test Suite 6 | 7 | Bunch of tests, that lay down in the subfolder (recursively) with `suite.ini` 8 | file. `suite.ini` is basic ini-file, that consists of one section `default`, 9 | and a number of fields: 10 | 11 | * `core` 12 | * `description` - Test Suite description 13 | * `script` - shebang file to start tarantool with 14 | * disables: 15 | * `disabled` - tests that must be skipped 16 | * `release_disabled` - tests that must be skipped when Tarantool has been 17 | builded with `Release` 18 | * `valgrind_disabled` - tests that must be skipped when Valgrind is enabled 19 | * `lua_libs` - paths for lua files, that should be copied into the folder, 20 | where server is started (delimited with the space, e.g. `lua_libs=lua/1.lua 21 | lua/2.lua`) 22 | * `long_run` - mark tests as long, enabled only with `--long` option (delimited 23 | with the space, e.g. 
`long_run=t1.test.lua t2.test.lua`) 24 | * `config` - test configuration file name 25 | 26 | Field `core` must be one of: 27 | 28 | * `luatest` - [luatest][luatest] compatible test suite 29 | * `tarantool` - Test-Suite for Functional Testing 30 | * `app` - Another functional Test-Suite 31 | * `unittest` - Unit-Testing Test Suite 32 | 33 | ### Test 34 | 35 | Each test consists of files `*.test(.lua|.sql|.py)?`, `*.result`, and may have a 36 | skip condition file `*.skipcond`. On the first run (without `.result`) `.result` 37 | is generated from the output. At the beginning of each run the `.skipcond` file is 38 | executed. In the local environment there is an object `self`, which is a `Test` object. If the test 39 | must be skipped, put `self.skip = 1` in this file. Next, 40 | `.test(.lua|.py)?` is executed and a `.reject` file is created, then `.reject` is 41 | compared with `.result`. If something differs, the last 15 lines of this diff 42 | file are printed and the `.reject` file is saved in the `/rejects/` 43 | subfolder given in the options (`var/rejects/` by default). 44 | If nothing differs, the `.reject` file is deleted. 45 | 46 | ### Test configuration 47 | 48 | The test configuration file contains the config for multiple runs. For each test section 49 | the system runs a separate test and compares the result with the common `.result` file. For 50 | example, we need to run one test for different DB engines ("*" means the default 51 | configuration): 52 | 53 | ```json 54 | { 55 | "my.test.lua": { 56 | "first": {"a": 1, "b": 2}, 57 | "second": {"a": 1, "b": 3} 58 | }, 59 | "*": { 60 | "memtx": {"engine": "memtx"}, 61 | "vinyl": {"engine": "vinyl"} 62 | } 63 | } 64 | ``` 65 | 66 | In the test case we can get the configuration from the inspector: 67 | 68 | ```lua 69 | engine = test_run:get_cfg('engine') 70 | -- first run engine is 'memtx' 71 | -- second run engine is 'vinyl' 72 | ``` 73 | 74 | "engine" value has a special meaning for *.test.sql files: if it is "memtx" or 75 | "vinyl", then the corresponding default engine will be set before executing 76 | commands from a test file. An engine is set with the following commands: 77 | 78 | ```sql 79 | UPDATE "_session_settings" SET "value" = 'memtx|vinyl' WHERE "name" = 'sql_default_engine' 80 | pragma sql_default_engine='memtx|vinyl' 81 | ``` 82 | 83 | If the first fails, then the second will be executed. If both fail, the test fails. 84 | 85 | #### Python 86 | 87 | Files: `.test.py`, `.result` and `.skipcond` (optionally). 88 | 89 | Environment: 90 | 91 | * `sql` - `BoxConnection` class. Converts our subset of SQL into an IProto query 92 | and then decodes it. Prints into `.result` in YAML. Examples: 93 | * `sql("select * from t where k=[ limit ]")` 94 | * `sql("insert into t values ([ [, ]*])")` 95 | * `sql("delete from t where k=")` 96 | * `sql("call ([string|number]*)")` 97 | * `sql("update t set [k= [, k=]*] where k="")` 98 | * `sql("ping")` 99 | * `admin` - `AdminConnection` - simply sends an admin query to the admin port (Lua), 100 | then receives the answer. Examples: 101 | * `admin('box.info')` 102 | 103 | **Example:** 104 | 105 | ```python 106 | import os 107 | import time 108 | 109 | from lib.admin_connection import AdminConnection 110 | from lib.tarantool_server import TarantoolServer 111 | 112 | master = server 113 | admin("box.info.lsn") # equivalent to master.admin("box.info.lsn") and server.admin(...)
114 | sql("select * from t0 where k0=1") 115 | replica = TarantoolServer() 116 | replica.script = 'replication/replica.lua' 117 | replica.vardir = os.path.join(server.vardir, "replica") 118 | replica.deploy() 119 | master.admin("box.insert(0, 1, 'hello')") 120 | print('sleep_1') 121 | time.sleep(0.1) 122 | print('sleep_finished') 123 | print('sleep_2') 124 | admin("require('fiber').sleep(0.1)") 125 | print('sleep_finished') 126 | replica.admin("box.select(0, 0, 1)") 127 | con2 = AdminConnection('localhost', server.admin.port) 128 | con2("box.info.lsn") 129 | replica.stop() 130 | replica.cleanup() 131 | con2.disconnect() 132 | ``` 133 | 134 | **Result:** 135 | 136 | ```yaml 137 | box.info.lsn 138 | --- 139 | - null 140 | ... 141 | select * from t0 where k0=1 142 | --- 143 | - error: 144 | errcode: ER_NO_SUCH_SPACE 145 | errmsg: Space '#0' does not exist 146 | ... 147 | box.insert(0, 1, 'hello') 148 | --- 149 | - error: '[string "return box.insert(0, 1, ''hello'')"]:1: attempt to call field ''insert'' 150 | (a nil value)' 151 | ... 152 | sleep_1 153 | sleep_finished 154 | sleep_2 155 | require('fiber').sleep(0.1) 156 | --- 157 | ... 158 | sleep_finished 159 | box.select(0, 0, 1) 160 | --- 161 | - error: '[string "return box.select(0, 0, 1)"]:1: attempt to call field ''select'' 162 | (a nil value)' 163 | ... 164 | box.info.lsn 165 | --- 166 | - null 167 | ... 168 | ``` 169 | 170 | #### Lua 171 | 172 | Files: `.test.lua`, `.result` and `.skipcond`(optionaly). 173 | Tests interact only with `AdminConnection`. Supports some preprocessor functions (eg `delimiter`) 174 | 175 | **Delimiter example:** 176 | 177 | ``` 178 | env = require('test_run') 179 | test_run = env.new() 180 | box.schema.space.create('temp') 181 | t1 = box.space.temp 182 | t1:create_index('primary', { type = 'hash', parts = {1, 'num'}, unique = true}) 183 | t1:insert{0, 1, 'hello'} 184 | test_run:cmd("setopt delimiter ';'") 185 | function test() 186 | return {1,2,3} 187 | end; 188 | test( 189 | ); 190 | test_run:cmd("setopt delimiter ''"); 191 | test( 192 | ); 193 | test 194 | ``` 195 | 196 | **Delimiter result:** 197 | 198 | ``` 199 | env = require('test_run') 200 | test_run = env.new() 201 | box.schema.space.create('temp') 202 | --- 203 | - index: [] 204 | on_replace: 'function: 0x40e4fdf0' 205 | temporary: false 206 | id: 512 207 | engine: memtx 208 | enabled: false 209 | name: temp 210 | field_count: 0 211 | - created 212 | ... 213 | t1 = box.space.temp 214 | --- 215 | ... 216 | t1:create_index('primary', { type = 'hash', parts = {1, 'num'}, unique = true}) 217 | --- 218 | ... 219 | t1:insert{0, 1, 'hello'} 220 | --- 221 | - [0, 1, 'hello'] 222 | ... 223 | test_run:cmd("setopt delimiter ';'") 224 | function test() 225 | return {1,2,3} 226 | end; 227 | --- 228 | ... 229 | test( 230 | ); 231 | --- 232 | - - 1 233 | - 2 234 | - 3 235 | ... 236 | test_run:cmd("setopt delimiter ''"); 237 | test( 238 | --- 239 | - error: '[string "test( "]:1: unexpected symbol near ''''' 240 | ... 241 | ); 242 | --- 243 | - error: '[string "); "]:1: unexpected symbol near '')''' 244 | ... 245 | test 246 | --- 247 | - 'function: 0x40e533b8' 248 | ... 249 | ``` 250 | 251 | It is possible to use backslash at and of a line to carry it. 252 | 253 | ```lua 254 | function echo(...) \ 255 | return ... \ 256 | end 257 | ``` 258 | 259 | #### SQL 260 | 261 | *.test.sql files are just SQL statements written line-by-line. 262 | 263 | It is possible to mix SQL and Lua commands using `\set language lua` and `\set 264 | language sql` commands. 
265 | 266 | ##### Interaction with the test environment 267 | 268 | In a Lua test you can use the `test_run` module to interact with the test 269 | environment. 270 | 271 | ```lua 272 | env = require('test_run') 273 | test_run = env.new() 274 | test_run:cmd("<command>") 275 | ``` 276 | 277 | __Base directives:__ 278 | 279 | * `setopt delimiter '<delimiter>'` - Sets delimiter to `<delimiter>`\n 280 | 281 | __Server directives:__ 282 | 283 | * `create server <name> with ...` - Create a server with name `<name>`, where `...` 284 | may be: 285 | * `script = '<path>'` - script to start 286 | * `rpl_master = <name>` - replication master server name 287 | * `start server <name>` - Run server `<name>` 288 | * `stop server <name> [with signal=<signal>]` - Stop server `<name>` 289 | * `<signal>` is a signal name (with or without the 'SIG' prefix, uppercased) or 290 | a signal number to use instead of the default SIGTERM 291 | * `cleanup server <name>` - Clean up the server (basically after it has been stopped) 292 | * `restart server <name>` - Restart server `<name>` (you can restart yourself 293 | from Lua!) 294 | 295 | __Connection switch:__ 296 | 297 | * `switch <name>` - Switch the connection to server `<name>` and add `test_run` into the 298 | global scope 299 | 300 | __Connection directives (low level):__ 301 | 302 | * `create connection <connection> to <name>` - create a connection named 303 | `<connection>` to the `<name>` server 304 | * `drop connection <connection>` - Turn connection `<connection>` off and delete it 305 | * `set connection <connection>` - Set connection `<connection>` to be the main one for the next commands 306 | 307 | __Filter directives:__ 308 | 309 | * `push filter '<from>' to '<to>'` - e.g. `push filter 'listen: .*' to 'listen: <uri>'` 310 | 311 | __Set variables:__ 312 | 313 | * `set variables '<variable_name>' to '<where>'` - execute 314 | `<variable_name> = *`, where `*` is the value of `<where>`. `<where>` must be one of: 315 | * `<server_name>.admin` - admin port of this server 316 | * `<server_name>.master` - listen port of the master of this replica 317 | * `<server_name>.listen` - listen port of this server 318 | 319 | __Dev ops features:__ 320 | 321 | You can power on any number of Tarantool replicas in a loop. 322 | 323 | ```lua 324 | test_run:cmd('setopt delimiter ";"') 325 | function join(inspector, n) 326 | for i=1,n do 327 | local rid = tostring(i) 328 | os.execute('mkdir -p tmp') 329 | os.execute('cp ../replication/replica.lua ./tmp/replica'..rid..'.lua') 330 | os.execute('chmod +x ./tmp/replica'..rid..'.lua') 331 | inspector:cmd("create server replica"..rid.." with rpl_master=default, script='./var/tmp/replica"..rid..".lua'") 332 | inspector:cmd("start server replica"..rid) 333 | end 334 | end; 335 | test_run:cmd('setopt delimiter ""'); 336 | 337 | -- create 30 replicas for current tarantool 338 | join(test_run, 30) 339 | ``` 340 | 341 | ### pretest_clean() 342 | 343 | Nothing will be done before a Python test and for `core = unittest` 344 | test suites. 345 | 346 | For `core = [app|tarantool]` test suites this function removes Tarantool WAL 347 | and snapshot files before each test. 348 | 349 | The following files will be removed: 350 | 351 | * `*.snap` 352 | * `*.xlog` 353 | * `*.vylog` 354 | * `*.inprogress` 355 | * `[0-9]*/` 356 | 357 | ### Tags 358 | 359 | Usage: 360 | 361 | ```sh 362 | ./test-run.py --tags foo 363 | ./test-run.py --tags foo,bar app/ app-tap/ 364 | ``` 365 | 366 | test-run will run only those tests which have at least one of the 367 | provided tags. 368 | 369 | Show a list of tags: 370 | 371 | ```sh 372 | ./test-run.py --tags 373 | ./test-run.py app-tap/ --tags 374 | ``` 375 | 376 | The tags metainfo should be placed within the first comment of a test 377 | file.
378 | 379 | Examples: 380 | 381 | * .lua file: 382 | 383 | ```lua 384 | #!/usr/bin/tarantool 385 | 386 | -- tags: foo, bar 387 | -- tags: one, more 388 | 389 | <...> 390 | ``` 391 | 392 | * .sql file: 393 | 394 | ```sql 395 | -- tags: foo 396 | -- tags: bar 397 | <...> 398 | ``` 399 | 400 | * .py file: 401 | 402 | ```python 403 | # tags: foo 404 | 405 | <...> 406 | ``` 407 | 408 | Unsupported features: 409 | 410 | * Marking unit tests with tags. 411 | * Multiline comments (use singleline ones for now). 412 | 413 | ### Using luatest 414 | 415 | test-run supports tests written in the [luatest][luatest] format. `*_test.lua` 416 | files in a `core = luatest` test suite are run as part of `./test/test-run.py` 417 | invocation: no extra actions are needed. 418 | 419 | You can also run a particular test using a substring of its full name: 420 | 421 | ```shell 422 | $ ./test/test-run.py foo-luatest/bar_test.lua 423 | $ ./test/test-run.py bar_test.lua 424 | $ ./test/test-run.py bar 425 | ``` 426 | 427 | If you need to run a particular test case from a luatest compatible test, use 428 | `luatest` command directly. In order to use luatest, which is bundled into 429 | test-run, source test-run's environment: 430 | 431 | ```shell 432 | $ . <(./test/test-run.py --env) 433 | $ luatest -v -p my_specific_test_case 434 | ``` 435 | 436 | ### Used By 437 | 438 | - [Tarantool](https://github.com/tarantool/tarantool) - in-memory database and application server 439 | - [memcached](https://github.com/tarantool/memcached) - Memcached protocol 'wrapper' for Tarantool 440 | - [vshard](https://github.com/tarantool/vshard) - sharding based on virtual buckets 441 | - xsync (internal project) 442 | 443 | 444 | 445 | [luatest]: https://github.com/tarantool/luatest 446 | -------------------------------------------------------------------------------- /bin/luatest: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | -- 4 | -- Add the luatest module to LUA_PATH so that it can be used in processes 5 | -- spawned by tests. 6 | -- 7 | local fio = require('fio') 8 | local path = package.search('luatest') 9 | if path == nil then 10 | error('luatest not found') 11 | end 12 | path = fio.dirname(path) -- strip init.lua 13 | path = fio.dirname(path) -- strip luatest 14 | os.setenv('LUA_PATH', 15 | path .. '/?.lua;' .. path .. '/?/init.lua;' .. 16 | (os.getenv('LUA_PATH') or ';')) 17 | 18 | print(('Tarantool version is %s'):format(require('tarantool').version)) 19 | 20 | require('luatest.cli_entrypoint')() 21 | -------------------------------------------------------------------------------- /dispatcher.py: -------------------------------------------------------------------------------- 1 | import os 2 | import signal 3 | import time 4 | import select 5 | import random 6 | import functools 7 | import yaml 8 | 9 | import multiprocessing 10 | 11 | # Queue is available from multiprocessing.queues on all Python 12 | # versions known at the moment of writting the code (up to 3.12). 13 | # 14 | # However the mandatory argument 'ctx' 15 | # (see multiprocessing.get_context()) was added to the constructor 16 | # of Queue from multiprocessing.queues since Python 3.4 ([1]). 17 | # 18 | # So we should import Queue from multiprocessing on Python 3.4+ 19 | # to uniformly instantiate it (without constructor arguments). 
20 | # 21 | # [1]: https://bugs.python.org/issue18999 22 | try: 23 | # Python 3.4+ 24 | from multiprocessing import Queue 25 | except ImportError: 26 | # Python 2 27 | from multiprocessing.queues import Queue 28 | 29 | from lib import Options 30 | from lib.sampler import sampler 31 | from lib.utils import set_fd_cloexec 32 | from lib.worker import WorkerTaskResult, WorkerDone 33 | from lib.colorer import color_stdout 34 | from listeners import ArtifactsWatcher 35 | from listeners import FailWatcher 36 | from listeners import HangWatcher 37 | from listeners import LogOutputWatcher 38 | from listeners import OutputWatcher 39 | from listeners import StatisticsWatcher 40 | 41 | 42 | class Dispatcher: 43 | """Run specified count of worker processes ('max_workers_cnt' arg), pass 44 | task IDs (via 'task_queue'), receive results and output (via 45 | 'result_queue') and pass it to listeners. Workers as well as tasks have 46 | types and certain task can be run only on worker of that type. To being 47 | abstract we get 'task_groups' argument contains worker generators (the 48 | callable working as factory of workers) and IDs of task that can be 49 | executed on such workers. The structure of this argument is the following: 50 | ``` 51 | task_groups = { 52 | 'some_key_1': { 53 | 'gen_worker': function, 54 | 'task_ids': list, 55 | 'is_parallel': bool, 56 | 'show_reproduce_content': bool, 57 | } 58 | ... 59 | } 60 | 61 | ``` 62 | Usage (simplified and w/o exception catching): 63 | ``` 64 | task_groups = ... 65 | dispatcher = Dispatcher(task_groups, max_workers_count=8, randomize=True) 66 | dispatcher.start() 67 | dispatcher.wait() 68 | dispatcher.statistics.print_statistics() 69 | dispatcher.wait_processes() 70 | ``` 71 | """ 72 | def __init__(self, task_groups, max_workers_cnt, randomize): 73 | self.pids = [] 74 | self.processes = [] 75 | self.result_queues = [] 76 | self.task_queues = [] 77 | self.workers_cnt = 0 78 | self.worker_next_id = 1 79 | 80 | tasks_cnt = 0 81 | self.task_queue_disps = dict() 82 | for key, task_group in task_groups.items(): 83 | tasks_cnt += len(task_group['task_ids']) 84 | task_queue_disp = TaskQueueDispatcher(key, task_group, randomize) 85 | self.task_queue_disps[key] = task_queue_disp 86 | self.result_queues.append(task_queue_disp.result_queue) 87 | self.task_queues.append(task_queue_disp.task_queue) 88 | self.total_tasks_cnt = tasks_cnt 89 | 90 | self.report_timeout = 0.1 91 | 92 | self.statistics = None 93 | self.artifacts = None 94 | self.fail_watcher = None 95 | self.listeners = None 96 | self.init_listeners() 97 | 98 | self.max_workers_cnt = min(max_workers_cnt, tasks_cnt) 99 | 100 | self.pid_to_worker_id = dict() 101 | self.worker_id_to_pid = dict() 102 | 103 | self.randomize = randomize 104 | 105 | def terminate_all_workers(self): 106 | for process in self.processes: 107 | if process.is_alive(): 108 | try: 109 | process.terminate() 110 | except OSError: 111 | pass 112 | 113 | def kill_all_workers(self): 114 | for pid in self.pids: 115 | try: 116 | os.kill(pid, signal.SIGKILL) 117 | except OSError: 118 | pass 119 | 120 | def init_listeners(self): 121 | args = Options().args 122 | watch_hang = args.no_output_timeout >= 0 and \ 123 | not args.gdb and \ 124 | not args.gdbserver and \ 125 | not args.lldb and \ 126 | not args.valgrind 127 | watch_fail = not Options().args.is_force 128 | 129 | log_output_watcher = LogOutputWatcher() 130 | self.statistics = StatisticsWatcher(log_output_watcher.get_logfile, 131 | self.total_tasks_cnt) 132 | self.artifacts = 
ArtifactsWatcher(log_output_watcher.get_logfile) 133 | output_watcher = OutputWatcher() 134 | self.listeners = [self.statistics, log_output_watcher, output_watcher, self.artifacts] 135 | if sampler.is_enabled: 136 | self.listeners.append(sampler.watcher) 137 | if watch_fail: 138 | self.fail_watcher = FailWatcher(self.terminate_all_workers) 139 | self.listeners.append(self.fail_watcher) 140 | if watch_hang: 141 | warn_timeout = 60.0 if args.long else 10.0 142 | hang_watcher = HangWatcher(output_watcher.not_done_worker_ids, 143 | self.kill_all_workers, warn_timeout, 144 | float(args.no_output_timeout)) 145 | self.listeners.append(hang_watcher) 146 | 147 | def run_max_workers(self): 148 | ok = True 149 | new_workers_cnt = self.max_workers_cnt - self.workers_cnt 150 | while ok and new_workers_cnt > 0: 151 | ok = self.add_worker() 152 | new_workers_cnt = self.max_workers_cnt - self.workers_cnt 153 | 154 | def start(self): 155 | self.run_max_workers() 156 | 157 | def find_nonempty_task_queue_disp(self): 158 | """Find TaskQueueDispatcher that doesn't reported it's 'done' (don't 159 | want more workers created for working on its task queue). 160 | """ 161 | task_queue_disps_rnd = list( 162 | self.task_queue_disps.values()) 163 | if self.randomize: 164 | random.shuffle(task_queue_disps_rnd) 165 | # run all parallel groups first 166 | for task_queue_disp in task_queue_disps_rnd: 167 | if not task_queue_disp.is_parallel: 168 | continue 169 | if task_queue_disp.done: 170 | continue 171 | return task_queue_disp 172 | # then run all rest groups in a sequence 173 | self.max_workers_cnt = 1 174 | for task_queue_disp in task_queue_disps_rnd: 175 | if len(task_queue_disp.worker_ids) > 0: 176 | continue 177 | if task_queue_disp.done: 178 | continue 179 | return task_queue_disp 180 | return None 181 | 182 | def get_task_queue_disp(self, worker_id): 183 | """Get TaskQueueDispatcher instance which contains certain worker by 184 | worker_id. 
185 | """ 186 | for task_queue_disp in self.task_queue_disps.values(): 187 | if worker_id in task_queue_disp.worker_ids: 188 | return task_queue_disp 189 | return None 190 | 191 | def add_worker(self): 192 | # don't add new workers if fail occured and --force not passed 193 | if self.fail_watcher and self.fail_watcher.got_fail: 194 | return False 195 | task_queue_disp = self.find_nonempty_task_queue_disp() 196 | if not task_queue_disp: 197 | return False 198 | # self.max_workers_cnt can be changed in 199 | # find_nonempty_task_queue_disp() 200 | if self.workers_cnt >= self.max_workers_cnt: 201 | return False 202 | process = task_queue_disp.add_worker(self.worker_next_id) 203 | self.processes.append(process) 204 | self.pids.append(process.pid) 205 | self.pid_to_worker_id[process.pid] = self.worker_next_id 206 | self.worker_id_to_pid[self.worker_next_id] = process.pid 207 | 208 | self.workers_cnt += 1 209 | self.worker_next_id += 1 210 | 211 | return True 212 | 213 | def del_worker(self, worker_id): 214 | pid = self.worker_id_to_pid[worker_id] 215 | 216 | task_queue_disp = self.get_task_queue_disp(worker_id) 217 | task_queue_disp.del_worker(worker_id) 218 | self.workers_cnt -= 1 219 | 220 | self.pids.remove(pid) 221 | del self.worker_id_to_pid[worker_id] 222 | del self.pid_to_worker_id[pid] 223 | for process in self.processes: 224 | if process.pid == pid: 225 | self.processes.remove(process) 226 | break 227 | 228 | def mark_task_done(self, worker_id, task_id): 229 | task_queue_disp = self.get_task_queue_disp(worker_id) 230 | task_queue_disp.mark_task_done(task_id) 231 | 232 | def undone_tasks(self): 233 | res = [] 234 | for task_queue_disp in self.task_queue_disps.values(): 235 | res.extend(task_queue_disp.undone_tasks()) 236 | return res 237 | 238 | def report_undone(self, verbose): 239 | undone = self.undone_tasks() 240 | if not bool(undone): 241 | return False 242 | if verbose: 243 | color_stdout( 244 | '[Internal test-run error] ' 245 | 'The following tasks were dispatched to some worker task ' 246 | 'queue, but were not reported as done (does not matters ' 247 | 'success or fail):\n', schema='test_var') 248 | for task_id in undone: 249 | task_id_str = yaml.safe_dump(task_id, default_flow_style=True) 250 | color_stdout('- %s' % task_id_str) 251 | else: 252 | # Visually continue StatisticsWatcher.print_statistics() output. 253 | color_stdout('* undone: %d\n' % len(undone), schema='test_var') 254 | return True 255 | 256 | def wait(self): 257 | """Wait all workers reported its done via result_queues. But in the 258 | case when some worker process terminated prematurely 'invoke_listeners' 259 | can add fake WorkerDone markers (see also 'check_for_dead_processes'). 260 | """ 261 | while self.workers_cnt > 0: 262 | try: 263 | inputs = [q._reader for q in self.result_queues] 264 | ready_inputs, _, _ = select.select( 265 | inputs, [], [], self.report_timeout) 266 | except KeyboardInterrupt: 267 | self.flush_ready(inputs) 268 | raise 269 | 270 | objs = self.invoke_listeners(inputs, ready_inputs) 271 | for obj in objs: 272 | if isinstance(obj, WorkerTaskResult): 273 | self.mark_task_done(obj.worker_id, obj.task_id) 274 | elif isinstance(obj, WorkerDone): 275 | self.del_worker(obj.worker_id) 276 | if not objs: 277 | self.check_for_dead_processes() 278 | 279 | self.run_max_workers() 280 | 281 | def invoke_listeners(self, inputs, ready_inputs): 282 | """Returns received objects from result queue to allow Dispatcher 283 | update its structures. 
284 | """ 285 | # process timeout 286 | if not ready_inputs: 287 | for listener in self.listeners: 288 | listener.process_timeout(self.report_timeout) 289 | return [] 290 | 291 | # collect received objects 292 | objs = [] 293 | for ready_input in ready_inputs: 294 | result_queue = self.result_queues[inputs.index(ready_input)] 295 | while not result_queue.empty(): 296 | objs.append(result_queue.get()) 297 | 298 | # process received objects 299 | for obj in objs: 300 | for listener in self.listeners: 301 | listener.process_result(obj) 302 | 303 | return objs 304 | 305 | def flush_ready(self, inputs): 306 | """Write output from workers to stdout.""" 307 | # leave only output listeners in self.listeners 308 | new_listeners = [] 309 | for listener in self.listeners: 310 | if isinstance(listener, (LogOutputWatcher, 311 | OutputWatcher)): 312 | listener.report_at_timeout = False 313 | new_listeners.append(listener) 314 | self.listeners = new_listeners 315 | # wait some time until processes in our group get its SIGINTs and give 316 | # us some last output 317 | time.sleep(0.1) 318 | # collect and process ready inputs 319 | ready_inputs, _, _ = select.select(inputs, [], [], 0) 320 | self.invoke_listeners(inputs, ready_inputs) 321 | 322 | def check_for_dead_processes(self): 323 | for pid in self.pids: 324 | exited = False 325 | try: 326 | os.waitpid(pid, os.WNOHANG) 327 | except OSError: 328 | exited = True 329 | if exited: 330 | worker_id = self.pid_to_worker_id[pid] 331 | color_stdout( 332 | "[Main process] Worker %d don't reported work " 333 | "done using results queue, but the corresponding " 334 | "process seems dead. Removing it from Dispatcher.\n" 335 | % worker_id, schema='test_var') 336 | self.del_worker(worker_id) 337 | 338 | def wait_processes(self): 339 | for process in self.processes: 340 | process.join() 341 | self.processes = [] 342 | 343 | 344 | class TaskQueueDispatcher: 345 | """Incapsulate data structures necessary for dispatching workers working on 346 | the one task queue. 347 | """ 348 | def __init__(self, key, task_group, randomize): 349 | self.key = key 350 | self.gen_worker = task_group['gen_worker'] 351 | self.task_ids = task_group['task_ids'] 352 | self.is_parallel = task_group['is_parallel'] 353 | if self.is_parallel: 354 | self.randomize = randomize 355 | if self.randomize: 356 | random.shuffle(self.task_ids) 357 | else: 358 | self.randomize = False 359 | self.result_queue = Queue() 360 | self.task_queue = Queue() 361 | 362 | # Don't expose queues file descriptors over Popen to, say, tarantool 363 | # running tests. 
364 | set_fd_cloexec(self.result_queue._reader.fileno()) 365 | set_fd_cloexec(self.result_queue._writer.fileno()) 366 | set_fd_cloexec(self.task_queue._reader.fileno()) 367 | set_fd_cloexec(self.task_queue._writer.fileno()) 368 | 369 | for task_id in self.task_ids: 370 | self.task_queue.put(task_id) 371 | self.worker_ids = set() 372 | self.done = False 373 | self.done_task_ids = set() 374 | 375 | def _run_worker(self, worker_id): 376 | """Entry function for worker processes.""" 377 | os.environ['TEST_RUN_WORKER_ID'] = str(worker_id) 378 | color_stdout.queue = self.result_queue 379 | worker = self.gen_worker(worker_id) 380 | sampler.set_queue(self.result_queue, worker_id, worker.name) 381 | worker.run_all(self.task_queue, self.result_queue) 382 | 383 | def add_worker(self, worker_id): 384 | # Note: each of our workers should consume only one None, but for the 385 | # case of abnormal circumstances we listen for processes termination 386 | # (method 'check_for_dead_processes') and for time w/o output from 387 | # workers (class 'HangWatcher'). 388 | self.task_queue.put(None) # 'stop worker' marker 389 | 390 | entry = functools.partial(self._run_worker, worker_id) 391 | 392 | self.worker_ids.add(worker_id) 393 | process = multiprocessing.Process(target=entry) 394 | process.start() 395 | return process 396 | 397 | def del_worker(self, worker_id): 398 | self.worker_ids.remove(worker_id) 399 | # mark task queue as done when the first worker done to prevent cycling 400 | # with add-del workers 401 | self.done = True 402 | 403 | def mark_task_done(self, task_id): 404 | self.done_task_ids.add(task_id) 405 | 406 | def undone_tasks(self): 407 | # keeps an original order 408 | res = [] 409 | for task_id in self.task_ids: 410 | if task_id not in self.done_task_ids: 411 | res.append(task_id) 412 | return res 413 | -------------------------------------------------------------------------------- /lib/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import shutil 4 | 5 | from lib.options import Options 6 | from lib.tarantool_server import TarantoolServer 7 | from lib.unittest_server import UnittestServer 8 | from lib.app_server import AppServer 9 | from lib.luatest_server import LuatestServer 10 | from lib.utils import warn_unix_sockets_at_start 11 | from lib.utils import prepend_path 12 | 13 | 14 | __all__ = ['Options', 'saved_env'] 15 | 16 | 17 | def setenv(): 18 | """Find where is tarantool dir by check_file""" 19 | check_file = 'src/trivia/util.h' 20 | path = os.path.abspath('../') 21 | while path != '/': 22 | if os.path.isfile('%s/%s' % (path, check_file)): 23 | os.environ['TARANTOOL_SRC_DIR'] = path 24 | break 25 | path = os.path.abspath(os.path.join(path, '../')) 26 | 27 | 28 | _saved_env = None 29 | 30 | 31 | def saved_env(): 32 | return _saved_env 33 | 34 | 35 | def module_init(): 36 | """ Called at import """ 37 | global _saved_env 38 | _saved_env = dict(os.environ) 39 | 40 | args = Options().args 41 | # Change the current working directory to where all test 42 | # collections are supposed to reside 43 | # If script executed with (python test-run.py) dirname is '' 44 | # so we need to make it . 45 | path = os.path.dirname(sys.argv[0]) 46 | os.environ['TEST_RUN_DIR'] = os.path.dirname(os.path.realpath(sys.argv[0])) 47 | if not path: 48 | path = '.' 49 | os.chdir(path) 50 | setenv() 51 | 52 | # Keep the PWD environment variable in sync with a current 53 | # working directory. 
It does not strictly necessary, just to 54 | # avoid any confusion. 55 | os.environ['PWD'] = os.getcwd() 56 | 57 | warn_unix_sockets_at_start(args.vardir) 58 | 59 | # always run with clean (non-existent) 'var' directory 60 | try: 61 | shutil.rmtree(args.vardir) 62 | except OSError: 63 | pass 64 | 65 | args.builddir = os.path.abspath(os.path.expanduser(args.builddir)) 66 | 67 | SOURCEDIR = os.path.dirname(os.getcwd()) 68 | BUILDDIR = args.builddir 69 | os.environ["SOURCEDIR"] = SOURCEDIR 70 | os.environ["BUILDDIR"] = BUILDDIR 71 | soext = sys.platform == 'darwin' and 'dylib' or 'so' 72 | 73 | os.environ["LUA_PATH"] = SOURCEDIR+"/?.lua;"+SOURCEDIR+"/?/init.lua;;" 74 | os.environ["LUA_CPATH"] = BUILDDIR+"/?."+soext+";;" 75 | os.environ["REPLICATION_SYNC_TIMEOUT"] = str(args.replication_sync_timeout) 76 | os.environ['MEMTX_ALLOCATOR'] = args.memtx_allocator 77 | 78 | prepend_path(os.path.join(os.environ['TEST_RUN_DIR'], 'bin')) 79 | 80 | TarantoolServer.find_exe(args.builddir, executable=args.executable) 81 | UnittestServer.find_exe(args.builddir) 82 | AppServer.find_exe(args.builddir) 83 | LuatestServer.find_exe(args.builddir) 84 | 85 | Options().check_schema_upgrade_option(TarantoolServer.debug) 86 | 87 | 88 | # Init 89 | ###### 90 | 91 | 92 | module_init() 93 | -------------------------------------------------------------------------------- /lib/admin_connection.py: -------------------------------------------------------------------------------- 1 | __author__ = "Konstantin Osipov " 2 | 3 | # Redistribution and use in source and binary forms, with or without 4 | # modification, are permitted provided that the following conditions 5 | # are met: 6 | # 1. Redistributions of source code must retain the above copyright 7 | # notice, this list of conditions and the following disclaimer. 8 | # 2. Redistributions in binary form must reproduce the above copyright 9 | # notice, this list of conditions and the following disclaimer in the 10 | # documentation and/or other materials provided with the distribution. 11 | # 12 | # THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 13 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 14 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 15 | # ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 16 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 17 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 18 | # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 19 | # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 20 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 21 | # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 22 | # SUCH DAMAGE. 
23 | 24 | import re 25 | import sys 26 | 27 | from lib.tarantool_connection import TarantoolConnection 28 | from lib.tarantool_connection import TarantoolPool 29 | from lib.tarantool_connection import TarantoolAsyncConnection 30 | 31 | from lib.utils import bytes_to_str 32 | from lib.utils import str_to_bytes 33 | 34 | ADMIN_SEPARATOR = '\n' 35 | 36 | 37 | def get_handshake(sock, length=128, max_try=100): 38 | """ 39 | Correct way to get tarantool handshake 40 | """ 41 | result = b"" 42 | i = 0 43 | while len(result) != length and i < max_try: 44 | result = b"%s%s" % (result, sock.recv(length-len(result))) 45 | # max_try counter for tarantool/gh-1362 46 | i += 1 47 | return bytes_to_str(result) 48 | 49 | 50 | class AdminPool(TarantoolPool): 51 | def _new_connection(self): 52 | s = super(AdminPool, self)._new_connection() 53 | handshake = get_handshake(s) 54 | if handshake and not re.search(r'^Tarantool.*console.*', 55 | str(handshake)): 56 | # tarantool/gh-1163 57 | # 1. raise only if handshake is not full 58 | # 2. be silent on crashes or if it's server.stop() operation 59 | print('Handshake error {\n', handshake, '\n}') 60 | raise RuntimeError('Broken tarantool console handshake') 61 | return s 62 | 63 | 64 | class ExecMixIn(object): 65 | def cmd(self, socket, cmd, silent): 66 | socket.sendall(str_to_bytes(cmd)) 67 | 68 | bufsiz = 4096 69 | res = "" 70 | while True: 71 | buf = bytes_to_str(socket.recv(bufsiz)) 72 | if not buf: 73 | break 74 | res = res + buf 75 | if (res.rfind("\n...\n") >= 0 or res.rfind("\r\n...\r\n") >= 0): 76 | break 77 | 78 | if not silent: 79 | sys.stdout.write(res.replace("\r\n", "\n")) 80 | return res 81 | 82 | 83 | class BrokenConsoleHandshake(RuntimeError): 84 | pass 85 | 86 | 87 | class AdminConnection(TarantoolConnection, ExecMixIn): 88 | def execute_no_reconnect(self, command, silent): 89 | if not command: 90 | return 91 | if not silent: 92 | sys.stdout.write(command + ADMIN_SEPARATOR) 93 | cmd = command.replace('\n', ' ') + ADMIN_SEPARATOR 94 | return self.cmd(self.socket, cmd, silent) 95 | 96 | def connect(self): 97 | super(AdminConnection, self).connect() 98 | handshake = get_handshake(self.socket) 99 | if not re.search(r'^Tarantool.*console.*', str(handshake)): 100 | raise BrokenConsoleHandshake('Broken tarantool console handshake') 101 | 102 | 103 | class AdminAsyncConnection(TarantoolAsyncConnection, ExecMixIn): 104 | pool = AdminPool 105 | 106 | def execute_no_reconnect(self, command, silent): 107 | if not command: 108 | return 109 | if not silent: 110 | sys.stdout.write(command + ADMIN_SEPARATOR) 111 | cmd = command.replace('\n', ' ') + ADMIN_SEPARATOR 112 | 113 | result = None 114 | with self.connections.get() as sock: 115 | result = self.cmd(sock, cmd, silent) 116 | return result 117 | 118 | def execute(self, command, silent=True): 119 | if not self.is_connected: 120 | self.connect() 121 | try: 122 | return self.execute_no_reconnect(command, silent) 123 | except Exception: 124 | return None 125 | -------------------------------------------------------------------------------- /lib/app_server.py: -------------------------------------------------------------------------------- 1 | import errno 2 | import glob 3 | import os 4 | import re 5 | import shutil 6 | import signal 7 | import sys 8 | 9 | from gevent.subprocess import Popen 10 | 11 | from lib.colorer import color_stdout 12 | from lib.colorer import color_log 13 | from lib.colorer import qa_notice 14 | from lib.options import Options 15 | from lib.preprocessor import TestState 16 | from 
lib.sampler import sampler 17 | from lib.server import Server 18 | from lib.server import DEFAULT_SNAPSHOT_NAME 19 | from lib.tarantool_server import Test 20 | from lib.tarantool_server import TarantoolServer 21 | from lib.tarantool_server import TarantoolStartError 22 | from lib.utils import format_process 23 | from lib.utils import signame 24 | from lib.utils import warn_unix_socket 25 | from threading import Timer 26 | from lib.test import TestRunGreenlet, TestExecutionError 27 | 28 | 29 | def timeout_handler(server_process, test_timeout): 30 | color_stdout("Test timeout of %d secs reached\t" % test_timeout, schema='error') 31 | server_process.kill() 32 | 33 | 34 | def run_server(execs, cwd, server, logfile, retval, test_id): 35 | os.putenv("LISTEN", server.listen_uri) 36 | with open(logfile, 'ab') as f: 37 | server.process = Popen(execs, stdout=sys.stdout, stderr=f, cwd=cwd) 38 | sampler.register_process(server.process.pid, test_id, server.name) 39 | test_timeout = Options().args.test_timeout 40 | timer = Timer(test_timeout, timeout_handler, (server.process, test_timeout)) 41 | timer.start() 42 | retval['returncode'] = server.process.wait() 43 | timer.cancel() 44 | server.process = None 45 | 46 | 47 | class AppTest(Test): 48 | def execute(self, server): 49 | super(AppTest, self).execute(server) 50 | ts = TestState(self.suite_ini, None, TarantoolServer, 51 | self.run_params, 52 | default_server_no_connect=server) 53 | self.inspector.set_parser(ts) 54 | 55 | execs = server.prepare_args() 56 | retval = dict() 57 | tarantool = TestRunGreenlet(run_server, execs, server.vardir, server, 58 | server.logfile, retval, self.id) 59 | self.current_test_greenlet = tarantool 60 | 61 | # Copy the snapshot right before starting the server. 62 | # Otherwise pretest_clean() would remove it. 63 | if server.snapshot_path: 64 | snapshot_dest = os.path.join(server.vardir, DEFAULT_SNAPSHOT_NAME) 65 | color_log("Copying snapshot {} to {}\n".format( 66 | server.snapshot_path, snapshot_dest)) 67 | shutil.copy(server.snapshot_path, snapshot_dest) 68 | 69 | try: 70 | tarantool.start() 71 | tarantool.join() 72 | except TarantoolStartError: 73 | # A non-default server failed to start. 74 | raise TestExecutionError 75 | finally: 76 | self.teardown(server, ts) 77 | if retval.get('returncode', None) != 0: 78 | raise TestExecutionError 79 | 80 | def teardown(self, server, ts): 81 | # Stop any servers created by the test, except the 82 | # default one. 83 | # 84 | # See a comment in LuaTest.execute() for motivation of 85 | # SIGKILL usage. 86 | ts.stop_nondefault(signal=signal.SIGKILL) 87 | 88 | # When a supplementary (non-default) server fails, we 89 | # should not leave the process that executes an app test. 90 | # Let's kill it. 91 | # 92 | # Reuse AppServer.stop() code for convenience. 
93 | server.stop(signal=signal.SIGKILL) 94 | 95 | 96 | class AppServer(Server): 97 | """A dummy server implementation for application server tests""" 98 | def __new__(cls, ini=None, *args, **kwargs): 99 | cls = Server.get_mixed_class(cls, ini) 100 | return object.__new__(cls) 101 | 102 | def __init__(self, _ini=None, test_suite=None): 103 | ini = dict(vardir=None) 104 | ini.update({} if _ini is None else _ini) 105 | Server.__init__(self, ini, test_suite) 106 | self.testdir = os.path.abspath(os.curdir) 107 | self.vardir = ini['vardir'] 108 | self.builddir = ini['builddir'] 109 | self.lua_libs = ini['lua_libs'] 110 | self.name = 'app_server' 111 | self.process = None 112 | self.localhost = '127.0.0.1' 113 | self.use_unix_sockets_iproto = ini['use_unix_sockets_iproto'] 114 | 115 | @property 116 | def logfile(self): 117 | # remove suite name using basename 118 | test_name = os.path.basename(self.current_test.name) 119 | # add .conf_name if any 120 | if self.current_test.conf_name is not None: 121 | test_name += '.' + self.current_test.conf_name 122 | # add '.tarantool.log' 123 | file_name = test_name + '.tarantool.log' 124 | # put into vardir 125 | return os.path.join(self.vardir, file_name) 126 | 127 | def prepare_args(self, args=[]): 128 | # Disable stdout bufferization. 129 | cli_args = [self.binary, '-e', "io.stdout:setvbuf('no')"] 130 | 131 | # Disable schema upgrade if requested. 132 | if self.disable_schema_upgrade: 133 | cli_args.extend(['-e', self.DISABLE_AUTO_UPGRADE]) 134 | 135 | # Add path to the script (the test). 136 | cli_args.extend([os.path.join(os.getcwd(), self.current_test.name)]) 137 | 138 | # Add extra args if provided. 139 | cli_args.extend(args) 140 | 141 | return cli_args 142 | 143 | def deploy(self, vardir=None, silent=True, need_init=True): 144 | self.vardir = vardir 145 | if not os.access(self.vardir, os.F_OK): 146 | os.makedirs(self.vardir) 147 | if self.lua_libs: 148 | for i in self.lua_libs: 149 | source = os.path.join(self.testdir, i) 150 | try: 151 | if os.path.isdir(source): 152 | shutil.copytree(source, 153 | os.path.join(self.vardir, 154 | os.path.basename(source))) 155 | else: 156 | shutil.copy(source, self.vardir) 157 | except IOError as e: 158 | if (e.errno == errno.ENOENT): 159 | continue 160 | raise 161 | if self.use_unix_sockets_iproto: 162 | path = os.path.join(self.vardir, self.name + ".i") 163 | warn_unix_socket(path) 164 | self.listen_uri = path 165 | else: 166 | self.listen_uri = self.localhost + ':0' 167 | shutil.copy(os.path.join(self.TEST_RUN_DIR, 'test_run.lua'), 168 | self.vardir) 169 | 170 | # Note: we don't know the instance name of the tarantool server, so 171 | # cannot check length of path of *.control unix socket created by it. 172 | # So for 'app' tests type we don't check *.control unix sockets paths. 173 | 174 | def stop(self, silent=True, signal=signal.SIGTERM): 175 | # FIXME: Extract common parts of AppServer.stop() and 176 | # TarantoolServer.stop() to an utility function. 
177 | 178 | color_log('DEBUG: [app server] Stopping the server...\n', 179 | schema='info') 180 | 181 | if not self.process: 182 | color_log(' | Nothing to do: the process does not exist\n', 183 | schema='info') 184 | return 185 | 186 | if self.process.returncode: 187 | if self.process.returncode < 0: 188 | signaled_by = -self.process.returncode 189 | color_log(' | Nothing to do: the process was terminated by ' 190 | 'signal {} ({})\n'.format(signaled_by, 191 | signame(signaled_by)), 192 | schema='info') 193 | else: 194 | color_log(' | Nothing to do: the process was exited with code ' 195 | '{}\n'.format(self.process.returncode), 196 | schema='info') 197 | return 198 | 199 | color_log(' | Sending signal {0} ({1}) to {2}\n'.format( 200 | signal, signame(signal), 201 | format_process(self.process.pid))) 202 | try: 203 | self.process.send_signal(signal) 204 | except OSError: 205 | pass 206 | 207 | # Waiting for stopping the server. If the timeout 208 | # reached, send SIGKILL. 209 | timeout = 5 210 | 211 | def kill(): 212 | qa_notice('The app server does not stop during {} ' 213 | 'seconds after the {} ({}) signal.\n' 214 | 'Info: {}\n' 215 | 'Sending SIGKILL...'.format( 216 | timeout, signal, signame(signal), 217 | format_process(self.process.pid))) 218 | try: 219 | self.process.kill() 220 | except OSError: 221 | pass 222 | 223 | timer = Timer(timeout, kill) 224 | timer.start() 225 | self.process.wait() 226 | timer.cancel() 227 | 228 | @classmethod 229 | def find_exe(cls, builddir): 230 | cls.builddir = builddir 231 | cls.binary = TarantoolServer.binary 232 | cls.debug = bool(re.findall(r'^Target:.*-Debug$', str(cls.version()), 233 | re.M)) 234 | 235 | @staticmethod 236 | def find_tests(test_suite, suite_path): 237 | def patterned(test_name, patterns): 238 | answer = [] 239 | for i in patterns: 240 | if test_name.find(i) != -1: 241 | answer.append(test_name) 242 | return answer 243 | 244 | def is_correct(run): 245 | return test_suite.args.conf is None or test_suite.args.conf == run 246 | 247 | test_suite.ini['suite'] = suite_path 248 | 249 | test_names = sorted(glob.glob(os.path.join(suite_path, "*.test.lua"))) 250 | test_names = Server.exclude_tests(test_names, test_suite.args.exclude) 251 | test_names = sum(map((lambda x: patterned(x, test_suite.args.tests)), 252 | test_names), []) 253 | tests = [] 254 | 255 | for test_name in test_names: 256 | runs = test_suite.get_multirun_params(test_name) 257 | if runs: 258 | tests.extend([AppTest( 259 | test_name, 260 | test_suite.args, 261 | test_suite.ini, 262 | params=params, 263 | conf_name=conf_name 264 | ) for conf_name, params in runs.items() 265 | if is_correct(conf_name)]) 266 | else: 267 | tests.append(AppTest(test_name, 268 | test_suite.args, 269 | test_suite.ini)) 270 | 271 | test_suite.tests = tests 272 | -------------------------------------------------------------------------------- /lib/box_connection.py: -------------------------------------------------------------------------------- 1 | __author__ = "Konstantin Osipov " 2 | 3 | # Redistribution and use in source and binary forms, with or without 4 | # modification, are permitted provided that the following conditions 5 | # are met: 6 | # 1. Redistributions of source code must retain the above copyright 7 | # notice, this list of conditions and the following disclaimer. 8 | # 2. 
Redistributions in binary form must reproduce the above copyright 9 | # notice, this list of conditions and the following disclaimer in the 10 | # documentation and/or other materials provided with the distribution. 11 | # 12 | # THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 13 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 14 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 15 | # ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 16 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 17 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 18 | # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 19 | # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 20 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 21 | # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 22 | # SUCH DAMAGE. 23 | 24 | import errno 25 | import ctypes 26 | import re 27 | import socket 28 | 29 | from lib.tarantool_connection import TarantoolConnection 30 | 31 | # monkey patch tarantool and msgpack 32 | from lib.utils import check_libs 33 | from lib.utils import warn_unix_socket 34 | check_libs() 35 | 36 | from tarantool import Connection as tnt_connection # noqa: E402 37 | from tarantool import Schema # noqa: E402 38 | 39 | 40 | SEPARATOR = '\n' 41 | 42 | 43 | class BoxConnection(TarantoolConnection): 44 | def __init__(self, host, port): 45 | super(BoxConnection, self).__init__(host, port) 46 | if self.host == 'unix/' or re.search(r'^/', str(self.port)): 47 | warn_unix_socket(self.port) 48 | host = None 49 | 50 | self.py_con = tnt_connection(host, port, connect_now=False, 51 | socket_timeout=100) 52 | self.py_con.error = False 53 | self.sort = False 54 | 55 | def connect(self): 56 | self.py_con.connect() 57 | 58 | def authenticate(self, user, password): 59 | self.py_con.authenticate(user, password) 60 | 61 | def disconnect(self): 62 | self.py_con.close() 63 | 64 | def reconnect(self): 65 | if self.py_con.connected: 66 | self.disconnect() 67 | self.connect() 68 | 69 | def set_schema(self, schemadict): 70 | self.py_con.schema = Schema(schemadict) 71 | 72 | def check_connection(self): 73 | self.py_con._sys_recv(self.py_con._socket.fileno(), ' ', 1, 74 | socket.MSG_DONTWAIT | socket.MSG_PEEK) 75 | if ctypes.get_errno() == errno.EAGAIN: 76 | ctypes.set_errno(0) 77 | return True 78 | return False 79 | 80 | def execute_no_reconnect(self, command, silent=True): 81 | if not command: 82 | return 83 | if not silent: 84 | print(command) 85 | cmd = command.replace(SEPARATOR, ' ') + SEPARATOR 86 | response = self.py_con.call(cmd) 87 | if not silent: 88 | print(response) 89 | return response 90 | 91 | def execute(self, command, silent=True): 92 | return self.execute_no_reconnect(command, silent) 93 | 94 | def call(self, command, *args): 95 | if not command: 96 | return 97 | print('call {} {}'.format(command, args)) 98 | response = self.py_con.call(command, *args) 99 | result = str(response) 100 | print(result) 101 | return result 102 | -------------------------------------------------------------------------------- /lib/colorer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import sys 4 | 5 | 6 | # Use it to print messages on the screen and to the worker's log. 
7 | color_stdout = None # = Colorer(); below the class definition 8 | 9 | 10 | def color_log(*args, **kwargs): 11 | """ Print the message only to the log file, not on the screen. The intention is 12 | to use this function only for regular, non-error output that appears every run 13 | and is mostly not needed for a user (but useful when investigating an occurred 14 | problem). Don't hide errors and backtraces (or any other details of 15 | exceptional circumstances) from the screen, because such details are especially 16 | useful with CI bots. 17 | """ 18 | kwargs['log_only'] = True 19 | color_stdout(*args, **kwargs) 20 | 21 | 22 | def qa_notice(*args, **kwargs): 23 | """ Print a notice for a QA engineer at the terminal. 24 | 25 | Example:: 26 | 27 | * [QA Notice] 28 | * 29 | * Attempt to stop already stopped server 'foo' 30 | * 31 | """ 32 | # Import from the function to avoid recursive import. 33 | from lib.utils import prefix_each_line 34 | 35 | # Use 'info' color by default (yellow). 36 | if 'schema' not in kwargs: 37 | kwargs = dict(kwargs, schema='info') 38 | 39 | # Join all positional arguments (like color_stdout() does) and 40 | # decorate with a header and asterisks. 41 | data = ''.join([str(msg) for msg in args]) 42 | data = prefix_each_line('* ', data) 43 | data = '\n* [QA Notice]\n*\n{}*\n'.format(data) 44 | 45 | # Write out. 46 | color_stdout(data, **kwargs) 47 | 48 | 49 | def final_report(*args, **kwargs): 50 | color_stdout(*args, **kwargs) 51 | 52 | # https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables 53 | summary_file = os.environ.get('GITHUB_STEP_SUMMARY') 54 | if not summary_file: 55 | return 56 | 57 | # Join all positional arguments (like color_stdout() does) and 58 | # decolor it. 59 | data = ''.join([str(msg) for msg in args]) 60 | data = color_stdout.decolor(data) 61 | 62 | with open(summary_file, 'a') as f: 63 | f.write(data) 64 | 65 | 66 | def separator(sep): 67 | # Import from the function to avoid recursive import. 68 | from lib.utils import terminal_columns 69 | 70 | columns = terminal_columns() 71 | color_stdout(sep * columns, '\n', schema='separator') 72 | 73 | 74 | def test_line(name, conf=None): 75 | # Import from the function to avoid recursive import. 
76 | from lib.utils import terminal_columns 77 | from lib.utils import just_and_trim 78 | 79 | columns = terminal_columns() 80 | 81 | color_stdout(just_and_trim(name, 47) + ' ', schema='t_name') 82 | 83 | if conf is None: 84 | conf = '' 85 | color_stdout(just_and_trim(conf, columns - 67) + ' ', schema='test_var') 86 | 87 | 88 | class CSchema(object): 89 | objects = {} 90 | 91 | def __init__(self): 92 | self.main_objects = { 93 | 'diff_mark': {}, 94 | 'diff_in': {}, 95 | 'diff_out': {}, 96 | 'test_pass': {}, 97 | 'test_fail': {}, 98 | 'test_new': {}, 99 | 'test_skip': {}, 100 | 'test_disa': {}, 101 | 'error': {}, 102 | 'lerror': {}, 103 | 'tail': {}, 104 | 'ts_text': {}, 105 | 'path': {}, 106 | 'info': {}, 107 | 'separator': {}, 108 | 't_name': {}, 109 | 'serv_text': {}, 110 | 'version': {}, 111 | 'tr_text': {}, 112 | 'log': {}, 113 | } 114 | self.main_objects.update(self.objects) 115 | 116 | 117 | class SchemaAscetic(CSchema): 118 | objects = { 119 | 'diff_mark': {'fgcolor': 'magenta'}, 120 | 'diff_in': {'fgcolor': 'green'}, 121 | 'diff_out': {'fgcolor': 'red'}, 122 | 'test_pass': {'fgcolor': 'green'}, 123 | 'test_fail': {'fgcolor': 'red'}, 124 | 'test_new': {'fgcolor': 'lblue'}, 125 | 'test_skip': {'fgcolor': 'grey'}, 126 | 'test_disa': {'fgcolor': 'grey'}, 127 | 'error': {'fgcolor': 'red'}, 128 | 'info': {'fgcolor': 'yellow'}, 129 | 'good_status': {'fgcolor': 'black', 'bgcolor': 'lgreen', 'bold': True}, 130 | 'tentative_status': {'fgcolor': 'black', 'bgcolor': 'yellow', 'bold': True}, 131 | 'bad_status': {'fgcolor': 'white', 'bgcolor': 'red', 'bold': True}, 132 | 'test_var': {'fgcolor': 'yellow'}, 133 | 'test-run command': {'fgcolor': 'green'}, 134 | 'tarantool command': {'fgcolor': 'blue'}, 135 | } 136 | 137 | 138 | class SchemaPretty(CSchema): 139 | objects = { 140 | 'diff_mark': {'fgcolor': 'magenta'}, 141 | 'diff_in': {'fgcolor': 'blue'}, 142 | 'diff_out': {'fgcolor': 'red'}, 143 | 'test_pass': {'fgcolor': 'green'}, 144 | 'test_fail': {'fgcolor': 'red'}, 145 | 'test_new': {'fgcolor': 'lblue'}, 146 | 'test_skip': {'fgcolor': 'grey'}, 147 | 'test_disa': {'fgcolor': 'grey'}, 148 | 'error': {'fgcolor': 'red'}, 149 | 'lerror': {'fgcolor': 'lred'}, 150 | 'tail': {'fgcolor': 'lblue'}, 151 | 'ts_text': {'fgcolor': 'lmagenta'}, 152 | 'path': {'fgcolor': 'green', 'bold': True}, 153 | 'info': {'fgcolor': 'yellow', 'bold': True}, 154 | 'good_status': {'fgcolor': 'black', 'bgcolor': 'lgreen', 'bold': True}, 155 | 'tentative_status': {'fgcolor': 'black', 'bgcolor': 'yellow', 'bold': True}, 156 | 'bad_status': {'fgcolor': 'white', 'bgcolor': 'red', 'bold': True}, 157 | 'separator': {'fgcolor': 'blue'}, 158 | 't_name': {'fgcolor': 'lblue'}, 159 | 'serv_text': {'fgcolor': 'lmagenta'}, 160 | 'version': {'fgcolor': 'yellow', 'bold': True}, 161 | 'tr_text': {'fgcolor': 'green'}, 162 | 'log': {'fgcolor': 'grey'}, 163 | 'test_var': {'fgcolor': 'yellow'}, 164 | 'test-run command': {'fgcolor': 'green'}, 165 | 'tarantool command': {'fgcolor': 'blue'}, 166 | } 167 | 168 | 169 | class Colorer(object): 170 | """ 171 | Colorer/Styler based on VT220+ specifications (Not full). Based on: 172 | 1. ftp://ftp.cs.utk.edu/pub/shuford/terminal/dec_vt220_codes.txt 173 | 2. 
http://invisible-island.net/xterm/ctlseqs/ctlseqs.html 174 | """ 175 | fgcolor = { 176 | "black": '0;30', 177 | "red": '0;31', 178 | "green": '0;32', 179 | "brown": '0;33', 180 | "blue": '0;34', 181 | "magenta": '0;35', 182 | "cyan": '0;36', 183 | "grey": '0;37', 184 | "lgrey": '1;30', 185 | "lred": '1;31', 186 | "lgreen": '1;32', 187 | "yellow": '1;33', 188 | "lblue": '1;34', 189 | "lmagenta": '1;35', 190 | "lcyan": '1;36', 191 | "white": '1;37', 192 | } 193 | bgcolor = { 194 | "black": '0;40', 195 | "red": '0;41', 196 | "green": '0;42', 197 | "brown": '0;43', 198 | "blue": '0;44', 199 | "magenta": '0;45', 200 | "cyan": '0;46', 201 | "grey": '0;47', 202 | "lgrey": '1;40', 203 | "lred": '1;41', 204 | "lgreen": '1;42', 205 | "yellow": '1;43', 206 | "lblue": '1;44', 207 | "lmagenta": '1;45', 208 | "lcyan": '1;46', 209 | "white": '1;47', 210 | } 211 | attributes = { 212 | "bold": '1', 213 | "underline": '4', 214 | "blinking": '5', 215 | "negative": '7', 216 | "invisible": '8', 217 | } 218 | begin = "\033[" 219 | end = "m" 220 | disable = begin+'0'+end 221 | color_re = re.compile('\033' + r'\[\d(?:;\d\d)?m') 222 | 223 | def __init__(self): 224 | # These two fields can be filled later. It's for passing output from 225 | # workers via result queue. When worker initializes, it set these 226 | # fields and just use Colorer as before having multiplexed output. 227 | self.queue_msg_wrapper = None 228 | self.queue = None 229 | 230 | self.stdout = sys.stdout 231 | self.is_term = self.stdout.isatty() 232 | self.colors = None 233 | if self.is_term: 234 | try: 235 | p = os.popen('tput colors 2>/dev/null') 236 | self.colors = int(p.read()) 237 | except: # noqa: E722 238 | pass 239 | finally: 240 | p.close() 241 | schema = os.getenv('TT_SCHEMA', 'ascetic') 242 | if schema == 'ascetic': 243 | self.schema = SchemaAscetic() 244 | elif schema == 'pretty': 245 | self.schema = SchemaPretty() 246 | else: 247 | self.schema = CSchema() 248 | self.schema = self.schema.main_objects 249 | 250 | def set_stdout(self): 251 | sys.stdout = self 252 | 253 | def ret_stdout(self): 254 | sys.stdout = self.stdout 255 | 256 | def _write(self, obj, log_only): 257 | if self.queue: 258 | if self.queue_msg_wrapper: 259 | obj = self.queue_msg_wrapper(obj, log_only) 260 | self.queue.put(obj) 261 | elif not log_only: 262 | self.stdout.write(obj) 263 | 264 | def _flush(self): 265 | if not self.queue: 266 | self.stdout.flush() 267 | 268 | def write(self, *args, **kwargs): 269 | flags = [] 270 | if 'schema' in kwargs: 271 | kwargs.update(self.schema[kwargs['schema']]) 272 | for i in self.attributes: 273 | if i in kwargs and kwargs[i] is True: 274 | flags.append(self.attributes[i]) 275 | flags.append(self.fgcolor[kwargs['fgcolor']]) \ 276 | if 'fgcolor' in kwargs else None 277 | flags.append(self.bgcolor[kwargs['bgcolor']]) \ 278 | if 'bgcolor' in kwargs else None 279 | 280 | data = '' 281 | if self.is_term and flags: 282 | data += self.begin + (';'.join(flags)) + self.end 283 | for i in args: 284 | data += str(i) 285 | if self.is_term: 286 | # write 'color disable' before newline to better work with parallel 287 | # processes writing signle stdout/stderr 288 | if data.endswith('\n'): 289 | data = data[:-1] + self.disable + '\n' 290 | else: 291 | data += self.disable 292 | if data: 293 | self._write(data, kwargs.get('log_only', False)) 294 | self._flush() 295 | 296 | def __call__(self, *args, **kwargs): 297 | self.write(*args, **kwargs) 298 | 299 | def writeout_unidiff(self, diff): 300 | for i in diff: 301 | 302 | if not i.endswith('\n'): 
303 | i += "\n\\ No newline\n" 304 | 305 | if i.startswith('+'): 306 | self.write(i, schema='diff_in') 307 | elif i.startswith('-'): 308 | self.write(i, schema='diff_out') 309 | elif i.startswith('@'): 310 | self.write(i, schema='diff_mark') 311 | else: 312 | self.write(i) 313 | 314 | def flush(self): 315 | return self.stdout.flush() 316 | 317 | def fileno(self): 318 | return self.stdout.fileno() 319 | 320 | def isatty(self): 321 | return self.is_term 322 | 323 | def decolor(self, data): 324 | return self.color_re.sub('', data) 325 | 326 | 327 | # Globals 328 | ######### 329 | 330 | 331 | color_stdout = Colorer() 332 | 333 | 334 | def decolor(data): 335 | return color_stdout.decolor(data) 336 | -------------------------------------------------------------------------------- /lib/connpool.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import gevent 4 | try: 5 | from gevent.lock import BoundedSemaphore 6 | except ImportError: 7 | from gevent.coros import BoundedSemaphore # before gevent-1.0 8 | from gevent import socket 9 | from collections import deque 10 | from contextlib import contextmanager 11 | from functools import wraps 12 | 13 | from lib.test import TestRunGreenlet 14 | 15 | __all__ = ["ConnectionPool", "retry"] 16 | 17 | DEFAULT_EXC_CLASSES = (socket.error,) 18 | 19 | 20 | class ConnectionPool(object): 21 | """ 22 | Generic TCP connection pool, with the following features: 23 | * Configurable pool size 24 | * Auto-reconnection when a broken socket is detected 25 | * Optional periodic keepalive 26 | """ 27 | 28 | # Frequency at which the pool is populated at startup 29 | SPAWN_FREQUENCY = 0.1 30 | 31 | def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None): 32 | self.size = size 33 | self.conn = deque() 34 | self.lock = BoundedSemaphore(size) 35 | self.keepalive = keepalive 36 | # Exceptions list must be in tuple form to be caught properly 37 | self.exc_classes = tuple(exc_classes) 38 | for i in range(size): 39 | self.lock.acquire() 40 | for i in range(size): 41 | greenlet = TestRunGreenlet(self._addOne) 42 | greenlet.start_later(self.SPAWN_FREQUENCY * i) 43 | if self.keepalive: 44 | greenlet = TestRunGreenlet(self._keepalive_periodic) 45 | greenlet.start_later() 46 | 47 | def _new_connection(self): 48 | """ 49 | Estabilish a new connection (to be implemented in subclasses). 50 | """ 51 | raise NotImplementedError 52 | 53 | def _keepalive(self, c): 54 | """ 55 | Implement actual application-level keepalive (to be 56 | reimplemented in subclasses). 57 | 58 | :raise: socket.error if the connection has been closed or is broken. 59 | """ 60 | raise NotImplementedError() 61 | 62 | def _keepalive_periodic(self): 63 | delay = float(self.keepalive) / self.size 64 | while 1: 65 | try: 66 | with self.get() as c: 67 | self._keepalive(c) 68 | except self.exc_classes: 69 | # Nothing to do, the pool will generate a new connection later 70 | pass 71 | gevent.sleep(delay) 72 | 73 | def _addOne(self): 74 | stime = 0.1 75 | while 1: 76 | c = self._new_connection() 77 | if c: 78 | break 79 | gevent.sleep(stime) 80 | if stime < 400: 81 | stime *= 2 82 | 83 | self.conn.append(c) 84 | self.lock.release() 85 | 86 | @contextmanager 87 | def get(self): 88 | """ 89 | Get a connection from the pool, to make and receive traffic. 90 | 91 | If the connection fails for any reason (socket.error), it is dropped 92 | and a new one is scheduled. 
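As a sketch of the subclassing contract used by this pool (the class name, address and payload below are hypothetical; only _new_connection() is strictly required when keepalive is not set):

from gevent import socket as gsocket

from lib.connpool import ConnectionPool


class EchoPool(ConnectionPool):
    # Hypothetical subclass: open a plain TCP connection to a fixed address.
    def _new_connection(self):
        return gsocket.create_connection(('127.0.0.1', 7777))


pool = EchoPool(size=2)
with pool.get() as conn:
    conn.sendall(b'ping\n')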
Please use @retry as a way to automatically 93 | retry whatever operation you were performing. 94 | """ 95 | self.lock.acquire() 96 | try: 97 | c = self.conn.popleft() 98 | yield c 99 | except self.exc_classes: 100 | # The current connection has failed, drop it and create a new one 101 | greenlet = TestRunGreenlet(self._addOne) 102 | greenlet.start_later(1) 103 | raise 104 | except: # noqa: E722 105 | self.conn.append(c) 106 | self.lock.release() 107 | raise 108 | else: 109 | # NOTE: cannot use finally because MUST NOT reuse the connection 110 | # if it failed (socket.error) 111 | self.conn.append(c) 112 | self.lock.release() 113 | 114 | 115 | def retry(f, exc_classes=DEFAULT_EXC_CLASSES, logger=None, 116 | retry_log_level=logging.INFO, 117 | retry_log_message="Connection broken in '{f}' (error: '{e}'); " 118 | "retrying with new connection.", 119 | max_failures=None, interval=0, 120 | max_failure_log_level=logging.ERROR, 121 | max_failure_log_message="Max retries reached for '{f}'. Aborting."): 122 | """ 123 | Decorator to automatically reexecute a function if the connection is 124 | broken for any reason. 125 | """ 126 | exc_classes = tuple(exc_classes) 127 | 128 | @wraps(f) 129 | def deco(*args, **kwargs): 130 | failures = 0 131 | while True: 132 | try: 133 | return f(*args, **kwargs) 134 | except exc_classes as e: 135 | if logger is not None: 136 | logger.log(retry_log_level, 137 | retry_log_message.format(f=f.__name__, e=e)) 138 | gevent.sleep(interval) 139 | failures += 1 140 | if max_failures is not None \ 141 | and failures > max_failures: 142 | if logger is not None: 143 | logger.log(max_failure_log_level, 144 | max_failure_log_message.format( 145 | f=f.__name__, e=e)) 146 | raise 147 | return deco 148 | -------------------------------------------------------------------------------- /lib/error.py: -------------------------------------------------------------------------------- 1 | class TestRunInitError(Exception): 2 | def __init__(self, *args, **kwargs): 3 | super(TestRunInitError, self).__init__(*args, **kwargs) 4 | -------------------------------------------------------------------------------- /lib/inspector.py: -------------------------------------------------------------------------------- 1 | import os 2 | import yaml 3 | import traceback 4 | 5 | import gevent 6 | from gevent.lock import Semaphore 7 | from gevent.server import StreamServer 8 | 9 | from lib.utils import bytes_to_str 10 | from lib.utils import prefix_each_line 11 | from lib.utils import str_to_bytes 12 | from lib.colorer import color_stdout 13 | from lib.colorer import color_log 14 | from lib.colorer import qa_notice 15 | 16 | from lib.tarantool_server import TarantoolStartError 17 | from lib.preprocessor import LuaPreprocessorException 18 | 19 | 20 | # Module initialization 21 | ####################### 22 | 23 | 24 | def gevent_propagate_exc(): 25 | """Don't print backtraces and propagate the exception to the parent 26 | greenlet when Ctrl+C or startup fail hit the process when the active 27 | greenlet is one of the StreamServer owned. 28 | """ 29 | ghub = gevent.get_hub() 30 | for exc_t in [KeyboardInterrupt, TarantoolStartError]: 31 | if exc_t not in ghub.NOT_ERROR: 32 | ghub.NOT_ERROR = ghub.NOT_ERROR + (exc_t,) 33 | if exc_t not in ghub.SYSTEM_ERROR: 34 | ghub.SYSTEM_ERROR = ghub.SYSTEM_ERROR + (exc_t,) 35 | 36 | 37 | gevent_propagate_exc() 38 | 39 | 40 | # TarantoolInspector 41 | #################### 42 | 43 | 44 | class TarantoolInspector(StreamServer): 45 | """ 46 | Tarantool inspector daemon. 
Usage: 47 | inspector = TarantoolInspector('localhost', 8080) 48 | inspector.start() 49 | # run some tests 50 | inspector.stop() 51 | """ 52 | 53 | def __init__(self, host, port): 54 | super(TarantoolInspector, self).__init__((host, port)) 55 | self.parser = None 56 | 57 | def start(self): 58 | super(TarantoolInspector, self).start() 59 | os.environ['INSPECTOR_HOST'] = str(self.server_host) 60 | os.environ['INSPECTOR_PORT'] = str(self.server_port) 61 | 62 | def stop(self): 63 | del os.environ['INSPECTOR_HOST'] 64 | del os.environ['INSPECTOR_PORT'] 65 | 66 | def set_parser(self, parser): 67 | self.parser = parser 68 | self.sem = Semaphore() 69 | 70 | @staticmethod 71 | def readline(socket, delimiter='\n', size=4096): 72 | result = '' 73 | data = True 74 | 75 | while data: 76 | try: 77 | data = bytes_to_str(socket.recv(size)) 78 | except IOError: 79 | # catch instance halt connection refused errors 80 | data = '' 81 | result += data 82 | 83 | while result.find(delimiter) != -1: 84 | line, result = result.split(delimiter, 1) 85 | yield line 86 | return 87 | 88 | def handle(self, socket, addr): 89 | if self.parser is None: 90 | raise AttributeError('Parser is not defined') 91 | self.sem.acquire() 92 | 93 | for line in self.readline(socket): 94 | color_log('DEBUG: test-run received command: {}\n'.format(line), 95 | schema='test-run command') 96 | 97 | try: 98 | result = self.parser.parse_preprocessor(line) 99 | except (KeyboardInterrupt, TarantoolStartError): 100 | # propagate to the main greenlet 101 | raise 102 | except LuaPreprocessorException as e: 103 | qa_notice(str(e)) 104 | result = {'error': str(e)} 105 | except Exception as e: 106 | self.parser.kill_current_test() 107 | color_stdout('\nTarantoolInpector.handle() received the ' + 108 | 'following error:\n' + traceback.format_exc() + 109 | '\n', schema='error') 110 | result = {"error": repr(e)} 111 | if result is None: 112 | result = True 113 | result = yaml.dump(result) 114 | if not result.endswith('...\n'): 115 | result = result + '...\n' 116 | color_log("DEBUG: test-run's response for [{}]\n{}\n".format( 117 | line, prefix_each_line(' | ', result)), 118 | schema='test-run command') 119 | socket.sendall(str_to_bytes(result)) 120 | 121 | self.sem.release() 122 | 123 | def cleanup_nondefault(self): 124 | if self.parser: 125 | self.parser.cleanup_nondefault() 126 | -------------------------------------------------------------------------------- /lib/luatest_server.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import re 4 | import sys 5 | 6 | from subprocess import PIPE 7 | from subprocess import Popen 8 | from threading import Timer 9 | 10 | from lib.colorer import color_stdout 11 | from lib.error import TestRunInitError 12 | from lib.options import Options 13 | from lib.sampler import sampler 14 | from lib.server import Server 15 | from lib.tarantool_server import Test 16 | from lib.tarantool_server import TestExecutionError 17 | from lib.tarantool_server import TarantoolServer 18 | from lib.utils import bytes_to_str 19 | from lib.utils import find_tags 20 | 21 | 22 | def timeout_handler(process, test_timeout): 23 | color_stdout("Test timeout of %d secs reached\t" % test_timeout, schema='error') 24 | process.kill() 25 | 26 | 27 | class LuatestTest(Test): 28 | """ Handle *_test.lua. 29 | 30 | Provide method for executing luatest _test.lua test. 
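For a hypothetical test test-luatest/foo_test.lua with vardir /tmp/var/001-luatest, the command assembled by execute() below comes out roughly as (binary paths are illustrative):

['/usr/bin/tarantool', '-e', "io.stdout:setvbuf('no')",
 '/path/to/test-run/bin/luatest',
 '-c', '--no-clean', '--verbose', 'test-luatest/foo_test.lua',
 '--output', 'tap',
 '--log', '/tmp/var/001-luatest/run.log']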
31 | """ 32 | 33 | def __init__(self, *args, **kwargs): 34 | super(LuatestTest, self).__init__(*args, **kwargs) 35 | self.valgrind = kwargs.get('valgrind', False) 36 | 37 | def execute(self, server): 38 | """Execute test by luatest command 39 | 40 | Execute `luatest -c --no-clean --verbose _test.lua --output tap --log ` 41 | command. Disable capture mode and deletion of the var directory. Provide a verbose 42 | output in the tap format. Extend the command by `--pattern ` if the 43 | corresponding option is provided. 44 | """ 45 | server.current_test = self 46 | script = os.path.join(os.path.basename(server.testdir), self.name) 47 | 48 | # Disable stdout buffering. 49 | command = [server.binary, '-e', "io.stdout:setvbuf('no')"] 50 | # Add luatest as the script. 51 | command.extend([server.luatest]) 52 | # Add luatest command-line options. 53 | command.extend(['-c', '--no-clean', '--verbose', script, '--output', 'tap']) 54 | # Add luatest logging option. 55 | command.extend(['--log', os.path.join(server.vardir, 'run.log')]) 56 | if Options().args.pattern: 57 | for p in Options().args.pattern: 58 | command.extend(['--pattern', p]) 59 | 60 | # Run a specific test case. See find_tests() for details. 61 | if 'test_case' in self.run_params: 62 | command.extend(['--run-test-case', self.run_params['test_case']]) 63 | 64 | # We start luatest from the project source directory, it 65 | # is the usual way to use luatest. 66 | # 67 | # VARDIR (${BUILDDIR}/test/var/001_foo) will be used for 68 | # write ahead logs, snapshots, logs, unix domain sockets 69 | # and so on. 70 | os.environ['VARDIR'] = server.vardir 71 | project_dir = os.environ['SOURCEDIR'] 72 | 73 | with open(server.logfile, 'ab') as f: 74 | proc = Popen(command, cwd=project_dir, stdout=sys.stdout, stderr=f) 75 | sampler.register_process(proc.pid, self.id, server.name) 76 | test_timeout = Options().args.test_timeout 77 | timer = Timer(test_timeout, timeout_handler, (proc, test_timeout)) 78 | timer.start() 79 | proc.wait() 80 | timer.cancel() 81 | if proc.returncode != 0: 82 | raise TestExecutionError 83 | 84 | 85 | class LuatestServer(Server): 86 | """A dummy server implementation for luatest server tests""" 87 | 88 | def __new__(cls, ini=None, *args, **kwargs): 89 | cls = Server.get_mixed_class(cls, ini) 90 | return object.__new__(cls) 91 | 92 | def __init__(self, _ini=None, test_suite=None): 93 | if _ini is None: 94 | _ini = {} 95 | ini = {'vardir': None} 96 | ini.update(_ini) 97 | super(LuatestServer, self).__init__(ini, test_suite) 98 | self.testdir = os.path.abspath(os.curdir) 99 | self.vardir = ini['vardir'] 100 | self.builddir = ini['builddir'] 101 | self.name = 'luatest_server' 102 | 103 | @property 104 | def logfile(self): 105 | # Remove the suite name using basename(). 106 | test_name = os.path.basename(self.current_test.name) 107 | # Strip '.lua' from the end. 108 | # 109 | # The '_test' postfix is kept to ease distinguish this 110 | # log file from luatest.server instance logs. 111 | test_name = test_name[:-len('.lua')] 112 | # Add '.log'. 113 | file_name = test_name + '.log' 114 | # Put into vardir. 
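# For illustration (hypothetical values): with vardir '/tmp/var/001-luatest'
# and a current test named 'test-luatest/foo_bar_test.lua', the resulting
# path is '/tmp/var/001-luatest/foo_bar_test.log'.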
115 | return os.path.join(self.vardir, file_name) 116 | 117 | def deploy(self, vardir=None, silent=True, wait=True): 118 | self.vardir = vardir 119 | if not os.access(self.vardir, os.F_OK): 120 | os.makedirs(self.vardir) 121 | 122 | @classmethod 123 | def find_exe(cls, builddir): 124 | cls.builddir = builddir 125 | cls.binary = TarantoolServer.binary 126 | cls.debug = bool(re.findall(r'^Target:.*-Debug$', str(cls.version()), 127 | re.M)) 128 | cls.luatest = os.environ['TEST_RUN_DIR'] + '/bin/luatest' 129 | 130 | @classmethod 131 | def verify_luatest_exe(cls): 132 | """Verify that luatest executable is available.""" 133 | try: 134 | # Just check that the command returns zero exit code. 135 | with open(os.devnull, 'w') as devnull: 136 | returncode = Popen([cls.luatest, '--version'], 137 | stdout=devnull, 138 | stderr=devnull).wait() 139 | if returncode != 0: 140 | raise TestRunInitError('Unable to run `luatest --version`', 141 | {'returncode': returncode}) 142 | except OSError as e: 143 | # Python 2 raises OSError if the executable is not 144 | # found or if it has no executable bit. Python 3 145 | # raises FileNotFoundError and PermissionError in 146 | # those cases, which are childs of OSError anyway. 147 | raise TestRunInitError('Unable to find luatest executable', e) 148 | 149 | @classmethod 150 | def test_cases(cls, test_name): 151 | p = Popen([cls.luatest, test_name, '--list-test-cases'], stdout=PIPE) 152 | output = bytes_to_str(p.stdout.read()).rstrip() 153 | p.wait() 154 | 155 | # Exclude the first line if it is a tarantool version 156 | # report. 157 | res = output.split('\n') 158 | if len(res) > 0 and res[0].startswith('Tarantool version is'): 159 | return res[1:] 160 | 161 | return res 162 | 163 | @staticmethod 164 | def find_tests(test_suite, suite_path): 165 | """Looking for *_test.lua, which are can be executed by luatest.""" 166 | 167 | # TODO: Investigate why this old hack is needed and drop 168 | # it if possible (move the assignment to test_suite.py). 169 | # 170 | # cdc70f94701f suggests that it is related to the out of 171 | # source build. 172 | test_suite.ini['suite'] = suite_path 173 | 174 | # A pattern here means just a substring to find in a test 175 | # name. 176 | include_patterns = Options().args.tests 177 | exclude_patterns = Options().args.exclude 178 | 179 | accepted_tags = Options().args.tags 180 | 181 | tests = [] 182 | for test_name in glob.glob(os.path.join(suite_path, '*_test.lua')): 183 | # Several include patterns may match the given 184 | # test[^1]. 185 | # 186 | # The primary usage of this behavior is to run a test 187 | # many times in parallel to verify its stability or 188 | # to debug an unstable behavior. 189 | # 190 | # Execute the test once for each of the matching 191 | # patterns. 192 | # 193 | # [^1]: A pattern matches a test if the pattern is a 194 | # substring of the test name. 195 | repeat = sum(1 for p in include_patterns if p in test_name) 196 | # If neither of the include patterns matches the given 197 | # test, skip the test. 198 | if repeat == 0: 199 | continue 200 | 201 | # If at least one of the exclude patterns matches the 202 | # given test, skip the test. 203 | if any(p in test_name for p in exclude_patterns): 204 | continue 205 | 206 | tags = find_tags(test_name) 207 | 208 | # If --tags <...> CLI option is provided... 209 | if accepted_tags: 210 | # ...and the test has neither of the given tags, 211 | # skip the test. 
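# For illustration: if --tags accepts the tags 'vinyl' and 'memtx', a test
# tagged 'vinyl' is kept, while a test tagged only 'parallel' (or carrying
# no tags at all) is skipped.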
212 | if not any(t in accepted_tags for t in tags): 213 | continue 214 | 215 | # Add the test to the execution list otherwise. 216 | if 'parallel' in tags: 217 | # If the test has the 'parallel' tag, split the 218 | # test to test cases to run in separate tasks in 219 | # parallel. 220 | test_cases = LuatestServer.test_cases(test_name) 221 | 222 | # Display shorter test case names on the screen: 223 | # strip the common prefix. 224 | prefix_len = len(os.path.commonprefix(test_cases)) 225 | 226 | for test_case in test_cases: 227 | test_obj = LuatestTest(test_name, test_suite.args, test_suite.ini, 228 | params={"test_case": test_case}, 229 | conf_name=test_case[prefix_len:]) 230 | tests.extend([test_obj] * repeat) 231 | else: 232 | # If the test has no 'parallel' tag, run all the 233 | # test cases as one task. 234 | test_obj = LuatestTest(test_name, test_suite.args, test_suite.ini) 235 | tests.extend([test_obj] * repeat) 236 | 237 | tests.sort(key=lambda t: t.name) 238 | 239 | # TODO: Don't modify a test suite object's field from 240 | # another object directly. It is much better to just 241 | # return a list of tests from this method. 242 | test_suite.tests = tests 243 | -------------------------------------------------------------------------------- /lib/pytap13.py: -------------------------------------------------------------------------------- 1 | # Copyright 2013, Red Hat, Inc. 2 | # 3 | # This program is free software; you can redistribute it and/or modify 4 | # it under the terms of the GNU General Public License as published by 5 | # the Free Software Foundation; either version 2 of the License, or 6 | # (at your option) any later version. 7 | # 8 | # This program is distributed in the hope that it will be useful, 9 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | # GNU General Public License for more details. 12 | # 13 | # You should have received a copy of the GNU General Public License along 14 | # with this program; if not, write to the Free Software Foundation, Inc., 15 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 16 | # 17 | # Author: Josef Skladanka 18 | 19 | import re 20 | 21 | try: 22 | # Python 2 23 | from StringIO import StringIO 24 | except ImportError: 25 | # Python 3 26 | from io import StringIO 27 | 28 | import yaml 29 | from lib.utils import string_types 30 | 31 | 32 | RE_VERSION = re.compile(r"^\s*TAP version 13\s*$") 33 | RE_PLAN = re.compile( 34 | r"^\s*(?P\d+)\.\.(?P\d+)\s*(#\s*(?P.*))?\s*$") 35 | RE_TEST_LINE = re.compile( 36 | r"^\s*(?P(not\s+)?ok)\s*(?P\d+)?\s*(?P[^#]+)?" 
+ 37 | r"\s*(#\s*(?PTODO|SKIP)?\s*(?P.+)?)?\s*$", 38 | re.IGNORECASE) 39 | RE_DIAGNOSTIC = re.compile(r"^\s*#\s*(?P.+)?\s*$") 40 | RE_YAMLISH_START = re.compile(r"^\s*---.*$") 41 | RE_YAMLISH_END = re.compile(r"^\s*\.\.\.\s*$") 42 | 43 | 44 | class Test(object): 45 | def __init__(self, result, id, description=None, directive=None, 46 | comment=None): 47 | self.result = result 48 | self.id = id 49 | self.description = description 50 | try: 51 | self.directive = directive.upper() 52 | except AttributeError: 53 | self.directive = directive 54 | self.comment = comment 55 | self.yaml = None 56 | self._yaml_buffer = StringIO() 57 | self.diagnostics = [] 58 | 59 | 60 | class TAP13(object): 61 | def __init__(self, strict=False): 62 | self.tests = [] 63 | self.__tests_counter = 0 64 | self.tests_planned = None 65 | self.strict = strict 66 | 67 | def _parse(self, source): 68 | seek_version = True 69 | seek_plan = False 70 | seek_test = False 71 | 72 | in_test = False 73 | in_yaml = False 74 | for line in source: 75 | if not seek_version and RE_VERSION.match(line): 76 | raise ValueError("Bad TAP format, multiple TAP headers") 77 | 78 | if in_yaml: 79 | if RE_YAMLISH_END.match(line): 80 | test = self.tests[-1] 81 | try: 82 | test.yaml = yaml.safe_load( 83 | test._yaml_buffer.getvalue()) 84 | except Exception as e: 85 | if not self.strict: 86 | continue 87 | test_num = len(self.tests) + 1 88 | comment = 'DIAG: Test %s has wrong YAML: %s' % ( 89 | test_num, str(e)) 90 | self.tests.append(Test('not ok', test_num, 91 | comment=comment)) 92 | in_yaml = False 93 | else: 94 | self.tests[-1]._yaml_buffer.write(line) 95 | continue 96 | 97 | if in_test: 98 | if RE_DIAGNOSTIC.match(line): 99 | self.tests[-1].diagnostics.append(line.strip()) 100 | continue 101 | if RE_YAMLISH_START.match(line): 102 | in_yaml = True 103 | continue 104 | 105 | on_top_level = not line.startswith(' ') 106 | raw_line = line.rstrip('\n') 107 | line = line.strip() 108 | 109 | if RE_DIAGNOSTIC.match(line): 110 | continue 111 | 112 | # this is "beginning" of the parsing, skip all lines until 113 | # version is found (in non-strict mode) 114 | if seek_version: 115 | m = RE_VERSION.match(line) 116 | if m: 117 | seek_version = False 118 | seek_plan = True 119 | seek_test = True 120 | continue 121 | elif not self.strict: 122 | continue 123 | 124 | m = RE_PLAN.match(line) 125 | if m: 126 | if seek_plan and on_top_level: 127 | d = m.groupdict() 128 | self.tests_planned = int(d.get('end', 0)) 129 | seek_plan = False 130 | 131 | # Stop processing if tests were found before the plan 132 | # if plan is at the end, it must be the last line 133 | # -> stop processing 134 | if self.__tests_counter > 0: 135 | break 136 | continue 137 | elif not on_top_level: 138 | continue 139 | 140 | if seek_test: 141 | m = RE_TEST_LINE.match(line) 142 | if m and on_top_level: 143 | self.__tests_counter += 1 144 | t_attrs = m.groupdict() 145 | if t_attrs['id'] is None: 146 | t_attrs['id'] = self.__tests_counter 147 | t_attrs['id'] = int(t_attrs['id']) 148 | if t_attrs['id'] < self.__tests_counter: 149 | raise ValueError( 150 | "Descending test id on line: %r" % line) 151 | # according to TAP13 specs, missing tests must be handled 152 | # as 'not ok' 153 | # here we add the missing tests in sequence 154 | while t_attrs['id'] > self.__tests_counter: 155 | comment = 'DIAG: Test %s not present' % \ 156 | self.__tests_counter 157 | self.tests.append(Test('not ok', self.__tests_counter, 158 | comment=comment)) 159 | self.__tests_counter += 1 160 | t = Test(**t_attrs) 161 | 
self.tests.append(t) 162 | in_test = True 163 | continue 164 | elif not on_top_level: 165 | continue 166 | 167 | if self.strict: 168 | raise ValueError('Wrong TAP line: [' + raw_line + ']') 169 | 170 | if self.tests_planned is None: 171 | # TODO: raise better error than ValueError 172 | raise ValueError("Missing plan in the TAP source") 173 | 174 | if len(self.tests) != self.tests_planned: 175 | comment = 'DIAG: Expected %s tests, got %s' % \ 176 | (self.tests_planned, len(self.tests)) 177 | self.tests.append(Test('not ok', len(self.tests), comment=comment)) 178 | 179 | def parse(self, source): 180 | if isinstance(source, string_types): 181 | self._parse(StringIO(source)) 182 | elif hasattr(source, "__iter__"): 183 | self._parse(source) 184 | 185 | 186 | if __name__ == "__main__": 187 | input = """ 188 | TAP version 13 189 | ok 1 - Input file opened 190 | not ok 2 - First line of the input valid 191 | --- 192 | message: 'First line invalid' 193 | severity: fail 194 | data: 195 | got: 'Flirble' 196 | expect: 'Fnible' 197 | ... 198 | ok - Read the rest of the file 199 | not ok 5 - Summarized correctly # TODO Not written yet 200 | --- 201 | message: "Can't make summary yet" 202 | severity: todo 203 | ... 204 | ok Description 205 | # Diagnostic 206 | --- 207 | message: 'Failure message' 208 | severity: fail 209 | data: 210 | got: 211 | - 1 212 | - 3 213 | - 2 214 | expect: 215 | - 1 216 | - 2 217 | - 3 218 | ... 219 | 1..6 220 | """ 221 | t = TAP13() 222 | t.parse(input) 223 | 224 | import pprint 225 | for test in t.tests: 226 | print(test.result, test.id, test.description, "#", test.directive, 227 | test.comment) 228 | pprint.pprint(test._yaml_buffer) 229 | pprint.pprint(test.yaml) 230 | -------------------------------------------------------------------------------- /lib/sampler.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import time 4 | 5 | from lib.colorer import color_log 6 | from lib.colorer import qa_notice 7 | from lib.utils import format_process 8 | from lib.utils import get_proc_stat_rss 9 | from lib.utils import proc_stat_rss_supported 10 | 11 | 12 | if sys.version_info[0] == 2: 13 | ProcessLookupError = OSError 14 | 15 | 16 | # Don't inherit BaseWorkerMessage to bypass cyclic import. 17 | class RegisterProcessMessage(object): 18 | """Ask the sampler in the main test-run process to register 19 | given process. 20 | """ 21 | def __init__(self, worker_id, worker_name, pid, task_id, server_name): 22 | self.worker_id = worker_id 23 | self.worker_name = worker_name 24 | self.pid = pid 25 | self.task_id = task_id 26 | self.server_name = server_name 27 | 28 | 29 | # Don't inherit BaseWatcher to bypass cyclic import. 30 | class SamplerWatcher(object): 31 | def __init__(self, sampler): 32 | self._sampler = sampler 33 | self._last_sample = 0 34 | self._sample_interval = 0.1 # seconds 35 | self._warn_interval = self._sample_interval * 4 36 | 37 | def process_result(self, obj): 38 | if isinstance(obj, RegisterProcessMessage): 39 | self._sampler.register_process( 40 | obj.pid, obj.task_id, obj.server_name, obj.worker_id, 41 | obj.worker_name) 42 | self._wakeup() 43 | 44 | def process_timeout(self, delta_seconds): 45 | self._wakeup() 46 | 47 | @property 48 | def sample_interval(self): 49 | return self._sample_interval 50 | 51 | def _wakeup(self): 52 | """Invoke Sampler.sample() if enough time elapsed since 53 | the previous call. 
54 | """ 55 | now = time.time() 56 | delta = now - self._last_sample 57 | if self._last_sample > 0 and delta > self._warn_interval: 58 | template = 'Low sampling resolution. The expected interval\n' + \ 59 | 'is {:.2f} seconds ({:.2f} seconds without warnings),\n' + \ 60 | 'but the last sample was collected {:.2f} seconds ago.' 61 | qa_notice(template.format(self._sample_interval, self._warn_interval, 62 | delta)) 63 | if delta > self._sample_interval: 64 | self._sampler._sample() 65 | self._last_sample = now 66 | 67 | 68 | class Sampler: 69 | def __init__(self): 70 | # The instance is created in the test-run main process. 71 | 72 | # Field for an instance in a worker. 73 | self._worker_id = None 74 | self._worker_name = None 75 | self._queue = None 76 | 77 | # Field for an instance in the main process. 78 | self._watcher = SamplerWatcher(self) 79 | 80 | self._processes = dict() 81 | self._rss_summary = dict() 82 | 83 | def set_queue(self, queue, worker_id, worker_name): 84 | # Called from a worker process (_run_worker()). 85 | self._worker_id = worker_id 86 | self._worker_name = worker_name 87 | self._queue = queue 88 | self._watcher = None 89 | 90 | @property 91 | def rss_summary(self): 92 | """Task ID to maximum RSS mapping.""" 93 | return self._rss_summary 94 | 95 | @property 96 | def sample_interval(self): 97 | return self._watcher.sample_interval 98 | 99 | @property 100 | def watcher(self): 101 | if not self._watcher: 102 | raise RuntimeError('sampler: watcher is available only in the ' + 103 | 'main test-run process') 104 | return self._watcher 105 | 106 | @property 107 | def is_enabled(self): 108 | return proc_stat_rss_supported() 109 | 110 | def register_process(self, pid, task_id, server_name, worker_id=None, 111 | worker_name=None): 112 | """Register a process to sampling. 113 | 114 | Call it without worker_* arguments from a worker 115 | process. 116 | """ 117 | if not self._queue: 118 | # In main test-run process. 119 | self._processes[pid] = { 120 | 'task_id': task_id, 121 | 'server_name': server_name, 122 | 'worker_id': worker_id, 123 | 'worker_name': worker_name, 124 | } 125 | self._log('register', pid) 126 | return 127 | 128 | # Pass to the main test-run process. 129 | self._queue.put(RegisterProcessMessage( 130 | self._worker_id, self._worker_name, pid, task_id, server_name)) 131 | 132 | def unregister_process(self, pid): 133 | if self._queue: 134 | raise NotImplementedError('sampler: a process unregistration ' + 135 | 'from a test-run worker is not ' + 136 | 'implemented yet') 137 | if pid not in self._processes: 138 | return 139 | 140 | self._log('unregister', pid) 141 | del self._processes[pid] 142 | 143 | def _log(self, event, pid): 144 | # Those logs are not written due to gh-247. 145 | process_def = self._processes[pid] 146 | task_id = process_def['task_id'] 147 | test_name = task_id[0] + ((':' + task_id[1]) if task_id[1] else '') 148 | worker_name = process_def['worker_name'] 149 | server_name = process_def['server_name'] 150 | color_log('DEBUG: sampler: {} {}\n'.format( 151 | event, format_process(pid)), schema='info') 152 | color_log(' | worker: {}\n'.format(worker_name)) 153 | color_log(' | test: {}\n'.format(test_name)) 154 | color_log(' | server: {}\n'.format(str(server_name))) 155 | 156 | def _sample(self): 157 | tasks_rss = dict() 158 | for pid in list(self._processes.keys()): 159 | # Unregister processes that're gone. 160 | # Assume that PIDs are rarely reused. 
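# Note that os.kill(pid, 0) sends no signal: it only checks that the
# process still exists and raises ProcessLookupError once it is gone.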
161 | try: 162 | os.kill(pid, 0) 163 | except ProcessLookupError: 164 | self.unregister_process(pid) 165 | else: 166 | self._sample_process(pid, tasks_rss) 167 | 168 | # Save current overall RSS value if it is bigger than saved. 169 | for task_id in tasks_rss: 170 | if self.rss_summary.get(task_id, 0) < tasks_rss[task_id]: 171 | self.rss_summary[task_id] = tasks_rss[task_id] 172 | 173 | def _sample_process(self, pid, tasks_rss): 174 | task_id = self._processes[pid]['task_id'] 175 | # Count overall RSS per task. 176 | tasks_rss[task_id] = get_proc_stat_rss(pid) + tasks_rss.get(task_id, 0) 177 | 178 | 179 | # The 'singleton' sampler instance: created in the main test-run 180 | # process, but then work differently in the main process and 181 | # workers. 182 | sampler = Sampler() 183 | -------------------------------------------------------------------------------- /lib/server.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import shutil 4 | import subprocess 5 | from itertools import product 6 | 7 | from lib.server_mixins import ValgrindMixin 8 | from lib.server_mixins import GdbMixin 9 | from lib.server_mixins import GdbServerMixin 10 | from lib.server_mixins import LLdbMixin 11 | from lib.server_mixins import StraceMixin 12 | from lib.server_mixins import LuacovMixin 13 | from lib.colorer import color_stdout 14 | from lib.options import Options 15 | from lib.utils import print_tail_n 16 | from lib.utils import bytes_to_str 17 | from lib.utils import find_tags 18 | 19 | DEFAULT_CHECKPOINT_PATTERNS = ["*.snap", "*.xlog", "*.vylog", "*.inprogress", 20 | "[0-9]*/"] 21 | 22 | DEFAULT_SNAPSHOT_NAME = "00000000000000000000.snap" 23 | 24 | 25 | class Server(object): 26 | """Server represents a single server instance. Normally, the 27 | program operates with only one server, but in future we may add 28 | replication slaves. 
The server is started once at the beginning 29 | of each suite, and stopped at the end.""" 30 | DEFAULT_INSPECTOR = 0 31 | TEST_RUN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 32 | "..")) 33 | # assert(false) hangs due to gh-4983, added fiber.sleep(0) to workaround it 34 | DISABLE_AUTO_UPGRADE = "require('fiber').sleep(0) \ 35 | assert(box.error.injection.set('ERRINJ_AUTO_UPGRADE', true) == 'ok', \ 36 | 'no such errinj')" 37 | 38 | @property 39 | def vardir(self): 40 | if not hasattr(self, '_vardir'): 41 | raise ValueError("No vardir specified") 42 | return self._vardir 43 | 44 | @vardir.setter 45 | def vardir(self, path): 46 | if path is None: 47 | return 48 | self._vardir = os.path.abspath(path) 49 | 50 | @staticmethod 51 | def get_mixed_class(cls, ini): 52 | if ini is None: 53 | return cls 54 | 55 | conflict_options = ('valgrind', 'gdb', 'gdbserver', 'lldb', 'strace') 56 | for op1, op2 in product(conflict_options, repeat=2): 57 | if op1 != op2 and \ 58 | (op1 in ini and ini[op1]) and \ 59 | (op2 in ini and ini[op2]): 60 | format_str = 'Can\'t run under {} and {} simultaniously' 61 | raise OSError(format_str.format(op1, op2)) 62 | 63 | lname = cls.__name__.lower() 64 | 65 | if ini.get('valgrind') and 'valgrind' not in lname: 66 | cls = type('Valgrind' + cls.__name__, (ValgrindMixin, cls), {}) 67 | elif ini.get('gdbserver') and 'gdbserver' not in lname: 68 | cls = type('GdbServer' + cls.__name__, (GdbServerMixin, cls), {}) 69 | elif ini.get('gdb') and 'gdb' not in lname: 70 | cls = type('Gdb' + cls.__name__, (GdbMixin, cls), {}) 71 | elif ini.get('lldb') and 'lldb' not in lname: 72 | cls = type('LLdb' + cls.__name__, (LLdbMixin, cls), {}) 73 | elif 'strace' in ini and ini['strace']: 74 | cls = type('Strace' + cls.__name__, (StraceMixin, cls), {}) 75 | elif 'luacov' in ini and ini['luacov']: 76 | cls = type('Luacov' + cls.__name__, (LuacovMixin, cls), {}) 77 | 78 | return cls 79 | 80 | def __new__(cls, ini=None, *args, **kwargs): 81 | if ini is None or 'core' not in ini or ini['core'] is None: 82 | return object.__new__(cls) 83 | core = ini['core'].lower().strip() 84 | cls.mdlname = "lib.{0}_server".format(core.replace(' ', '_')) 85 | cls.clsname = "{0}Server".format(core.title().replace(' ', '')) 86 | corecls = __import__(cls.mdlname, 87 | fromlist=cls.clsname).__dict__[cls.clsname] 88 | return corecls.__new__(corecls, ini, *args, **kwargs) 89 | 90 | def __init__(self, ini, test_suite=None): 91 | self.core = ini['core'] 92 | self.ini = ini 93 | self.vardir = ini['vardir'] 94 | self.inspector_port = int(ini.get( 95 | 'inspector_port', self.DEFAULT_INSPECTOR 96 | )) 97 | self.disable_schema_upgrade = Options().args.disable_schema_upgrade 98 | self.snapshot_path = Options().args.snapshot_path 99 | 100 | # filled in {Test,AppTest,LuaTest,PythonTest}.execute() 101 | # or passed through execfile() for PythonTest (see 102 | # TarantoolServer.__init__). 103 | self.current_test = None 104 | 105 | # Used in valgrind_log property. 
'test_suite' is not None only for 106 | # default servers running in TestSuite.run_all() 107 | self.test_suite = test_suite 108 | 109 | @classmethod 110 | def version(cls): 111 | p = subprocess.Popen([cls.binary, "--version"], stdout=subprocess.PIPE) 112 | version = bytes_to_str(p.stdout.read()).rstrip() 113 | p.wait() 114 | return version 115 | 116 | def prepare_args(self, args=[]): 117 | return args 118 | 119 | def pretest_clean(self): 120 | self.cleanup() 121 | 122 | def cleanup(self, dirname='.'): 123 | waldir = os.path.join(self.vardir, dirname) 124 | for pattern in DEFAULT_CHECKPOINT_PATTERNS: 125 | for f in glob.glob(os.path.join(waldir, pattern)): 126 | if os.path.isdir(f): 127 | shutil.rmtree(f) 128 | else: 129 | os.remove(f) 130 | 131 | def install(self, binary=None, vardir=None, mem=None, silent=True): 132 | pass 133 | 134 | def init(self): 135 | pass 136 | 137 | def start(self, silent=True): 138 | pass 139 | 140 | def stop(self, silent=True): 141 | pass 142 | 143 | def restart(self): 144 | pass 145 | 146 | def print_log(self, num_lines=None): 147 | """ Show information from the given log file. 148 | """ 149 | prefix = '\n[test-run server "{instance}"] '.format(instance=self.name) 150 | if not self.logfile: 151 | msg = 'No log file is set (internal test-run error)\n' 152 | color_stdout(prefix + msg, schema='error') 153 | elif not os.path.exists(self.logfile): 154 | fmt_str = 'The log file {logfile} does not exist\n' 155 | msg = fmt_str.format(logfile=self.logfile) 156 | color_stdout(prefix + msg, schema='error') 157 | elif os.path.getsize(self.logfile) == 0: 158 | fmt_str = 'The log file {logfile} has zero size\n' 159 | msg = fmt_str.format(logfile=self.logfile) 160 | color_stdout(prefix + msg, schema='error') 161 | elif num_lines: 162 | fmt_str = 'Last {num_lines} lines of the log file {logfile}:\n' 163 | msg = fmt_str.format(logfile=self.logfile, num_lines=num_lines) 164 | color_stdout(prefix + msg, schema='error') 165 | print_tail_n(self.logfile, num_lines) 166 | else: 167 | fmt_str = 'The log file {logfile}:\n' 168 | msg = fmt_str.format(logfile=self.logfile) 169 | color_stdout(msg, schema='error') 170 | print_tail_n(self.logfile, num_lines) 171 | 172 | @staticmethod 173 | def exclude_tests(test_names, exclude_patterns): 174 | """ Filter out test files by several criteria: 175 | 176 | exclude_patters: a list of strings. If a string is 177 | a substring of the test full name, exclude it. 178 | 179 | If --tags option is provided, exclude tests, which 180 | have no a tag from the provided list. 181 | """ 182 | 183 | # TODO: Support filtering by another file (for unit 184 | # tests). Transform a test name into a test source name. 185 | # Don't forget about out of source build. 186 | # TODO: Support multiline comments (mainly for unit 187 | # tests). 
188 | 189 | def match_any_pattern(test_name, patterns): 190 | for pattern in patterns: 191 | if pattern in test_name: 192 | return True 193 | return False 194 | 195 | def match_any_tag(test_name, accepted_tags): 196 | tags = find_tags(test_name) 197 | for tag in tags: 198 | if tag in accepted_tags: 199 | return True 200 | return False 201 | 202 | accepted_tags = Options().args.tags 203 | 204 | res = [] 205 | for test_name in test_names: 206 | if match_any_pattern(test_name, exclude_patterns): 207 | continue 208 | if accepted_tags is None or match_any_tag(test_name, accepted_tags): 209 | res.append(test_name) 210 | return res 211 | -------------------------------------------------------------------------------- /lib/server_mixins.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import shlex 4 | 5 | from lib.utils import find_in_path 6 | from lib.utils import print_tail_n 7 | from lib.utils import non_empty_valgrind_logs 8 | from lib.utils import shlex_quote 9 | from lib.colorer import color_log 10 | from lib.colorer import color_stdout 11 | 12 | 13 | def shlex_join(strings): 14 | return ' '.join(shlex_quote(s) for s in strings) 15 | 16 | 17 | class Mixin(object): 18 | pass 19 | 20 | 21 | class ValgrindMixin(Mixin): 22 | default_valgr = { 23 | "suppress_path": "share/", 24 | "suppress_name": "tarantool.sup" 25 | } 26 | 27 | def format_valgrind_log_path(self, suite_name, test_name, conf, 28 | server_name, num): 29 | basename = '{}.{}.{}.{}.{}.valgrind.log'.format( 30 | suite_name, test_name, conf, server_name, str(num)) 31 | return os.path.join(self.vardir, basename) 32 | 33 | @property 34 | def valgrind_log(self): 35 | # suite.{test/default}.{conf/none}.instance.num.valgrind.log 36 | # Why 'TarantoolServer' is special case here? Consider: 37 | # * TarantoolServer runs once, then execute tests in the one process 38 | # (we run the server itself under valgrind). 39 | # * AppServer / UnittestServer just create separate processes for each 40 | # tests (we run these processes under valgrind). 
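# For illustration (hypothetical values): suite 'box', test 'foo.test.lua',
# conf 'none', instance 'tarantool' and num 1 produce
# <vardir>/box.foo.test.lua.none.tarantool.1.valgrind.log.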
41 | if 'TarantoolServer' in self.__class__.__name__ and self.test_suite: 42 | suite_name = os.path.basename(self.test_suite.suite_path) 43 | path = self.format_valgrind_log_path( 44 | suite_name, 'default', 'none', self.name, 1) 45 | else: 46 | suite_name = os.path.basename(self.current_test.suite_ini['suite']) 47 | test_name = os.path.basename(self.current_test.name) 48 | conf_name = self.current_test.conf_name or 'none' 49 | num = 1 50 | while True: 51 | path = self.format_valgrind_log_path( 52 | suite_name, test_name, conf_name, self.name, num) 53 | if not os.path.isfile(path): 54 | break 55 | num += 1 56 | return path 57 | 58 | def current_valgrind_logs(self, for_suite=False, for_test=False): 59 | if not self.test_suite or not self.current_test: 60 | raise ValueError( 61 | "The method should be called on a default suite's server.") 62 | if for_suite == for_test: 63 | raise ValueError('Set for_suite OR for_test to True') 64 | suite_name = os.path.basename(self.test_suite.suite_path) 65 | if for_test: 66 | test_name = os.path.basename(self.current_test.name) 67 | default_tmpl = self.format_valgrind_log_path( 68 | suite_name, 'default', '*', '*', '*') 69 | non_default_tmpl = self.format_valgrind_log_path( 70 | suite_name, test_name, '*', '*', '*') 71 | return sorted(glob.glob(default_tmpl) + 72 | glob.glob(non_default_tmpl)) 73 | else: 74 | suite_tmpl = self.format_valgrind_log_path( 75 | suite_name, '*', '*', '*', '*') 76 | return sorted(glob.glob(suite_tmpl)) 77 | 78 | @property 79 | def valgrind_sup(self): 80 | if not hasattr(self, '_valgrind_sup') or not self._valgrind_sup: 81 | return os.path.join(self.testdir, 82 | self.default_valgr['suppress_path'], 83 | self.default_valgr['suppress_name']) 84 | return self._valgrind_sup 85 | 86 | @valgrind_sup.setter 87 | def valgrind_sup(self, val): 88 | self._valgrind_sup = os.path.abspath(val) 89 | 90 | @property 91 | def valgrind_sup_output(self): 92 | return os.path.join(self.vardir, self.default_valgr['suppress_name']) 93 | 94 | @property 95 | def valgrind_cmd_args(self): 96 | return shlex.split("valgrind --log-file={log} --suppressions={sup} \ 97 | --gen-suppressions=all --trace-children=yes --leak-check=full \ 98 | --read-var-info=yes --quiet".format( 99 | log=self.valgrind_log, 100 | sup=self.valgrind_sup)) 101 | 102 | def prepare_args(self, args=[]): 103 | if not find_in_path('valgrind'): 104 | raise OSError('`valgrind` executables not found in PATH') 105 | orig_args = super(ValgrindMixin, self).prepare_args(args) 106 | res_args = self.valgrind_cmd_args + orig_args 107 | color_log('\nRUN: ' + shlex_join(res_args) + '\n', schema='test_var') 108 | return res_args 109 | 110 | def wait_stop(self): 111 | return self.process.wait() 112 | 113 | def crash_grep(self): 114 | if self.process.returncode < 0 or \ 115 | not non_empty_valgrind_logs([self.valgrind_log]): 116 | super(ValgrindMixin, self).crash_grep() 117 | return 118 | 119 | lines_cnt = 50 120 | color_stdout(('\n\nValgrind for [Instance "%s"] returns non-zero ' + 121 | 'exit code: %d\n') % (self.name, self.process.returncode), 122 | schema='error') 123 | color_stdout("It's known that it can be valgrind's " + 124 | "\"the 'impossible' happened\" error\n", schema='error') 125 | color_stdout('Last %d lines of valgring log file [%s]:\n' % ( 126 | lines_cnt, self.valgrind_log), schema='error') 127 | print_tail_n(self.valgrind_log, lines_cnt) 128 | 129 | 130 | class StraceMixin(Mixin): 131 | @property 132 | def strace_log(self): 133 | # TODO: don't overwrite log, like in the 'valgrind_log' 
property above 134 | return os.path.join(self.vardir, 'strace.log') 135 | 136 | def prepare_args(self, args=[]): 137 | if not find_in_path('strace'): 138 | raise OSError('`strace` executables not found in PATH') 139 | orig_args = super(StraceMixin, self).prepare_args(args) 140 | res_args = shlex.split("strace -o {log} -f -tt -T -x -I1 {bin}".format( 141 | bin=' '.join(orig_args), 142 | log=self.strace_log 143 | )) 144 | color_log('\nRUN: ' + shlex_join(res_args) + '\n', schema='test_var') 145 | return res_args 146 | 147 | def wait_stop(self): 148 | self.kill_old_server() 149 | return self.process.wait() 150 | 151 | 152 | class DebugMixin(Mixin): 153 | debugger_args = { 154 | "screen_name": None, 155 | "debugger": None, 156 | "sh_string": None 157 | } 158 | 159 | def prepare_args(self, args=[]): 160 | screen_name = self.debugger_args['screen_name'] 161 | debugger = self.debugger_args['debugger'] 162 | gdbserver_port = self.debugger_args['gdbserver_port'] 163 | gdbserver_opts = self.debugger_args['gdbserver_opts'] 164 | sh_string = self.debugger_args['sh_string'] 165 | 166 | is_under_gdbserver = 'GdbServer' in self.__class__.__name__ 167 | 168 | if not is_under_gdbserver and not find_in_path('screen'): 169 | raise OSError('`screen` executables not found in PATH') 170 | if not find_in_path(debugger): 171 | raise OSError('`%s` executables not found in PATH' % debugger) 172 | 173 | is_tarantoolserver = 'TarantoolServer' in self.__class__.__name__ 174 | 175 | if is_tarantoolserver or is_under_gdbserver: 176 | color_stdout('\nYou started the server in %s mode.\n' % debugger, 177 | schema='info') 178 | if is_under_gdbserver: 179 | color_stdout("To attach, use `gdb -ex 'target remote :%s'`\n" % 180 | gdbserver_port, schema='info') 181 | else: 182 | color_stdout('To attach, use `screen -r %s`\n' % screen_name, 183 | schema='info') 184 | 185 | # detach only for TarantoolServer 186 | screen_opts = '-d' if is_tarantoolserver else '' 187 | 188 | orig_args = super(DebugMixin, self).prepare_args(args) 189 | res_args = shlex.split(sh_string.format( 190 | screen_name=screen_name, 191 | screen_opts=screen_opts, 192 | binary=self.binary, 193 | args=' '.join(orig_args), 194 | logfile=self.logfile, 195 | debugger=debugger, 196 | gdbserver_port=gdbserver_port, 197 | gdbserver_opts=gdbserver_opts)) 198 | color_log('\nRUN: ' + shlex_join(res_args) + '\n', schema='test_var') 199 | return res_args 200 | 201 | def wait_stop(self): 202 | self.kill_old_server() 203 | self.process.wait() 204 | 205 | 206 | class GdbMixin(DebugMixin): 207 | debugger_args = { 208 | "screen_name": "tarantool", 209 | "debugger": "gdb", 210 | "gdbserver_port": None, 211 | "gdbserver_opts": None, 212 | "sh_string": 213 | """screen {screen_opts} -mS {screen_name} {debugger} {binary} 214 | -ex 'b main' -ex 'run {args} >> {logfile} 2>> {logfile}' 215 | """ 216 | } 217 | 218 | 219 | # this would be good for running unit tests: 220 | # https://cygwin.com/ml/gdb-patches/2015-03/msg01051.html 221 | class GdbServerMixin(DebugMixin): 222 | debugger_args = { 223 | "screen_name": None, 224 | "debugger": "gdbserver", 225 | "gdbserver_port": "8888", 226 | "gdbserver_opts": "", 227 | "sh_string": 228 | """gdbserver :{gdbserver_port} {binary} {args} -- {gdbserver_opts} 229 | """ 230 | } 231 | 232 | 233 | class LLdbMixin(DebugMixin): 234 | debugger_args = { 235 | "screen_name": "tarantool", 236 | "debugger": "lldb", 237 | "gdbserver_port": None, 238 | "gdbserver_opts": None, 239 | "sh_string": 240 | """screen {screen_opts} -mS {screen_name} {debugger} -f 
{binary} 241 | -o 'b main' 242 | -o 'settings set target.run-args {args}' 243 | -o 'process launch -o {logfile} -e {logfile}' 244 | """ 245 | } 246 | 247 | 248 | class LuacovMixin(Mixin): 249 | def prepare_args(self, args=[]): 250 | orig_args = super(LuacovMixin, self).prepare_args(args) 251 | return ['tarantool', 252 | '-e', '_G.TEST_RUN_LUACOV=true', 253 | '-e', 'jit.off()', 254 | '-l', 'luacov'] + orig_args 255 | -------------------------------------------------------------------------------- /lib/tarantool_connection.py: -------------------------------------------------------------------------------- 1 | __author__ = "Konstantin Osipov " 2 | 3 | # Redistribution and use in source and binary forms, with or without 4 | # modification, are permitted provided that the following conditions 5 | # are met: 6 | # 1. Redistributions of source code must retain the above copyright 7 | # notice, this list of conditions and the following disclaimer. 8 | # 2. Redistributions in binary form must reproduce the above copyright 9 | # notice, this list of conditions and the following disclaimer in the 10 | # documentation and/or other materials provided with the distribution. 11 | # 12 | # THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 13 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 14 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 15 | # ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 16 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 17 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 18 | # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 19 | # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 20 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 21 | # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 22 | # SUCH DAMAGE. 23 | 24 | import ctypes 25 | import errno 26 | import re 27 | import socket 28 | from contextlib import contextmanager 29 | 30 | import gevent 31 | from gevent import socket as gsocket 32 | 33 | from lib.connpool import ConnectionPool 34 | from lib.test import TestRunGreenlet 35 | from lib.utils import warn_unix_socket 36 | from lib.utils import set_fd_cloexec 37 | 38 | 39 | class TarantoolPool(ConnectionPool): 40 | def __init__(self, host, port, *args, **kwargs): 41 | self.host = host 42 | self.port = port 43 | super(TarantoolPool, self).__init__(*args, **kwargs) 44 | 45 | def _new_connection(self): 46 | result = None 47 | # https://github.com/tarantool/tarantool/issues/3806 48 | # We should set FD_CLOEXEC before connect(), because connect() is 49 | # blocking operation and with gevent it can wakeup another greenlet, 50 | # including one in which we do Popen. When FD_CLOEXEC was set after 51 | # connect() we observed socket file descriptors leaking into tarantool 52 | # server in case of unix socket. It was not observed in case of tcp 53 | # sockets for unknown reason, so now we leave setting FD_CLOEXEC after 54 | # connect for tcp sockets and fix it only for unix sockets. 
55 | if self.host == 'unix/' or re.search(r'^/', str(self.port)): 56 | warn_unix_socket(self.port) 57 | result = gsocket.socket(gsocket.AF_UNIX, gsocket.SOCK_STREAM) 58 | set_fd_cloexec(result.fileno()) 59 | result.connect(self.port) 60 | else: 61 | result = gsocket.create_connection((self.host, self.port)) 62 | result.setsockopt(gsocket.SOL_TCP, gsocket.TCP_NODELAY, 1) 63 | set_fd_cloexec(result.fileno()) 64 | return result 65 | 66 | def _addOne(self): 67 | stime = 0.1 68 | while True: 69 | try: 70 | c = self._new_connection() 71 | except gsocket.error: 72 | c = None 73 | if c: 74 | break 75 | gevent.sleep(stime) 76 | if stime < 400: 77 | stime *= 2 78 | self.conn.append(c) 79 | self.lock.release() 80 | 81 | @contextmanager 82 | def get(self): 83 | self.lock.acquire() 84 | 85 | try: 86 | c = self.conn.pop() 87 | yield c 88 | except self.exc_classes: 89 | greenlet = TestRunGreenlet(self._addOne) 90 | greenlet.start_later(1) 91 | raise 92 | except: # noqa: E722 93 | self.conn.append(c) 94 | self.lock.release() 95 | raise 96 | else: 97 | self.conn.append(c) 98 | self.lock.release() 99 | 100 | def close_all(self): 101 | self.conn.clear() 102 | 103 | 104 | class TarantoolConnection(object): 105 | @property 106 | def uri(self): 107 | if self.host == 'unix/' or re.search(r'^/', str(self.port)): 108 | return self.port 109 | else: 110 | return self.host+':'+str(self.port) 111 | 112 | def __init__(self, host, port): 113 | self.host = host 114 | self.port = port 115 | self.is_connected = False 116 | if self.host == 'unix/' or re.search(r'^/', str(self.port)): 117 | warn_unix_socket(self.port) 118 | 119 | def connect(self): 120 | # See comment in TarantoolPool._new_connection(). 121 | if self.host == 'unix/' or re.search(r'^/', str(self.port)): 122 | self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 123 | set_fd_cloexec(self.socket.fileno()) 124 | self.socket.connect(self.port) 125 | else: 126 | self.socket = socket.create_connection((self.host, self.port)) 127 | self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) 128 | set_fd_cloexec(self.socket.fileno()) 129 | self.is_connected = True 130 | 131 | def disconnect(self): 132 | if self.is_connected: 133 | self.socket.close() 134 | self.is_connected = False 135 | 136 | def reconnect(self): 137 | self.disconnect() 138 | self.connect() 139 | 140 | def opt_reconnect(self): 141 | """ On a socket which was disconnected, recv of 0 bytes immediately 142 | returns with no data. On a socket which is alive, it returns 143 | EAGAIN. Make use of this property and detect whether or not the 144 | socket is dead. Reconnect a dead socket, do nothing if the socket 145 | is good. 
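        The probe below is a non-blocking one-byte recv() with
        MSG_DONTWAIT | MSG_PEEK, so it neither blocks the caller nor
        consumes any data from the stream.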
146 | """ 147 | try: 148 | if not self.is_connected or self.socket.recv( 149 | 1, socket.MSG_DONTWAIT | socket.MSG_PEEK) == b'': 150 | self.reconnect() 151 | except socket.error as e: 152 | if e.errno == errno.EAGAIN: 153 | pass 154 | else: 155 | self.reconnect() 156 | 157 | def clone(self): 158 | return type(self)(self.host, self.port) 159 | 160 | def execute(self, command, silent=True): 161 | self.opt_reconnect() 162 | return self.execute_no_reconnect(command, silent) 163 | 164 | def __enter__(self): 165 | self.connect() 166 | return self 167 | 168 | def __exit__(self, type, value, tb): 169 | self.disconnect() 170 | 171 | def __call__(self, command, silent=False, simple=False): 172 | return self.execute(command, silent) 173 | 174 | 175 | class TarantoolAsyncConnection(TarantoolConnection): 176 | pool = TarantoolPool 177 | 178 | def __init__(self, host, port): 179 | super(TarantoolAsyncConnection, self).__init__(host, port) 180 | self.connections = None 181 | libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True) 182 | self._sys_recv = libc.recv 183 | 184 | @property 185 | def socket(self): 186 | with self.connections.get() as c: 187 | result = c 188 | return result 189 | 190 | def connect(self): 191 | self.connections = self.pool(self.host, self.port, 3) 192 | self.is_connected = True 193 | 194 | def disconnect(self): 195 | if self.is_connected: 196 | self.connections.close_all() 197 | self.is_connected = False 198 | 199 | def execute(self, command, silent=True): 200 | return self.execute_no_reconnect(command, silent) 201 | -------------------------------------------------------------------------------- /lib/test_suite.py: -------------------------------------------------------------------------------- 1 | try: 2 | # Python 2 3 | import ConfigParser as configparser 4 | except ImportError: 5 | # Python 3 6 | import configparser 7 | 8 | import json 9 | import os 10 | import re 11 | import sys 12 | import time 13 | 14 | from lib import Options 15 | from lib.app_server import AppServer 16 | from lib.luatest_server import LuatestServer 17 | from lib.colorer import color_stdout 18 | from lib.colorer import test_line 19 | from lib.inspector import TarantoolInspector 20 | from lib.server import Server 21 | from lib.tarantool_server import TarantoolServer 22 | from lib.unittest_server import UnittestServer 23 | 24 | 25 | class ConfigurationError(RuntimeError): 26 | def __init__(self, name, value, expected): 27 | self.name = name 28 | self.value = value 29 | self.expected = expected 30 | 31 | def __str__(self): 32 | return "Bad value for %s: expected %s, got %s" % ( 33 | repr(self.name), self.expected, repr(self.value) 34 | ) 35 | 36 | 37 | class TestSuite: 38 | """Each test suite contains a number of related tests files, 39 | located in the same directory on disk. Each test file has 40 | extention .test and contains a listing of server commands, 41 | followed by their output. The commands are executed, and 42 | obtained results are compared with pre-recorded output. In case 43 | of a comparision difference, an exception is raised. A test suite 44 | must also contain suite.ini, which describes how to start the 45 | server for this suite, the client program to execute individual 46 | tests and other suite properties. 
The server is started once per 47 | suite.""" 48 | 49 | RETRIES_COUNT = Options().args.retries 50 | 51 | def get_multirun_conf(self, suite_path): 52 | conf_name = self.ini.get('config', None) 53 | if conf_name is None: 54 | return None 55 | 56 | path = os.path.join(suite_path, conf_name) 57 | result = None 58 | with open(path) as cfg: 59 | try: 60 | content = cfg.read() 61 | content = re.sub(r'^\s*//.*$', '', content, flags=re.M) 62 | result = json.loads(content) 63 | except ValueError: 64 | raise RuntimeError('Invalid multirun json') 65 | return result 66 | 67 | def get_multirun_params(self, test_path): 68 | test = test_path.split('/')[-1] 69 | if self.multi_run is None: 70 | return 71 | result = self.multi_run.get(test, None) 72 | if result is not None: 73 | return result 74 | result = self.multi_run.get('*', None) 75 | return result 76 | 77 | def parse_bool_opt(self, name, default): 78 | val = self.ini.get(name) 79 | if val is None: 80 | self.ini[name] = default 81 | elif isinstance(val, bool): 82 | pass 83 | elif isinstance(val, str) and val.lower() in ('true', 'false'): 84 | # If value is not boolean it come from ini file, need to convert 85 | # string 'True' or 'False' into boolean representation. 86 | self.ini[name] = val.lower() == 'true' 87 | else: 88 | raise ConfigurationError(name, val, "'True' or 'False'") 89 | 90 | def __init__(self, suite_path, args): 91 | """Initialize a test suite: check that it exists and contains 92 | a syntactically correct configuration file. Then create 93 | a test instance for each found test.""" 94 | self.args = args 95 | self.tests = [] 96 | self.ini = {} 97 | self.fragile = {'retries': self.RETRIES_COUNT, 'tests': {}} 98 | self.suite_path = suite_path 99 | self.ini["core"] = "tarantool" 100 | 101 | if not os.access(suite_path, os.F_OK): 102 | raise RuntimeError("Suite %s doesn't exist" % repr(suite_path)) 103 | 104 | # read the suite config 105 | parser_kwargs = dict() 106 | if sys.version_info[0] == 3: 107 | parser_kwargs['inline_comment_prefixes'] = (';',) 108 | parser_kwargs['strict'] = True 109 | config = configparser.ConfigParser(**parser_kwargs) 110 | config.read(os.path.join(suite_path, "suite.ini")) 111 | self.ini.update(dict(config.items("default"))) 112 | self.ini.update(self.args.__dict__) 113 | self.multi_run = self.get_multirun_conf(suite_path) 114 | 115 | # list of long running tests 116 | if 'long_run' not in self.ini: 117 | self.ini['long_run'] = [] 118 | 119 | for i in ["script"]: 120 | self.ini[i] = os.path.join(suite_path, self.ini[i]) \ 121 | if i in self.ini else None 122 | for i in ["disabled", "valgrind_disabled", "release_disabled", 123 | "fragile"]: 124 | self.ini[i] = dict.fromkeys(self.ini[i].split()) \ 125 | if i in self.ini else dict() 126 | for i in ["lua_libs"]: 127 | self.ini[i] = map( 128 | lambda x: os.path.join(suite_path, x), 129 | dict.fromkeys(self.ini[i].split()) 130 | if i in self.ini else dict()) 131 | if config.has_option("default", "fragile"): 132 | fragiles = config.get("default", "fragile") 133 | try: 134 | self.fragile.update(json.loads(fragiles)) 135 | if 'tests' not in self.fragile: 136 | raise RuntimeError( 137 | "Key 'tests' absent in 'fragile' json: {}" 138 | . 
format(self.fragile)) 139 | except ValueError: 140 | # use old format dictionary 141 | self.fragile['tests'] = self.ini['fragile'] 142 | 143 | self.parse_bool_opt('use_unix_sockets_iproto', False) 144 | self.parse_bool_opt('is_parallel', False) 145 | self.parse_bool_opt('show_reproduce_content', True) 146 | 147 | # XXX: Refactor *Server.find_tests() to return a value 148 | # instead of direct changing of test_suite.tests and get 149 | # rid of all other side effects. 150 | self.tests_are_collected = False 151 | 152 | if self.ini['core'] == 'luatest': 153 | LuatestServer.verify_luatest_exe() 154 | 155 | def collect_tests(self): 156 | if self.tests_are_collected: 157 | return self.tests 158 | 159 | if self.ini['core'] == 'tarantool': 160 | TarantoolServer.find_tests(self, self.suite_path) 161 | elif self.ini['core'] == 'luatest': 162 | LuatestServer.find_tests(self, self.suite_path) 163 | elif self.ini['core'] == 'app': 164 | AppServer.find_tests(self, self.suite_path) 165 | elif self.ini['core'] == 'unittest': 166 | UnittestServer.find_tests(self, self.suite_path) 167 | elif self.ini['core'] == 'stress': 168 | # parallel tests are not supported and disabled for now 169 | self.tests = [] 170 | self.tests_are_collected = True 171 | return self.tests 172 | else: 173 | raise ValueError( 174 | 'Cannot collect tests of unknown type: %s' % self.ini['core']) 175 | 176 | # In given cases, this large output looks redundant. 177 | if not Options().args.reproduce and not Options().args.show_tags: 178 | color_stdout("Collecting tests in ", schema='ts_text') 179 | color_stdout( 180 | '%s (Found %s tests)' % ( 181 | repr(self.suite_path).ljust(16), 182 | str(len(self.tests)).ljust(3) 183 | ), 184 | schema='path' 185 | ) 186 | color_stdout(": ", self.ini["description"], ".\n", 187 | schema='ts_text') 188 | self.tests_are_collected = True 189 | return self.tests 190 | 191 | def get_fragile_list(self): 192 | return self.fragile['tests'].keys() 193 | 194 | def stable_tests(self): 195 | self.collect_tests() 196 | res = [] 197 | for test in self.tests: 198 | if os.path.basename(test.name) not in self.get_fragile_list(): 199 | res.append(test) 200 | return res 201 | 202 | def fragile_tests(self): 203 | self.collect_tests() 204 | res = [] 205 | for test in self.tests: 206 | if os.path.basename(test.name) in self.get_fragile_list(): 207 | res.append(test) 208 | return res 209 | 210 | def gen_server(self): 211 | try: 212 | return Server(self.ini, test_suite=self) 213 | except Exception as e: 214 | print(e) 215 | raise RuntimeError("Unknown server: core = {0}".format( 216 | self.ini["core"])) 217 | 218 | def is_test_enabled(self, test, conf, server): 219 | test_name = os.path.basename(test.name) 220 | tconf = '%s:%s' % (test_name, conf or '') 221 | checks = [ 222 | (True, self.ini["disabled"]), 223 | (not server.debug, self.ini["release_disabled"]), 224 | (self.args.valgrind, self.ini["valgrind_disabled"]), 225 | (not self.args.long, self.ini["long_run"]) 226 | ] 227 | for check in checks: 228 | check_enabled, disabled_tests = check 229 | if check_enabled and (test_name in disabled_tests or 230 | tconf in disabled_tests): 231 | return False 232 | return True 233 | 234 | def start_server(self, server): 235 | # create inspector daemon for cluster tests 236 | inspector = TarantoolInspector( 237 | 'localhost', server.inspector_port 238 | ) 239 | inspector.start() 240 | # fixme: remove this string if we fix all legacy tests 241 | suite_name = os.path.basename(self.suite_path) 242 | # Set 'lua' type for *.test.lua and 
*.test.sql test files. 243 | server.tests_type = 'python' if suite_name.endswith('-py') else 'lua' 244 | server.deploy(silent=False) 245 | return inspector 246 | 247 | def stop_server(self, server, inspector, silent=False, cleanup=True): 248 | if server: 249 | server.stop(silent=silent) 250 | if inspector: 251 | inspector.stop() 252 | # don't delete core files or state of the data dir 253 | # in case of exception, which is raised when the 254 | # server crashes 255 | if cleanup and inspector: 256 | inspector.cleanup_nondefault() 257 | if cleanup and server: 258 | server.cleanup() 259 | 260 | def run_test(self, test, server, inspector): 261 | """ Returns short status of the test as a string: 'skip', 'pass', 262 | 'new', 'fail', or 'disabled'. 263 | """ 264 | test.inspector = inspector 265 | test_name = os.path.basename(test.name) 266 | full_test_name = os.path.join(self.ini['suite'], test_name) 267 | test_line(full_test_name, test.conf_name) 268 | 269 | start_time = time.time() 270 | if self.is_test_enabled(test, test.conf_name, server): 271 | short_status = test.run(server) 272 | else: 273 | color_stdout("[ disabled ]\n", schema='t_name') 274 | short_status = 'disabled' 275 | duration = time.time() - start_time 276 | 277 | # cleanup only if test passed or if --force mode enabled 278 | if Options().args.is_force or short_status == 'pass': 279 | inspector.cleanup_nondefault() 280 | 281 | return short_status, duration 282 | 283 | def is_parallel(self): 284 | return self.ini['is_parallel'] 285 | 286 | def fragile_retries(self): 287 | return self.fragile['retries'] 288 | 289 | def show_reproduce_content(self): 290 | return self.ini['show_reproduce_content'] 291 | 292 | def test_is_long(self, task_id): 293 | return os.path.basename(task_id[0]) in self.ini['long_run'] 294 | -------------------------------------------------------------------------------- /lib/unittest_server.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import sys 4 | import glob 5 | from subprocess import Popen, PIPE, STDOUT 6 | 7 | from lib.sampler import sampler 8 | from lib.server import Server 9 | from lib.tarantool_server import Test 10 | from lib.tarantool_server import TarantoolServer 11 | 12 | 13 | class UnitTest(Test): 14 | def __init__(self, *args, **kwargs): 15 | Test.__init__(self, *args, **kwargs) 16 | self.valgrind = kwargs.get('valgrind', False) 17 | 18 | def execute(self, server): 19 | server.current_test = self 20 | execs = server.prepare_args() 21 | proc = Popen(execs, cwd=server.vardir, stdout=PIPE, stderr=STDOUT) 22 | sampler.register_process(proc.pid, self.id, server.name) 23 | sys.stdout.write_bytes(proc.communicate()[0]) 24 | 25 | 26 | class UnittestServer(Server): 27 | """A dummy server implementation for unit test suite""" 28 | def __new__(cls, ini=None, *args, **kwargs): 29 | cls = Server.get_mixed_class(cls, ini) 30 | return object.__new__(cls) 31 | 32 | def __init__(self, _ini=None, test_suite=None): 33 | if _ini is None: 34 | _ini = {} 35 | ini = {'vardir': None} 36 | ini.update(_ini) 37 | Server.__init__(self, ini, test_suite) 38 | self.testdir = os.path.abspath(os.curdir) 39 | self.vardir = ini['vardir'] 40 | self.builddir = ini['builddir'] 41 | self.name = 'unittest_server' 42 | 43 | @property 44 | def logfile(self): 45 | return self.current_test.tmp_result 46 | 47 | @property 48 | def binary(self): 49 | return UnittestServer.prepare_args(self)[0] 50 | 51 | def prepare_args(self, args=[]): 52 | executable_path = 
os.path.join(self.builddir, "test", 53 | self.current_test.name) 54 | return [os.path.abspath(executable_path)] + args 55 | 56 | def deploy(self, vardir=None, silent=True, wait=True): 57 | self.vardir = vardir 58 | if not os.access(self.vardir, os.F_OK): 59 | os.makedirs(self.vardir) 60 | 61 | @classmethod 62 | def find_exe(cls, builddir): 63 | cls.builddir = builddir 64 | cls.binary = TarantoolServer.binary 65 | cls.debug = bool(re.findall(r'^Target:.*-Debug$', str(cls.version()), 66 | re.M)) 67 | 68 | @staticmethod 69 | def find_tests(test_suite, suite_path): 70 | def patterned(test, patterns): 71 | answer = [] 72 | for i in patterns: 73 | if test.name.find(i) != -1: 74 | answer.append(test) 75 | return answer 76 | 77 | test_suite.ini['suite'] = suite_path 78 | tests = glob.glob(os.path.join(suite_path, "*.test")) 79 | 80 | if not tests: 81 | executable_path_glob = os.path.join(test_suite.args.builddir, 82 | 'test', suite_path, '*.test') 83 | tests = glob.glob(executable_path_glob) 84 | 85 | tests = Server.exclude_tests(tests, test_suite.args.exclude) 86 | test_suite.tests = [UnitTest(k, test_suite.args, test_suite.ini) 87 | for k in sorted(tests)] 88 | test_suite.tests = sum([patterned(x, test_suite.args.tests) 89 | for x in test_suite.tests], []) 90 | 91 | def print_log(self, lines): 92 | pass 93 | -------------------------------------------------------------------------------- /lib/utils.py: -------------------------------------------------------------------------------- 1 | import errno 2 | import os 3 | import sys 4 | import collections 5 | import signal 6 | import fcntl 7 | import difflib 8 | import time 9 | import json 10 | import subprocess 11 | import multiprocessing 12 | from lib.colorer import color_stdout 13 | 14 | try: 15 | # Python3.5 or above 16 | from signal import Signals 17 | except ImportError: 18 | # Python2 19 | Signals = None 20 | 21 | try: 22 | # Python 3.3+. 23 | from shlex import quote as _shlex_quote 24 | except ImportError: 25 | # Python 2.7. 26 | from pipes import quote as _shlex_quote 27 | 28 | try: 29 | # Python 3.3+. 30 | from shutil import get_terminal_size 31 | except ImportError: 32 | # Python 2.7. 33 | get_terminal_size = None 34 | 35 | try: 36 | # Python 3.3+ 37 | from os import sched_getaffinity 38 | except ImportError: 39 | sched_getaffinity = None 40 | 41 | UNIX_SOCKET_LEN_LIMIT = 107 42 | 43 | # Useful for very coarse version differentiation. 
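# PY2 / PY3 gate the small compatibility shims below: for example,
# bytes_to_str() / str_to_bytes() are no-ops on Python 2 and decode /
# encode UTF-8 on Python 3.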
44 | PY3 = sys.version_info[0] == 3 45 | PY2 = sys.version_info[0] == 2 46 | 47 | if PY2: 48 | FileNotFoundError = IOError 49 | 50 | if PY3: 51 | string_types = str, 52 | integer_types = int, 53 | else: 54 | string_types = basestring, # noqa: F821 55 | integer_types = (int, long) # noqa: F821 56 | 57 | 58 | def check_libs(): 59 | deps = [ 60 | ('msgpack', 'msgpack-python'), 61 | ('tarantool', 'tarantool-python') 62 | ] 63 | base_path = os.path.dirname(os.path.abspath(__file__)) 64 | 65 | for (mod_name, mod_dir) in deps: 66 | mod_path = os.path.join(base_path, mod_dir) 67 | if mod_path not in sys.path: 68 | sys.path = [mod_path] + sys.path 69 | 70 | for (mod_name, _mod_dir) in deps: 71 | try: 72 | __import__(mod_name) 73 | except ImportError as e: 74 | color_stdout("\n\nNo %s library found\n" % mod_name, 75 | schema='error') 76 | print(e) 77 | sys.exit(1) 78 | 79 | 80 | def non_empty_valgrind_logs(paths_to_log): 81 | """ Check that there were no warnings in the log.""" 82 | non_empty_logs = [] 83 | for path_to_log in paths_to_log: 84 | if os.path.exists(path_to_log) and os.path.getsize(path_to_log) != 0: 85 | non_empty_logs.append(path_to_log) 86 | return non_empty_logs 87 | 88 | 89 | def print_tail_n(filename, num_lines=None): 90 | """ Print N last lines of a file. If num_lines is not set, 91 | prints the whole file. 92 | """ 93 | with open(filename, "r", encoding="utf-8", errors="replace") as logfile: 94 | tail_n = collections.deque(logfile, num_lines) 95 | for line in tail_n: 96 | color_stdout(line, schema='tail') 97 | 98 | 99 | def find_in_path(name): 100 | path = os.curdir + os.pathsep + os.environ["PATH"] 101 | for _dir in path.split(os.pathsep): 102 | exe = os.path.join(_dir, name) 103 | if os.access(exe, os.X_OK): 104 | return exe 105 | return '' 106 | 107 | 108 | # http://stackoverflow.com/a/2549950 109 | SIGNAMES = dict((int(v), k) for k, v in reversed(sorted( 110 | signal.__dict__.items())) if k.startswith('SIG') and 111 | not k.startswith('SIG_')) 112 | SIGNUMS = dict((k, int(v)) for k, v in reversed(sorted( 113 | signal.__dict__.items())) if k.startswith('SIG') and 114 | not k.startswith('SIG_')) 115 | 116 | 117 | def signame(signal): 118 | if isinstance(signal, integer_types): 119 | return SIGNAMES[signal] 120 | if Signals and isinstance(signal, Signals): 121 | return SIGNAMES[int(signal)] 122 | if isinstance(signal, string_types): 123 | return signal 124 | raise TypeError('signame(): signal argument of unexpected type: {}'.format( 125 | str(type(signal)))) 126 | 127 | 128 | def signum(signal): 129 | if isinstance(signal, integer_types): 130 | return signal 131 | if Signals and isinstance(signal, Signals): 132 | return int(signal) 133 | if isinstance(signal, string_types): 134 | if not signal.startswith('SIG'): 135 | signal = 'SIG' + signal 136 | return SIGNUMS[signal] 137 | raise TypeError('signum(): signal argument of unexpected type: {}'.format( 138 | str(type(signal)))) 139 | 140 | 141 | def warn_unix_sockets_at_start(vardir): 142 | max_unix_socket_rel = '???_replication/autobootstrap_guest3.control' 143 | real_vardir = os.path.realpath(vardir) 144 | max_unix_socket_abs = os.path.join(real_vardir, max_unix_socket_rel) 145 | max_unix_socket_real = os.path.realpath(max_unix_socket_abs) 146 | if len(max_unix_socket_real) > UNIX_SOCKET_LEN_LIMIT: 147 | color_stdout( 148 | 'WARGING: unix sockets can become longer than %d symbols:\n' 149 | % UNIX_SOCKET_LEN_LIMIT, 150 | schema='error') 151 | color_stdout('WARNING: for example: "%s" has length %d\n' % 152 | (max_unix_socket_real, 
len(max_unix_socket_real)), 153 | schema='error') 154 | 155 | 156 | def warn_unix_socket(path): 157 | real_path = os.path.realpath(path) 158 | if len(real_path) <= UNIX_SOCKET_LEN_LIMIT or \ 159 | real_path in warn_unix_socket.warned: 160 | return 161 | color_stdout( 162 | '\nWARGING: unix socket\'s "%s" path has length %d symbols that is ' 163 | 'longer than %d. That likely will cause failing of tests.\n' % 164 | (real_path, len(real_path), UNIX_SOCKET_LEN_LIMIT), schema='error') 165 | warn_unix_socket.warned.add(real_path) 166 | 167 | 168 | warn_unix_socket.warned = set() 169 | 170 | 171 | def safe_makedirs(directory): 172 | if os.path.isdir(directory): 173 | return 174 | # try-except to prevent races btw processes 175 | try: 176 | os.makedirs(directory) 177 | except OSError: 178 | pass 179 | 180 | 181 | def format_process(pid): 182 | cmdline = 'unknown' 183 | try: 184 | with open('/proc/%d/cmdline' % pid, 'r') as f: 185 | cmdline = ' '.join(f.read().split('\0')).strip() or cmdline 186 | except (OSError, IOError): 187 | pass 188 | status = 'unknown' 189 | try: 190 | with open('/proc/%d/status' % pid, 'r') as f: 191 | for line in f: 192 | if ':' not in line: 193 | continue 194 | key, value = line.split(':', 1) 195 | if key == 'State': 196 | status = value.strip() 197 | except (OSError, IOError): 198 | pass 199 | return 'process %d [%s; %s]' % (pid, status, cmdline) 200 | 201 | 202 | def proc_stat_rss_supported(): 203 | return os.path.isfile('/proc/%d/status' % os.getpid()) 204 | 205 | 206 | def get_proc_stat_rss(pid): 207 | rss = 0 208 | try: 209 | with open('/proc/%d/status' % pid, 'r') as f: 210 | for line in f: 211 | if ':' not in line: 212 | continue 213 | key, value = line.split(':', 1) 214 | if key == 'VmRSS': 215 | rss = int(value.strip().split()[0]) 216 | except (OSError, IOError): 217 | pass 218 | return rss 219 | 220 | 221 | def set_fd_cloexec(socket): 222 | flags = fcntl.fcntl(socket, fcntl.F_GETFD) 223 | fcntl.fcntl(socket, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) 224 | 225 | 226 | def print_unidiff(filepath_a, filepath_b): 227 | def process_file(filepath): 228 | fh = None 229 | try: 230 | fh = open(filepath, 'r') 231 | lines = fh.readlines() 232 | ctime = time.ctime(os.stat(filepath).st_mtime) 233 | except Exception: 234 | if not os.path.exists(filepath): 235 | color_stdout('[File does not exist: {}]\n'.format(filepath), 236 | schema='error') 237 | lines = [] 238 | ctime = time.ctime() 239 | if fh: 240 | fh.close() 241 | return lines, ctime 242 | 243 | lines_a, time_a = process_file(filepath_a) 244 | lines_b, time_b = process_file(filepath_b) 245 | diff = difflib.unified_diff(lines_a, 246 | lines_b, 247 | filepath_a, 248 | filepath_b, 249 | time_a, 250 | time_b) 251 | color_stdout.writeout_unidiff(diff) 252 | 253 | 254 | def prefix_each_line(prefix, data): 255 | data = data.rstrip('\n') 256 | lines = [(line + '\n') for line in data.split('\n')] 257 | return prefix + prefix.join(lines) 258 | 259 | 260 | def just_and_trim(src, width): 261 | if len(src) > width: 262 | return src[:width - 1] + '>' 263 | return src.ljust(width) 264 | 265 | 266 | def xlog_rows(xlog_path): 267 | """ Parse xlog / snapshot file. 268 | 269 | Assume tarantool and tarantoolctl is in PATH. 
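    Yields one decoded row per line of `tarantoolctl cat --format=json`
    output, i.e. a dict with 'HEADER' and 'BODY' keys (see the example
    record in extract_schema_from_snapshot() below).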
270 | """ 271 | if not os.path.exists(xlog_path): 272 | raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), xlog_path) 273 | cmd = ['tarantoolctl', 'cat', xlog_path, '--format=json', '--show-system'] 274 | with open(os.devnull, 'w') as devnull: 275 | process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=devnull) 276 | for line in process.stdout.readlines(): 277 | yield json.loads(bytes_to_str(line)) 278 | 279 | 280 | def extract_schema_from_snapshot(snapshot_path): 281 | """ 282 | Extract schema version from snapshot. 283 | 284 | Assume tarantool and tarantoolctl is in PATH. 285 | 286 | Example of record: 287 | 288 | { 289 | "HEADER": {"lsn":2, "type": "INSERT", "timestamp": 1584694286.0031}, 290 | "BODY": {"space_id": 272, "tuple": ["version", 2, 3, 1]} 291 | } 292 | 293 | :returns: (2, 3, 1) 294 | """ 295 | BOX_SCHEMA_ID = 272 296 | for row in xlog_rows(snapshot_path): 297 | if row['HEADER']['type'] == 'INSERT' and \ 298 | row['BODY']['space_id'] == BOX_SCHEMA_ID: 299 | res = row['BODY']['tuple'] 300 | if res[0] == 'version': 301 | return tuple(res[1:]) 302 | return None 303 | 304 | 305 | def assert_bytes(b): 306 | """ Ensure given value is . 307 | """ 308 | if type(b) is not bytes: 309 | raise ValueError('Internal error: expected {}, got {}: {}'.format( 310 | str(bytes), str(type(b)), repr(b))) 311 | 312 | 313 | def assert_str(s): 314 | """ Ensure given value is . 315 | """ 316 | if type(s) is not str: 317 | raise ValueError('Internal error: expected {}, got {}: {}'.format( 318 | str(str), str(type(s)), repr(s))) 319 | 320 | 321 | def bytes_to_str(b): 322 | """ Convert to . 323 | 324 | No-op on Python 2. 325 | """ 326 | assert_bytes(b) 327 | if PY2: 328 | return b 329 | return b.decode('utf-8') 330 | 331 | 332 | def str_to_bytes(s): 333 | """ Convert to . 334 | 335 | No-op on Python 2. 336 | """ 337 | assert_str(s) 338 | if PY2: 339 | return s 340 | return s.encode('utf-8') 341 | 342 | 343 | def parse_tag_line(line): 344 | tags_str = line.split(':', 1)[1].strip() 345 | return [tag.strip() for tag in tags_str.split(',')] 346 | 347 | 348 | def find_tags(filename): 349 | """ Extract tags from a first comment in the file. 350 | """ 351 | # TODO: Support multiline comments. See exclude_tests() in 352 | # lib/server.py. 353 | if filename.endswith('.lua') or filename.endswith('.sql'): 354 | singleline_comment = '--' 355 | elif filename.endswith('.py'): 356 | singleline_comment = '#' 357 | else: 358 | return [] 359 | 360 | tags = [] 361 | with open(filename, 'r') as f: 362 | for line in f: 363 | line = line.rstrip('\n') 364 | if line.startswith('#!'): 365 | pass 366 | elif line == '': 367 | pass 368 | elif line.startswith(singleline_comment + ' tags:'): 369 | tags.extend(parse_tag_line(line)) 370 | elif line.startswith(singleline_comment): 371 | pass 372 | else: 373 | break 374 | return tags 375 | 376 | 377 | def prepend_path(p): 378 | """ Add an absolute path into PATH (at start) if it is not already there. 379 | """ 380 | p = os.path.abspath(p) 381 | if p in os.environ['PATH'].split(os.pathsep): 382 | return 383 | os.environ['PATH'] = os.pathsep.join((p, os.environ['PATH'])) 384 | 385 | 386 | def shlex_quote(s): 387 | return _shlex_quote(s) 388 | 389 | 390 | def terminal_columns(): 391 | if get_terminal_size: 392 | return get_terminal_size().columns 393 | return 80 394 | 395 | 396 | def cpu_count(): 397 | """ 398 | Return available CPU count available for the current process. 399 | 400 | The result is the same as one from the `nproc` command. 
401 | 402 | It may be smaller than all the online CPUs count. For example, 403 | an LXD container may have limited available CPUs or it may be 404 | reduced by `taskset` or `numactl` commands. 405 | 406 | If it is impossible to determine the available CPUs count (for 407 | example on Python < 3.3), fallback to the all online CPUs 408 | count. 409 | """ 410 | if sched_getaffinity: 411 | return len(sched_getaffinity(0)) 412 | return multiprocessing.cpu_count() 413 | -------------------------------------------------------------------------------- /requirements-test.txt: -------------------------------------------------------------------------------- 1 | coverage==5.* 2 | flake8==5.* 3 | hypothesis==4.* 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Pin pyyaml to 5.3.1 until yaml/pyyaml#724 is fixed. 2 | PyYAML==5.3.1 3 | gevent==22.10.2; python_version <= '3.8' 4 | gevent==24.11.1; python_version > '3.8' 5 | -------------------------------------------------------------------------------- /test-run.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Tarantool regression test suite front-end.""" 3 | 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions 6 | # are met: 7 | # 1. Redistributions of source code must retain the above copyright 8 | # notice, this list of conditions and the following disclaimer. 9 | # 2. Redistributions in binary form must reproduce the above copyright 10 | # notice, this list of conditions and the following disclaimer in the 11 | # documentation and/or other materials provided with the distribution. 12 | # 13 | # THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 | # ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 | # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 | # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 | # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 | # SUCH DAMAGE. 24 | 25 | # How it works (briefly, simplified) 26 | # ################################## 27 | # 28 | # * Get task groups; each task group correspond to a test suite; each task 29 | # group contains workers generator (factory) and task IDs (test_name + 30 | # conf_name). 31 | # * Put task groups to Dispatcher, which: 32 | # * Create task (input) and result (output) queues for each task group. 33 | # * Create and run specified count of workers on these queues. 34 | # * Wait for results on the result queues and calls registered listeners. 35 | # * If some worker done its work, the Dispatcher will run the new one if 36 | # there are tasks. 37 | # * Listeners received messages from workers and timeouts when no messages 38 | # received. Its: 39 | # * Count results statistics. 40 | # * Multiplex screen's output. 41 | # * Log output to per worker log files. 
42 | # * Exit us when some test failed. 43 | # * Exit us when no output received from workers during some time. 44 | # * When all workers reported it's done (or exceptional situation occured) the 45 | # main process kill all processes in the same process group as its own to 46 | # prevent 'orphan' worker or tarantool servers from flooding an OS. 47 | # * Exit status is zero (success) when no errors detected and all requested 48 | # tests passed. Otherwise non-zero. 49 | 50 | 51 | import multiprocessing 52 | import os 53 | import sys 54 | import time 55 | 56 | from lib import Options 57 | from lib import saved_env 58 | from lib.colorer import color_stdout 59 | from lib.colorer import separator 60 | from lib.colorer import test_line 61 | from lib.utils import cpu_count 62 | from lib.utils import find_tags 63 | from lib.utils import shlex_quote 64 | from lib.error import TestRunInitError 65 | from lib.utils import print_tail_n 66 | from lib.utils import PY3 67 | from lib.worker import get_task_groups 68 | from lib.worker import get_reproduce_file 69 | from lib.worker import reproduce_task_groups 70 | from lib.worker import print_greetings 71 | from dispatcher import Dispatcher 72 | from listeners import HangError 73 | 74 | EXIT_SUCCESS = 0 75 | EXIT_HANG = 1 76 | EXIT_INTERRUPTED = 2 77 | EXIT_FAILED_TEST = 3 78 | EXIT_NOTDONE_TEST = 4 79 | EXIT_INIT_ERROR = 5 80 | EXIT_UNKNOWN_ERROR = 50 81 | 82 | 83 | def main_loop_parallel(): 84 | color_stdout("Started {0}\n".format(" ".join(sys.argv)), schema='tr_text') 85 | 86 | args = Options().args 87 | jobs = args.jobs 88 | if jobs < 1: 89 | # faster result I got was with 2 * cpu_count 90 | jobs = 2 * cpu_count() 91 | 92 | if jobs > 0: 93 | color_stdout("Running in parallel with %d workers\n\n" % jobs, 94 | schema='tr_text') 95 | randomize = True 96 | 97 | color_stdout("Timeout options:\n", schema='tr_text') 98 | color_stdout('-' * 19, "\n", schema='separator') 99 | color_stdout("SERVER_START_TIMEOUT:" . ljust(26) + "{}\n" . 100 | format(args.server_start_timeout), schema='tr_text') 101 | color_stdout("REPLICATION_SYNC_TIMEOUT:" . ljust(26) + "{}\n" . 102 | format(args.replication_sync_timeout), schema='tr_text') 103 | color_stdout("TEST_TIMEOUT:" . ljust(26) + "{}\n" . 104 | format(args.test_timeout), schema='tr_text') 105 | color_stdout("NO_OUTPUT_TIMEOUT:" . ljust(26) + "{}\n" . 
106 | format(args.no_output_timeout), schema='tr_text') 107 | color_stdout("\n", schema='tr_text') 108 | 109 | task_groups = get_task_groups() 110 | if Options().args.reproduce: 111 | task_groups = reproduce_task_groups(task_groups) 112 | jobs = 1 113 | randomize = False 114 | 115 | dispatcher = Dispatcher(task_groups, jobs, randomize) 116 | dispatcher.start() 117 | 118 | print_greetings() 119 | 120 | color_stdout('\n') 121 | separator('=') 122 | color_stdout('WORKR ', schema='t_name') 123 | test_line('TEST', 'PARAMS') 124 | color_stdout('RESULT\n', schema='test_pass') 125 | separator('-') 126 | 127 | try: 128 | is_force = Options().args.is_force 129 | dispatcher.wait() 130 | dispatcher.wait_processes() 131 | separator('-') 132 | has_failed, has_flaked = dispatcher.statistics.print_statistics() 133 | has_undone = dispatcher.report_undone( 134 | verbose=bool(is_force or not has_failed)) 135 | if any([has_failed, has_flaked]): 136 | dispatcher.artifacts.save_artifacts() 137 | if has_failed: 138 | return EXIT_FAILED_TEST 139 | if has_undone: 140 | return EXIT_NOTDONE_TEST 141 | except KeyboardInterrupt: 142 | separator('-') 143 | dispatcher.statistics.print_statistics() 144 | dispatcher.report_undone(verbose=False) 145 | raise 146 | except HangError: 147 | separator('-') 148 | dispatcher.statistics.print_statistics() 149 | dispatcher.report_undone(verbose=False) 150 | return EXIT_HANG 151 | return EXIT_SUCCESS 152 | 153 | 154 | def main_parallel(): 155 | res = EXIT_UNKNOWN_ERROR 156 | 157 | try: 158 | res = main_loop_parallel() 159 | except KeyboardInterrupt: 160 | color_stdout('\n[Main process] Caught keyboard interrupt\n', 161 | schema='test_var') 162 | res = EXIT_INTERRUPTED 163 | return res 164 | 165 | 166 | def main_loop_consistent(failed_test_ids): 167 | # find and prepare all tasks/groups, print information 168 | task_groups = get_task_groups().items() 169 | print_greetings() 170 | 171 | for name, task_group in task_groups: 172 | # print information about current test suite 173 | color_stdout('\n') 174 | separator('=') 175 | test_line('TEST', 'PARAMS') 176 | color_stdout("RESULT\n", schema='test_pass') 177 | separator('-') 178 | 179 | task_ids = task_group['task_ids'] 180 | show_reproduce_content = task_group['show_reproduce_content'] 181 | if not task_ids: 182 | continue 183 | worker_id = 1 184 | worker = task_group['gen_worker'](worker_id) 185 | for task_id in task_ids: 186 | # The 'run_task' method returns a tuple of two items: 187 | # (short_status, duration). So taking the first 188 | # item of this tuple for failure check. 
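            # Possible short statuses are 'skip', 'pass', 'new', 'fail' and
            # 'disabled' (see TestSuite.run_task() in lib/test_suite.py),
            # e.g. ('pass', 0.42).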
189 | short_status = worker.run_task(task_id)[0] 190 | if short_status == 'fail': 191 | reproduce_file_path = \ 192 | get_reproduce_file(worker.name) 193 | color_stdout('Reproduce file %s\n' % 194 | reproduce_file_path, schema='error') 195 | if show_reproduce_content: 196 | color_stdout("---\n", schema='separator') 197 | print_tail_n(reproduce_file_path) 198 | color_stdout("...\n", schema='separator') 199 | failed_test_ids.append(task_id) 200 | if not Options().args.is_force: 201 | worker.stop_server(cleanup=False) 202 | return 203 | 204 | separator('-') 205 | 206 | worker.stop_server(silent=False) 207 | color_stdout() 208 | 209 | 210 | def main_consistent(): 211 | color_stdout("Started {0}\n".format(" ".join(sys.argv)), schema='tr_text') 212 | failed_test_ids = [] 213 | 214 | try: 215 | main_loop_consistent(failed_test_ids) 216 | except KeyboardInterrupt: 217 | color_stdout('[Main loop] Caught keyboard interrupt\n', 218 | schema='test_var') 219 | except RuntimeError as e: 220 | color_stdout("\nFatal error: %s. Execution aborted.\n" % e, 221 | schema='error') 222 | if Options().args.gdb: 223 | time.sleep(100) 224 | return -1 225 | 226 | if failed_test_ids and Options().args.is_force: 227 | color_stdout("\n===== %d tests failed:\n" % len(failed_test_ids), 228 | schema='error') 229 | for test_id in failed_test_ids: 230 | color_stdout("----- %s\n" % str(test_id), schema='info') 231 | 232 | return (-1 if failed_test_ids else 0) 233 | 234 | 235 | def show_tags(): 236 | # Collect tests in the same way as when we run them. 237 | collected_tags = set() 238 | for name, task_group in get_task_groups().items(): 239 | for task_id in task_group['task_ids']: 240 | test_name, _ = task_id 241 | for tag in find_tags(test_name): 242 | collected_tags.add(tag) 243 | 244 | for tag in sorted(collected_tags): 245 | color_stdout(tag + '\n') 246 | 247 | 248 | def show_env(): 249 | """ Print new values of changed environment variables. 250 | 251 | The format is suitable for sourcing in a shell. 252 | """ 253 | original_env = saved_env() 254 | for k, v in os.environ.items(): 255 | # Don't change PWD. 256 | # 257 | # test-run changes current working directory and set PWD 258 | # environment variable. If we'll just export PWD (and 259 | # don't change a current directory), it will be very 260 | # misleading. Moreover, changing the directory by test-run 261 | # is more like an implementation detail. It would be good 262 | # to get rid from this approach in a future. 263 | if k == 'PWD': 264 | continue 265 | 266 | # Don't print unchanged environment variables. 267 | # 268 | # It would be harmless, but if we filter them out, the 269 | # output is nicely short. 270 | if original_env.get(k) == v: 271 | continue 272 | 273 | color_stdout('export {}={}\n'.format(shlex_quote(k), shlex_quote(v))) 274 | 275 | # test-run doesn't call `del os.environ['FOO']` anywhere, so 276 | # all changed variables are present in `os.environ`. We don't 277 | # need an extra traverse over `original_env` as it would be in 278 | # the general case of comparing two dictionaries. 279 | 280 | 281 | if __name__ == "__main__": 282 | # In Python 3 start method 'spawn' in multiprocessing module becomes 283 | # default on Mac OS. 284 | # 285 | # The 'spawn' method causes re-execution of some code, which is already 286 | # executed in the main process. At least it is seen on the 287 | # lib/__init__.py code, which removes the 'var' directory. Some other 288 | # code may have side effects too, it requires investigation. 
289 | # 290 | # The method also requires object serialization that doesn't work when 291 | # objects use lambdas, whose for example used in class TestSuite 292 | # (lib/test_suite.py). 293 | # 294 | # The latter problem is easy to fix, but the former looks more 295 | # fundamental. So we stick to the 'fork' method now. 296 | if PY3: 297 | multiprocessing.set_start_method('fork') 298 | 299 | # test-run assumes that text file streams are UTF-8 (as 300 | # contrary to ASCII) on Python 3. It is necessary to process 301 | # non ASCII symbols in test files, result files and so on. 302 | # 303 | # Default text file stream encoding depends on a system 304 | # locale with exception for the POSIX locale (C locale): in 305 | # this case UTF-8 is used (see PEP-0540). Sadly, this 306 | # behaviour is in effect since Python 3.7. 307 | # 308 | # We want to achieve the same behaviour on lower Python 309 | # versions, at least on 3.6.8, which is provided by CentOS 7 310 | # and CentOS 8. 311 | # 312 | # So we hack the open() builtin. 313 | # 314 | # https://stackoverflow.com/a/53347548/1598057 315 | if PY3 and sys.version_info[0:2] < (3, 7): 316 | std_open = __builtins__.open 317 | 318 | def open_as_utf8(*args, **kwargs): 319 | if len(args) >= 2: 320 | mode = args[1] 321 | else: 322 | mode = kwargs.get('mode', '') 323 | if 'b' not in mode: 324 | kwargs.setdefault('encoding', 'utf-8') 325 | return std_open(*args, **kwargs) 326 | 327 | __builtins__.open = open_as_utf8 328 | 329 | status = 0 330 | 331 | if Options().args.show_tags: 332 | show_tags() 333 | exit(status) 334 | 335 | if Options().args.show_env: 336 | show_env() 337 | exit(status) 338 | 339 | try: 340 | force_parallel = bool(Options().args.reproduce) 341 | if not force_parallel and Options().args.jobs == -1: 342 | status = main_consistent() 343 | else: 344 | status = main_parallel() 345 | except TestRunInitError as e: 346 | color_stdout(str(e), '\n', schema='error') 347 | status = EXIT_INIT_ERROR 348 | 349 | exit(status) 350 | -------------------------------------------------------------------------------- /test/instances/default.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | local fun = require('fun') 4 | local json = require('json') 5 | 6 | local function default_cfg() 7 | return { 8 | work_dir = os.getenv('TARANTOOL_WORKDIR'), 9 | listen = os.getenv('TARANTOOL_LISTEN'), 10 | log = ('%s/%s.log'):format(os.getenv('TARANTOOL_WORKDIR'), 11 | os.getenv('TARANTOOL_ALIAS')), 12 | } 13 | end 14 | 15 | local function env_cfg() 16 | local cfg = os.getenv('TARANTOOL_BOX_CFG') 17 | if cfg == nil then 18 | return {} 19 | end 20 | 21 | local res = json.decode(cfg) 22 | assert(type(res) == 'table') 23 | return res 24 | end 25 | 26 | -- Create a table for box.cfg from values passed while server initialization and 27 | -- the given argument. 28 | local function box_cfg(cfg) 29 | return fun.chain(default_cfg(), env_cfg(), cfg or {}):tomap() 30 | end 31 | 32 | box.cfg(box_cfg()) 33 | box.schema.user.grant('guest', 'super', nil, nil, {if_not_exists = true}) 34 | 35 | -- The Server:start function unblocks only when this variable becomes true. 36 | -- 37 | -- Set it when the instance is fully operable: 38 | -- * The server listens for requests. 39 | -- * The database is bootstrapped. 40 | -- * Permissions are granted. 41 | -- 42 | -- Use server:start({wait_for_readiness = false}) to not wait for setting this 43 | -- variable. 
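-- On the harness side, test/luatest_helpers/server.lua polls this flag in
-- Server:wait_for_readiness() via net_box:eval('return _G.ready') == true.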
44 | _G.ready = true 45 | -------------------------------------------------------------------------------- /test/luatest_helpers/server.lua: -------------------------------------------------------------------------------- 1 | local clock = require('clock') 2 | local digest = require('digest') 3 | local ffi = require('ffi') 4 | local fiber = require('fiber') 5 | local fio = require('fio') 6 | local fun = require('fun') 7 | local json = require('json') 8 | 9 | local checks = require('checks') 10 | local luatest = require('luatest') 11 | 12 | ffi.cdef([[ 13 | int kill(pid_t pid, int sig); 14 | ]]) 15 | 16 | local Server = luatest.Server:inherit({}) 17 | 18 | local WAIT_TIMEOUT = 60 19 | local WAIT_DELAY = 0.1 20 | 21 | -- Differences from luatest.Server: 22 | -- 23 | -- * 'alias' is mandatory. 24 | -- * 'command' is optional, assumed test/instances/default.lua by 25 | -- default. 26 | -- * 'workdir' is optional, determined by 'alias'. 27 | -- * The new 'box_cfg' parameter. 28 | -- * engine - provides engine for parameterized tests 29 | Server.constructor_checks = fun.chain(Server.constructor_checks, { 30 | alias = 'string', 31 | command = '?string', 32 | workdir = '?string', 33 | box_cfg = '?table', 34 | engine = '?string', 35 | }):tomap() 36 | 37 | Server.socketdir = fio.abspath(os.getenv('VARDIR') or 'test/var') 38 | 39 | function Server.build_instance_uri(alias) 40 | return ('%s/%s.iproto'):format(Server.socketdir, alias) 41 | end 42 | 43 | function Server:initialize() 44 | if self.id == nil then 45 | local random = digest.urandom(9) 46 | self.id = digest.base64_encode(random, {urlsafe = true}) 47 | end 48 | if self.command == nil then 49 | self.command = 'test/instances/default.lua' 50 | end 51 | if self.workdir == nil then 52 | self.workdir = ('%s/%s-%s'):format(self.socketdir, self.alias, self.id) 53 | fio.rmtree(self.workdir) 54 | fio.mktree(self.workdir) 55 | end 56 | if self.net_box_port == nil and self.net_box_uri == nil then 57 | self.net_box_uri = self.build_instance_uri(self.alias) 58 | fio.mktree(self.socketdir) 59 | end 60 | 61 | -- AFAIU, the inner getmetatable() returns our helpers.Server 62 | -- class, the outer one returns luatest.Server class. 63 | getmetatable(getmetatable(self)).initialize(self) 64 | end 65 | 66 | --- Generates environment to run process with. 67 | -- The result is merged into os.environ(). 68 | -- @return map 69 | function Server:build_env() 70 | local res = getmetatable(getmetatable(self)).build_env(self) 71 | if self.box_cfg ~= nil then 72 | res.TARANTOOL_BOX_CFG = json.encode(self.box_cfg) 73 | end 74 | res.TARANTOOL_ENGINE = self.engine 75 | return res 76 | end 77 | 78 | function Server:wait_for_readiness() 79 | local alias = self.alias 80 | local id = self.id 81 | local pid = self.process.pid 82 | 83 | local deadline = clock.time() + WAIT_TIMEOUT 84 | while true do 85 | local ok, is_ready = pcall(function() 86 | self:connect_net_box() 87 | return self.net_box:eval('return _G.ready') == true 88 | end) 89 | if ok and is_ready then 90 | break 91 | end 92 | if clock.time() > deadline then 93 | error(('Starting of server %s-%s (PID %d) was timed out'):format( 94 | alias, id, pid)) 95 | end 96 | fiber.sleep(WAIT_DELAY) 97 | end 98 | end 99 | 100 | -- Unlike the original luatest.Server function it waits for 101 | -- starting the server. 102 | function Server:start(opts) 103 | checks('table', { 104 | wait_for_readiness = '?boolean', 105 | }) 106 | getmetatable(getmetatable(self)).start(self) 107 | 108 | -- The option is true by default. 
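    -- That is, server:start() blocks until _G.ready is set on the instance,
    -- while server:start({wait_for_readiness = false}) returns right after
    -- the process is spawned.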
109 | local wait_for_readiness = true 110 | if opts ~= nil and opts.wait_for_readiness ~= nil then 111 | wait_for_readiness = opts.wait_for_readiness 112 | end 113 | 114 | if wait_for_readiness then 115 | self:wait_for_readiness() 116 | end 117 | end 118 | 119 | -- TODO: Add the 'wait_for_readiness' parameter for the restart() 120 | -- method. 121 | 122 | -- Unlike the original luatest.Server function it waits until 123 | -- the server will stop. 124 | function Server:stop() 125 | local alias = self.alias 126 | local id = self.id 127 | if self.process then 128 | local pid = self.process.pid 129 | getmetatable(getmetatable(self)).stop(self) 130 | 131 | local deadline = clock.time() + WAIT_TIMEOUT 132 | while true do 133 | if ffi.C.kill(pid, 0) ~= 0 then 134 | break 135 | end 136 | if clock.time() > deadline then 137 | error(('Stopping of server %s-%s (PID %d) was timed out'):format( 138 | alias, id, pid)) 139 | end 140 | fiber.sleep(WAIT_DELAY) 141 | end 142 | end 143 | end 144 | 145 | return Server 146 | -------------------------------------------------------------------------------- /test/test-app/cfg.test.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | -- Test is an example of TAP test 4 | 5 | local tap = require('tap') 6 | local test = tap.test('cfg') 7 | test:plan(4) 8 | 9 | box.cfg{listen = box.NULL} 10 | test:is(nil, box.info.listen, 'no cfg.listen - no info.listen') 11 | 12 | box.cfg{listen = '127.0.0.1:0'} 13 | test:ok(box.info.listen:match('127.0.0.1'), 'real IP in info.listen') 14 | test:ok(not box.info.listen:match(':0'), 'real port in info.listen') 15 | 16 | box.cfg{listen = box.NULL} 17 | test:is(nil, box.info.listen, 'cfg.listen reset drops info.listen') 18 | 19 | os.exit(test:check() and 0 or 1) 20 | -------------------------------------------------------------------------------- /test/test-app/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = app 3 | description = application tests 4 | is_parallel = True 5 | -------------------------------------------------------------------------------- /test/test-luatest/smoke_check_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | 3 | local server = require('test.luatest_helpers.server') 4 | 5 | local g = t.group() 6 | 7 | g.before_all = function() 8 | g.server = server:new({ 9 | alias = 'my_server', 10 | env = {MY_ENV_VAR = 'test_value'}, 11 | box_cfg = {memtx_memory = 100 * 1024 ^ 2}, 12 | }) 13 | g.server:start() 14 | end 15 | 16 | g.after_all = function() 17 | g.server:stop() 18 | end 19 | 20 | g.test_server_is_started_and_operable = function() 21 | local res = g.server:eval('return 42') 22 | t.assert_equals(res, 42) 23 | end 24 | 25 | g.test_database_is_bootstrapped_and_accessible = function() 26 | local res = g.server:exec(function() return box.info.status end) 27 | t.assert_equals(res, 'running') 28 | end 29 | 30 | g.test_environment_variable_is_passed = function() 31 | local res = g.server:exec(function() return os.getenv('MY_ENV_VAR') end) 32 | t.assert_equals(res, 'test_value') 33 | end 34 | 35 | g.test_box_cfg_values_are_passed = function() 36 | local res = g.server:exec(function() return box.cfg.memtx_memory end) 37 | t.assert_equals(res, 100 * 1024 ^ 2) 38 | end 39 | -------------------------------------------------------------------------------- /test/test-luatest/suite.ini: 
-------------------------------------------------------------------------------- 1 | [default] 2 | core = luatest 3 | description = luatest 4 | -------------------------------------------------------------------------------- /test/test-run.py: -------------------------------------------------------------------------------- 1 | ../test-run.py -------------------------------------------------------------------------------- /test/test-tarantool/box.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | local os = require('os') 3 | 4 | box.cfg{ 5 | listen = os.getenv("LISTEN"), 6 | memtx_memory = 107374182, 7 | pid_file = "tarantool.pid", 8 | force_recovery = true, 9 | wal_max_size = 500 10 | } 11 | 12 | require('console').listen(os.getenv('ADMIN')) 13 | -------------------------------------------------------------------------------- /test/test-tarantool/call.result: -------------------------------------------------------------------------------- 1 | box.schema.user.create('test', { password = 'test' }) 2 | --- 3 | ... 4 | box.schema.user.grant('test', 'execute,read,write', 'universe') 5 | --- 6 | ... 7 | exp_notation = 1e123 8 | --- 9 | ... 10 | function f1() return 'testing', 1, false, -1, 1.123, math.abs(exp_notation - 1e123) < 0.1, nil end 11 | --- 12 | ... 13 | f1() 14 | --- 15 | - testing 16 | - 1 17 | - false 18 | - -1 19 | - 1.123 20 | - true 21 | - null 22 | ... 23 | call f1 () 24 | - 'testing' 25 | - 1 26 | - False 27 | - -1 28 | - 1.123 29 | - True 30 | - None 31 | f1=nil 32 | --- 33 | ... 34 | call f1 () 35 | { 36 | "error": { 37 | "code": "ER_NO_SUCH_PROC", 38 | "reason": "Procedure 'f1' is not defined" 39 | } 40 | } 41 | function f1() return f1 end 42 | --- 43 | ... 44 | call f1 () 45 | { 46 | "error": { 47 | "code": "ER_PROC_LUA", 48 | "reason": "unsupported Lua type 'function'" 49 | } 50 | } 51 | call box.error (33333, 'Hey!') 52 | { 53 | "error": { 54 | "code": "U", 55 | "reason": "Unknown error" 56 | } 57 | } 58 | 59 | # A test case for Bug#103491 60 | # server CALL processing bug with name path longer than two 61 | # https://bugs.launchpad.net/tarantool/+bug/1034912 62 | 63 | f = function() return 'OK' end 64 | --- 65 | ... 66 | test = {} 67 | --- 68 | ... 69 | test.f = f 70 | --- 71 | ... 72 | test.test = {} 73 | --- 74 | ... 75 | test.test.f = f 76 | --- 77 | ... 78 | call f () 79 | - 'OK' 80 | call test.f () 81 | - 'OK' 82 | call test.test.f () 83 | - 'OK' 84 | 85 | # Test for Bug #955226 86 | # Lua Numbers are passed back wrongly as strings 87 | # 88 | 89 | function foo() return 1, 2, '1', '2' end 90 | --- 91 | ... 92 | call foo () 93 | - 1 94 | - 2 95 | - '1' 96 | - '2' 97 | function f1(...) return {...} end 98 | --- 99 | ... 100 | function f2(...) return f1({...}) end 101 | --- 102 | ... 103 | call f1 ('test_', 'test_') 104 | - ['test_', 'test_'] 105 | call f2 ('test_', 'test_') 106 | - [['test_', 'test_']] 107 | call f1 () 108 | - [] 109 | call f2 () 110 | - [[]] 111 | function f3() return {{'hello'}, {'world'}} end 112 | --- 113 | ... 114 | call f3 () 115 | - [['hello'], ['world']] 116 | function f3() return {'hello', {'world'}} end 117 | --- 118 | ... 119 | call f3 () 120 | - ['hello', ['world']] 121 | function f3() return 'hello', {{'world'}, {'canada'}} end 122 | --- 123 | ... 124 | call f3 () 125 | - 'hello' 126 | - [['world'], ['canada']] 127 | function f3() return {}, '123', {{}, {}} end 128 | --- 129 | ... 
130 | call f3 () 131 | - [] 132 | - '123' 133 | - [[], []] 134 | function f3() return { {{'hello'}} } end 135 | --- 136 | ... 137 | call f3 () 138 | - [[['hello']]] 139 | function f3() return { box.tuple.new('hello'), {'world'} } end 140 | --- 141 | ... 142 | call f3 () 143 | - [['hello'], ['world']] 144 | function f3() return { {'world'}, box.tuple.new('hello') } end 145 | --- 146 | ... 147 | call f3 () 148 | - [['world'], ['hello']] 149 | function f3() return { { test={1,2,3} }, { test2={1,2,3} } } end 150 | --- 151 | ... 152 | call f3 () 153 | - [{'test': [1, 2, 3]}, {'test2': [1, 2, 3]}] 154 | call f1 ('jason',) 155 | - ['jason'] 156 | call f1 ('jason', 1, 'test', 2, 'stewart') 157 | - ['jason', 1, 'test', 2, 'stewart'] 158 | space = box.schema.space.create('tweedledum') 159 | --- 160 | ... 161 | index = space:create_index('primary', { type = 'hash' }) 162 | --- 163 | ... 164 | function myreplace(...) return space:replace{...} end 165 | --- 166 | ... 167 | function myinsert(...) return space:insert{...} end 168 | --- 169 | ... 170 | call myinsert (1, 'test box delete') 171 | - [1, 'test box delete'] 172 | call space:delete (1,) 173 | - [1, 'test box delete'] 174 | call myinsert (1, 'test box delete') 175 | - [1, 'test box delete'] 176 | call space:delete (1,) 177 | - [1, 'test box delete'] 178 | call space:delete (1,) 179 | 180 | call myinsert (2, 'test box delete') 181 | - [2, 'test box delete'] 182 | call space:delete (1,) 183 | 184 | call space:delete (2,) 185 | - [2, 'test box delete'] 186 | call space:delete (2,) 187 | 188 | space:delete{2} 189 | --- 190 | ... 191 | call myinsert (2, 'test box delete') 192 | - [2, 'test box delete'] 193 | call space:get (2,) 194 | - [2, 'test box delete'] 195 | space:delete{2} 196 | --- 197 | - [2, 'test box delete'] 198 | ... 199 | call space:get (2,) 200 | 201 | call myinsert (2, 'test box.select()') 202 | - [2, 'test box.select()'] 203 | call space:get (2,) 204 | - [2, 'test box.select()'] 205 | call space:select (2,) 206 | - [[2, 'test box.select()']] 207 | space:get{2} 208 | --- 209 | - [2, 'test box.select()'] 210 | ... 211 | space:select{2} 212 | --- 213 | - - [2, 'test box.select()'] 214 | ... 215 | space:get{1} 216 | --- 217 | ... 218 | space:select{1} 219 | --- 220 | - [] 221 | ... 222 | call myreplace (2, 'hello', 'world') 223 | - [2, 'hello', 'world'] 224 | call myreplace (2, 'goodbye', 'universe') 225 | - [2, 'goodbye', 'universe'] 226 | call space:get (2,) 227 | - [2, 'goodbye', 'universe'] 228 | call space:select (2,) 229 | - [[2, 'goodbye', 'universe']] 230 | space:get{2} 231 | --- 232 | - [2, 'goodbye', 'universe'] 233 | ... 234 | space:select{2} 235 | --- 236 | - - [2, 'goodbye', 'universe'] 237 | ... 238 | call myreplace (2,) 239 | - [2] 240 | call space:get (2,) 241 | - [2] 242 | call space:select (2,) 243 | - [[2]] 244 | call space:delete (2,) 245 | - [2] 246 | call space:delete (2,) 247 | 248 | call myinsert (3, 'old', 2) 249 | - [3, 'old', 2] 250 | space:update({3}, {{'=', 1, 4}, {'=', 2, 'new'}}) 251 | --- 252 | - error: Attempt to modify a tuple field in space 253 | ... 254 | space:insert(space:get{3}:update{{'=', 1, 4}, {'=', 2, 'new'}}) space:delete{3} 255 | --- 256 | ... 257 | call space:get (4,) 258 | - [4, 'new', 2] 259 | call space:select (4,) 260 | - [[4, 'new', 2]] 261 | space:update({4}, {{'+', 3, 1}}) 262 | --- 263 | - [4, 'new', 3] 264 | ... 265 | space:update({4}, {{'-', 3, 1}}) 266 | --- 267 | - [4, 'new', 2] 268 | ... 
269 | call space:get (4,) 270 | - [4, 'new', 2] 271 | call space:select (4,) 272 | - [[4, 'new', 2]] 273 | function field_x(key, field_index) return space:get(key)[field_index] end 274 | --- 275 | ... 276 | call field_x (4, 1) 277 | - 4 278 | call field_x (4, 2) 279 | - 'new' 280 | call space:delete (4,) 281 | - [4, 'new', 2] 282 | space:drop() 283 | --- 284 | ... 285 | space = box.schema.space.create('tweedledum') 286 | --- 287 | ... 288 | index = space:create_index('primary', { type = 'tree' }) 289 | --- 290 | ... 291 | eval (return 1)() 292 | --- 293 | - 1 294 | function f(...) return 1 end 295 | --- 296 | ... 297 | call f() 298 | --- 299 | - 1 300 | eval (return 1, 2, 3)() 301 | --- 302 | - 1 303 | - 2 304 | - 3 305 | function f(...) return 1, 2, 3 end 306 | --- 307 | ... 308 | call f() 309 | --- 310 | - 1 311 | - 2 312 | - 3 313 | eval (return true)() 314 | --- 315 | - true 316 | function f(...) return true end 317 | --- 318 | ... 319 | call f() 320 | --- 321 | - true 322 | eval (return nil)() 323 | --- 324 | - null 325 | function f(...) return nil end 326 | --- 327 | ... 328 | call f() 329 | --- 330 | - null 331 | eval (return )() 332 | --- 333 | 334 | function f(...) return end 335 | --- 336 | ... 337 | call f() 338 | --- 339 | 340 | eval (return {})() 341 | --- 342 | - [] 343 | function f(...) return {} end 344 | --- 345 | ... 346 | call f() 347 | --- 348 | - [] 349 | eval (return {1})() 350 | --- 351 | - [1] 352 | function f(...) return {1} end 353 | --- 354 | ... 355 | call f() 356 | --- 357 | - [1] 358 | eval (return {1, 2, 3})() 359 | --- 360 | - [1, 2, 3] 361 | function f(...) return {1, 2, 3} end 362 | --- 363 | ... 364 | call f() 365 | --- 366 | - [1, 2, 3] 367 | eval (return {k1 = 'v1', k2 = 'v2'})() 368 | --- 369 | - {"k1": "v1", "k2": "v2"} 370 | function f(...) return {k1 = 'v1', k2 = 'v2'} end 371 | --- 372 | ... 373 | call f() 374 | --- 375 | - {"k1": "v1", "k2": "v2"} 376 | eval (return {k1 = 'v1', k2 = 'v2'})() 377 | --- 378 | - {"k1": "v1", "k2": "v2"} 379 | function f(...) return {k1 = 'v1', k2 = 'v2'} end 380 | --- 381 | ... 382 | call f() 383 | --- 384 | - {"k1": "v1", "k2": "v2"} 385 | eval (return {s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}})() 386 | --- 387 | - {"c": {"106": [1, 1428578535], "2": [1, 1428578535]}, "pc": {"106": [1, 1428578535, 9243], "2": [1, 1428578535, 9243]}, "s": [1, 1428578535], "u": 1428578535, "v": []} 388 | function f(...) return {s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}} end 389 | --- 390 | ... 391 | call f() 392 | --- 393 | - {"c": {"106": [1, 1428578535], "2": [1, 1428578535]}, "pc": {"106": [1, 1428578535, 9243], "2": [1, 1428578535, 9243]}, "s": [1, 1428578535], "u": 1428578535, "v": []} 394 | eval (return true, {s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}})() 395 | --- 396 | - true 397 | - {"c": {"106": [1, 1428578535], "2": [1, 1428578535]}, "pc": {"106": [1, 1428578535, 9243], "2": [1, 1428578535, 9243]}, "s": [1, 1428578535], "u": 1428578535, "v": []} 398 | function f(...) 
return true, {s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}} end 399 | --- 400 | ... 401 | call f() 402 | --- 403 | - true 404 | - {"c": {"106": [1, 1428578535], "2": [1, 1428578535]}, "pc": {"106": [1, 1428578535, 9243], "2": [1, 1428578535, 9243]}, "s": [1, 1428578535], "u": 1428578535, "v": []} 405 | eval (return {s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}}, true)() 406 | --- 407 | - {"c": {"106": [1, 1428578535], "2": [1, 1428578535]}, "pc": {"106": [1, 1428578535, 9243], "2": [1, 1428578535, 9243]}, "s": [1, 1428578535], "u": 1428578535, "v": []} 408 | - true 409 | function f(...) return {s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}}, true end 410 | --- 411 | ... 412 | call f() 413 | --- 414 | - {"c": {"106": [1, 1428578535], "2": [1, 1428578535]}, "pc": {"106": [1, 1428578535, 9243], "2": [1, 1428578535, 9243]}, "s": [1, 1428578535], "u": 1428578535, "v": []} 415 | - true 416 | t = box.tuple.new('tuple', {1, 2, 3}, { k1 = 'v', k2 = 'v2'}) 417 | --- 418 | ... 419 | eval (return t)() 420 | --- 421 | - ["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}] 422 | function f(...) return t end 423 | --- 424 | ... 425 | call f() 426 | --- 427 | - ["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}] 428 | eval (return t, t, t)() 429 | --- 430 | - ["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}] 431 | - ["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}] 432 | - ["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}] 433 | function f(...) return t, t, t end 434 | --- 435 | ... 436 | call f() 437 | --- 438 | - ["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}] 439 | - ["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}] 440 | - ["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}] 441 | eval (return {t})() 442 | --- 443 | - [["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}]] 444 | function f(...) return {t} end 445 | --- 446 | ... 447 | call f() 448 | --- 449 | - [["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}]] 450 | eval (return {t, t, t})() 451 | --- 452 | - [["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}], ["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}], ["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}]] 453 | function f(...) return {t, t, t} end 454 | --- 455 | ... 456 | call f() 457 | --- 458 | - [["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}], ["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}], ["tuple", [1, 2, 3], {"k1": "v", "k2": "v2"}]] 459 | eval (return error('exception'))() 460 | --- 461 | { 462 | "error": { 463 | "code": "ER_PROC_LUA", 464 | "reason": "exception" 465 | } 466 | } 467 | function f(...) return error('exception') end 468 | --- 469 | ... 470 | call f() 471 | --- 472 | { 473 | "error": { 474 | "code": "ER_PROC_LUA", 475 | "reason": "exception" 476 | } 477 | } 478 | eval (return box.error(0))() 479 | --- 480 | 481 | function f(...) return box.error(0) end 482 | --- 483 | ... 484 | call f() 485 | --- 486 | 487 | eval (return ...)() 488 | --- 489 | 490 | function f(...) return ... end 491 | --- 492 | ... 493 | call f() 494 | --- 495 | 496 | eval (return ...)(1, 2, 3) 497 | --- 498 | - 1 499 | - 2 500 | - 3 501 | function f(...) return ... end 502 | --- 503 | ... 
504 | call f(1, 2, 3) 505 | --- 506 | - 1 507 | - 2 508 | - 3 509 | eval (return ...)(null, null, null) 510 | --- 511 | - null 512 | - null 513 | - null 514 | function f(...) return ... end 515 | --- 516 | ... 517 | call f(null, null, null) 518 | --- 519 | - null 520 | - null 521 | - null 522 | eval (return ...)({"k1": "v1", "k2": "v2"}) 523 | --- 524 | - {"k1": "v1", "k2": "v2"} 525 | function f(...) return ... end 526 | --- 527 | ... 528 | call f({"k1": "v1", "k2": "v2"}) 529 | --- 530 | - {"k1": "v1", "k2": "v2"} 531 | eval (return space:auto_increment({"transaction"}))() 532 | --- 533 | - [1, "transaction"] 534 | function f(...) return space:auto_increment({"transaction"}) end 535 | --- 536 | ... 537 | call f() 538 | --- 539 | - [2, "transaction"] 540 | eval (return space:select{})() 541 | --- 542 | - [[1, "transaction"], [2, "transaction"]] 543 | function f(...) return space:select{} end 544 | --- 545 | ... 546 | call f() 547 | --- 548 | - [[1, "transaction"], [2, "transaction"]] 549 | eval (return box.begin(), space:auto_increment({"failed"}), box.rollback())() 550 | --- 551 | - null 552 | - [3, "failed"] 553 | function f(...) return box.begin(), space:auto_increment({"failed"}), box.rollback() end 554 | --- 555 | ... 556 | call f() 557 | --- 558 | - null 559 | - [3, "failed"] 560 | eval (return space:select{})() 561 | --- 562 | - [[1, "transaction"], [2, "transaction"]] 563 | function f(...) return space:select{} end 564 | --- 565 | ... 566 | call f() 567 | --- 568 | - [[1, "transaction"], [2, "transaction"]] 569 | eval (return require("fiber").sleep(0))() 570 | --- 571 | 572 | function f(...) return require("fiber").sleep(0) end 573 | --- 574 | ... 575 | call f() 576 | --- 577 | 578 | eval (!invalid expression)() 579 | --- 580 | { 581 | "error": { 582 | "code": "ER_PROC_LUA", 583 | "reason": "eval:1: unexpected symbol near '!'" 584 | } 585 | } 586 | space:drop() 587 | --- 588 | ... 589 | box.schema.user.drop('test') 590 | --- 591 | ... 592 | -------------------------------------------------------------------------------- /test/test-tarantool/call.test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tarantool's test box-py/call.test.py had a problem fixed in 3 | tarantool-python library by commit: 4 | 8847b8c8bd1092d87601987d858e3cb9ff54f9f6 5 | ("python3: make json.dumps compatible with Python 2"). 6 | """ 7 | 8 | from __future__ import print_function 9 | 10 | import os 11 | import sys 12 | import json 13 | 14 | def call(name, *args): 15 | return iproto.call(name, *args) 16 | 17 | admin("box.schema.user.create('test', { password = 'test' })") 18 | admin("box.schema.user.grant('test', 'execute,read,write', 'universe')") 19 | iproto.authenticate("test", "test") 20 | # workaround for gh-770 centos 6 float representation 21 | admin("exp_notation = 1e123") 22 | admin("function f1() return 'testing', 1, false, -1, 1.123, math.abs(exp_notation - 1e123) < 0.1, nil end") 23 | admin("f1()") 24 | call("f1") 25 | admin("f1=nil") 26 | call("f1") 27 | admin("function f1() return f1 end") 28 | call("f1") 29 | 30 | # A test case for https://github.com/tarantool/tarantool/issues/44 31 | # IPROTO required! 
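# According to call.result above, this call is answered with an
# "Unknown error" response, since 33333 is not a defined error code.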
32 | call("box.error", 33333, "Hey!") 33 | 34 | print(""" 35 | # A test case for Bug#103491 36 | # server CALL processing bug with name path longer than two 37 | # https://bugs.launchpad.net/tarantool/+bug/1034912 38 | """) 39 | admin("f = function() return 'OK' end") 40 | admin("test = {}") 41 | admin("test.f = f") 42 | admin("test.test = {}") 43 | admin("test.test.f = f") 44 | call("f") 45 | call("test.f") 46 | call("test.test.f") 47 | 48 | print(""" 49 | # Test for Bug #955226 50 | # Lua Numbers are passed back wrongly as strings 51 | # 52 | """) 53 | admin("function foo() return 1, 2, '1', '2' end") 54 | call("foo") 55 | 56 | # 57 | # check how well we can return tables 58 | # 59 | admin("function f1(...) return {...} end") 60 | admin("function f2(...) return f1({...}) end") 61 | call("f1", "test_", "test_") 62 | call("f2", "test_", "test_") 63 | call("f1") 64 | call("f2") 65 | # 66 | # check multi-tuple return 67 | # 68 | admin("function f3() return {{'hello'}, {'world'}} end") 69 | call("f3") 70 | admin("function f3() return {'hello', {'world'}} end") 71 | call("f3") 72 | admin("function f3() return 'hello', {{'world'}, {'canada'}} end") 73 | call("f3") 74 | admin("function f3() return {}, '123', {{}, {}} end") 75 | call("f3") 76 | admin("function f3() return { {{'hello'}} } end") 77 | call("f3") 78 | admin("function f3() return { box.tuple.new('hello'), {'world'} } end") 79 | call("f3") 80 | admin("function f3() return { {'world'}, box.tuple.new('hello') } end") 81 | call("f3") 82 | admin("function f3() return { { test={1,2,3} }, { test2={1,2,3} } } end") 83 | call("f3") 84 | 85 | call("f1", "jason") 86 | call("f1", "jason", 1, "test", 2, "stewart") 87 | 88 | admin("space = box.schema.space.create('tweedledum')") 89 | admin("index = space:create_index('primary', { type = 'hash' })") 90 | 91 | admin("function myreplace(...) return space:replace{...} end") 92 | admin("function myinsert(...) 
return space:insert{...} end") 93 | 94 | call("myinsert", 1, "test box delete") 95 | call("space:delete", 1) 96 | call("myinsert", 1, "test box delete") 97 | call("space:delete", 1) 98 | call("space:delete", 1) 99 | call("myinsert", 2, "test box delete") 100 | call("space:delete", 1) 101 | call("space:delete", 2) 102 | call("space:delete", 2) 103 | admin("space:delete{2}") 104 | 105 | call("myinsert", 2, "test box delete") 106 | call("space:get", 2) 107 | admin("space:delete{2}") 108 | call("space:get", 2) 109 | call("myinsert", 2, "test box.select()") 110 | call("space:get", 2) 111 | call("space:select", 2) 112 | admin("space:get{2}") 113 | admin("space:select{2}") 114 | admin("space:get{1}") 115 | admin("space:select{1}") 116 | call("myreplace", 2, "hello", "world") 117 | call("myreplace", 2, "goodbye", "universe") 118 | call("space:get", 2) 119 | call("space:select", 2) 120 | admin("space:get{2}") 121 | admin("space:select{2}") 122 | call("myreplace", 2) 123 | call("space:get", 2) 124 | call("space:select", 2) 125 | call("space:delete", 2) 126 | call("space:delete", 2) 127 | call("myinsert", 3, "old", 2) 128 | sys.stdout.push_filter("Attempt to modify a tuple field.*in space.*", "Attempt to modify a tuple field in space") 129 | sys.stdout.push_filter("\s+'tweedledum'", "") 130 | admin("space:update({3}, {{'=', 1, 4}, {'=', 2, 'new'}})") 131 | admin("space:insert(space:get{3}:update{{'=', 1, 4}, {'=', 2, 'new'}}) space:delete{3}") 132 | call("space:get", 4) 133 | call("space:select", 4) 134 | admin("space:update({4}, {{'+', 3, 1}})") 135 | admin("space:update({4}, {{'-', 3, 1}})") 136 | call("space:get", 4) 137 | call("space:select", 4) 138 | admin("function field_x(key, field_index) return space:get(key)[field_index] end") 139 | call("field_x", 4, 1) 140 | call("field_x", 4, 2) 141 | call("space:delete", 4) 142 | admin("space:drop()") 143 | 144 | admin("space = box.schema.space.create('tweedledum')") 145 | admin("index = space:create_index('primary', { type = 'tree' })") 146 | 147 | json_dumps_kwargs=dict(sort_keys=True, separators=(', ', ': ')) 148 | 149 | def dump_args(*args): 150 | return json.dumps(args, **json_dumps_kwargs)[1:-1] 151 | 152 | def dump_response(response): 153 | if response.return_code: 154 | return str(response) 155 | if not response.data: 156 | return '' 157 | res = [] 158 | for item in response.data: 159 | res.append(json.dumps(item, **json_dumps_kwargs)) 160 | return '- ' + '\n- '.join(res) 161 | 162 | def lua_eval(name, *args): 163 | print("eval ({})({})".format(name, dump_args(*args))) 164 | print("---") 165 | print(dump_response(iproto.py_con.eval(name, args))) 166 | 167 | def lua_call(name, *args): 168 | print("call {}({})".format(name, dump_args(*args))) 169 | print("---") 170 | print(dump_response(iproto.py_con.call(name, args))) 171 | 172 | def test(expr, *args): 173 | lua_eval("return " + expr, *args) 174 | admin("function f(...) 
return " + expr + " end") 175 | lua_call("f", *args) 176 | 177 | # Return values 178 | test("1") 179 | test("1, 2, 3") 180 | test("true") 181 | test("nil") 182 | test("") 183 | test("{}") 184 | test("{1}") 185 | test("{1, 2, 3}") 186 | test("{k1 = 'v1', k2 = 'v2'}") 187 | test("{k1 = 'v1', k2 = 'v2'}") 188 | # gh-791: maps are wrongly assumed to be arrays 189 | test("{s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}}") 190 | test("true, {s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}}") 191 | test("{s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}}, true") 192 | admin("t = box.tuple.new('tuple', {1, 2, 3}, { k1 = 'v', k2 = 'v2'})") 193 | test("t") 194 | test("t, t, t") 195 | test("{t}") 196 | test("{t, t, t}") 197 | test("error('exception')") 198 | test("box.error(0)") 199 | test("...") 200 | test("...", 1, 2, 3) 201 | test("...", None, None, None) 202 | test("...", { "k1": "v1", "k2": "v2"}) 203 | # Transactions 204 | test("space:auto_increment({\"transaction\"})") 205 | test("space:select{}") 206 | test("box.begin(), space:auto_increment({\"failed\"}), box.rollback()") 207 | test("space:select{}") 208 | test("require(\"fiber\").sleep(0)") 209 | # Other 210 | lua_eval("!invalid expression") 211 | 212 | admin("space:drop()") 213 | admin("box.schema.user.drop('test')") 214 | 215 | # Re-connect after removing user 216 | iproto.py_con.close() 217 | -------------------------------------------------------------------------------- /test/test-tarantool/engine.cfg: -------------------------------------------------------------------------------- 1 | { 2 | "set_language.test.sql": { 3 | "memtx": {"engine": "memtx"} 4 | }, 5 | "setopt_delimeter.test.lua": { 6 | "memtx": {"engine": "memtx"} 7 | }, 8 | "worker_hang_when_gc_triggered_inside_colorer.test.lua": { 9 | "vinyl": {"engine": "vinyl"} 10 | }, 11 | "*": { 12 | "memtx": {"engine": "memtx"}, 13 | "vinyl": {"engine": "vinyl"} 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /test/test-tarantool/iproto.result: -------------------------------------------------------------------------------- 1 | IPROTO_UPDATE 2 | query [('IPROTO_CODE', 4)] [('IPROTO_SPACE_ID', 280)] 3 | True 4 | query [('IPROTO_CODE', 4)] [('IPROTO_KEY', (1,)), ('IPROTO_SPACE_ID', 280)] 5 | True 6 | 7 | 8 | -------------------------------------------------------------------------------- /test/test-tarantool/iproto.test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Original Tarantool's test box-py/iproto.test.py had a problem when output with 3 | running under Python 2 was not the same as with running under Python 3. 4 | 5 | Fixed in commit 697b79781cc63e2d87d86d43713998261d602334 6 | "test: make output of box-py/iproto.test.py deterministic". 
7 | """ 8 | 9 | from __future__ import print_function 10 | 11 | import msgpack 12 | from tarantool.const import * 13 | from tarantool import Connection 14 | from tarantool.response import Response 15 | from lib.tarantool_connection import TarantoolConnection 16 | 17 | # Note re IPROTO_SQL_INFO_* keys: they cannot appear in the 18 | # response map at the top level, but have the same codes as other 19 | # IPROTO_* constants. Exclude those names so. 20 | key_names = {} 21 | for (k,v) in list(globals().items()): 22 | if type(k) == str and k.startswith("IPROTO_") and \ 23 | not k.startswith("IPROTO_SQL_INFO_") and type(v) == int: 24 | key_names[v] = k 25 | 26 | def repr_dict(todump): 27 | d = {} 28 | for (k, v) in todump.items(): 29 | k_name = key_names.get(k, k) 30 | d[k_name] = v 31 | return repr(sorted(d.items())) 32 | 33 | 34 | def test(header, body): 35 | # Connect and authenticate 36 | c = Connection("localhost", server.iproto.port) 37 | c.connect() 38 | print("query", repr_dict(header), repr_dict(body)) 39 | header = msgpack.dumps(header) 40 | body = msgpack.dumps(body) 41 | query = msgpack.dumps(len(header) + len(body)) + header + body 42 | # Send raw request using connected socket 43 | s = c._socket 44 | try: 45 | s.send(query) 46 | except OSError as e: 47 | print(" => ", "Failed to send request") 48 | c.close() 49 | print(iproto.py_con.ping() > 0) 50 | 51 | print("IPROTO_UPDATE") 52 | test({ IPROTO_CODE : REQUEST_TYPE_UPDATE }, { IPROTO_SPACE_ID: 280 }) 53 | test({ IPROTO_CODE : REQUEST_TYPE_UPDATE }, 54 | { IPROTO_SPACE_ID: 280, IPROTO_KEY: (1, )}) 55 | print("\n") 56 | -------------------------------------------------------------------------------- /test/test-tarantool/replica.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | local repl_include_self = arg[1] and arg[1] == 'true' or false 4 | local repl_list 5 | 6 | if repl_include_self then 7 | repl_list = {os.getenv("MASTER"), os.getenv("LISTEN")} 8 | else 9 | repl_list = os.getenv("MASTER") 10 | end 11 | 12 | -- Start the console first to allow test-run to attach even before 13 | -- box.cfg is finished. 14 | require('console').listen(os.getenv('ADMIN')) 15 | 16 | box.cfg({ 17 | listen = os.getenv("LISTEN"), 18 | replication = repl_list, 19 | memtx_memory = 107374182, 20 | replication_timeout = 0.1, 21 | }) 22 | -------------------------------------------------------------------------------- /test/test-tarantool/set_language.result: -------------------------------------------------------------------------------- 1 | -- test-run result file version 2 2 | -- Simple SQL test that uses '\set language' command. 3 | -- Command introduced in commit 6e38b88eb6bbe543a1e3ba0a6a0be2f6f58abc86 4 | -- ('Implement SQL driver') 5 | 6 | -- Create table for tests 7 | CREATE TABLE t (a BOOLEAN PRIMARY KEY); 8 | | --- 9 | | - row_count: 1 10 | | ... 11 | INSERT INTO t VALUES (true), (false); 12 | | --- 13 | | - row_count: 2 14 | | ... 15 | 16 | -- Create user-defined function. 17 | \set language lua 18 | | --- 19 | | - true 20 | | ... 21 | test_run = require('test_run').new() 22 | | --- 23 | | ... 24 | \set language sql 25 | | --- 26 | | - true 27 | | ... 28 | 29 | SELECT a FROM t WHERE a; 30 | | --- 31 | | - metadata: 32 | | - name: A 33 | | type: boolean 34 | | rows: 35 | | - [true] 36 | | ... 37 | SELECT a FROM t WHERE a != true; 38 | | --- 39 | | - metadata: 40 | | - name: A 41 | | type: boolean 42 | | rows: 43 | | - [false] 44 | | ... 45 | 46 | -- Cleaning. 
47 | DROP TABLE t; 48 | | --- 49 | | - row_count: 1 50 | | ... 51 | -------------------------------------------------------------------------------- /test/test-tarantool/set_language.test.sql: -------------------------------------------------------------------------------- 1 | -- Simple SQL test that uses '\set language' command. 2 | -- Command introduced in commit 6e38b88eb6bbe543a1e3ba0a6a0be2f6f58abc86 3 | -- ('Implement SQL driver') 4 | 5 | -- Create table for tests 6 | CREATE TABLE t (a BOOLEAN PRIMARY KEY); 7 | INSERT INTO t VALUES (true), (false); 8 | 9 | -- Create user-defined function. 10 | \set language lua 11 | test_run = require('test_run').new() 12 | \set language sql 13 | 14 | SELECT a FROM t WHERE a; 15 | SELECT a FROM t WHERE a != true; 16 | 17 | -- Cleaning. 18 | DROP TABLE t; 19 | -------------------------------------------------------------------------------- /test/test-tarantool/setopt_delimeter.result: -------------------------------------------------------------------------------- 1 | -- test-run result file version 2 2 | -- Simple test that uses 'setopt delimiter' command. 3 | -- Command introduced in commit 6e38b88eb6bbe543a1e3ba0a6a0be2f6f58abc86 4 | -- ('Implement SQL driver') 5 | 6 | test_run = require('test_run').new() 7 | | --- 8 | | ... 9 | 10 | -- Using delimiter 11 | _ = test_run:cmd("setopt delimiter ';'") 12 | | --- 13 | | ... 14 | function test_a() 15 | local a = 1 16 | end; 17 | | --- 18 | | ... 19 | _ = test_run:cmd("setopt delimiter ''"); 20 | | --- 21 | | ... 22 | 23 | box.cfg{} 24 | | --- 25 | | ... 26 | 27 | -- Using multiline 28 | box.cfg{ \ 29 | coredump = false, \ 30 | log_format = 'plain', \ 31 | log_level = 5, \ 32 | strip_core = true \ 33 | } 34 | | --- 35 | | ... 36 | -------------------------------------------------------------------------------- /test/test-tarantool/setopt_delimeter.test.lua: -------------------------------------------------------------------------------- 1 | -- Simple test that uses 'setopt delimiter' command. 2 | -- Command introduced in commit 6e38b88eb6bbe543a1e3ba0a6a0be2f6f58abc86 3 | -- ('Implement SQL driver') 4 | 5 | test_run = require('test_run').new() 6 | 7 | -- Using delimiter 8 | _ = test_run:cmd("setopt delimiter ';'") 9 | function test_a() 10 | local a = 1 11 | end; 12 | _ = test_run:cmd("setopt delimiter ''"); 13 | 14 | box.cfg{} 15 | 16 | -- Using multiline 17 | box.cfg{ \ 18 | coredump = false, \ 19 | log_format = 'plain', \ 20 | log_level = 5, \ 21 | strip_core = true \ 22 | } 23 | -------------------------------------------------------------------------------- /test/test-tarantool/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = tarantool tests 4 | script = box.lua 5 | config = engine.cfg 6 | -------------------------------------------------------------------------------- /test/test-tarantool/worker_hang_when_gc_triggered_inside_colorer.result: -------------------------------------------------------------------------------- 1 | -- test-run result file version 2 2 | -- regression test for the problem fixed in [1] that was a part 3 | -- of more common issue [2]. It's worth to mention that original 4 | -- problem has more chances to reproduce with applied patch to 5 | -- multiprocessing source code (multiprocessing/connection.py). 6 | -- 7 | -- 1. https://github.com/tarantool/test-run/pull/275 8 | -- 2. 
https://github.com/tarantool/tarantool-qa/issues/96 9 | 10 | -- Setup 11 | box.schema.user.grant('guest', 'replication') 12 | | --- 13 | | ... 14 | 15 | -- Setup and teardown cluster, manage separate instances. 16 | test_run = require('test_run').new() 17 | | --- 18 | | ... 19 | test_run:cmd('create server replica with rpl_master=default, script="test-tarantool/replica.lua"') 20 | | --- 21 | | - true 22 | | ... 23 | test_run:cmd('start server replica') 24 | | --- 25 | | - true 26 | | ... 27 | test_run:cmd('stop server replica') 28 | | --- 29 | | - true 30 | | ... 31 | test_run:cmd('cleanup server replica') 32 | | --- 33 | | - true 34 | | ... 35 | test_run:cmd('delete server replica') 36 | | --- 37 | | - true 38 | | ... 39 | 40 | -- Teardown 41 | box.schema.user.revoke('guest', 'replication') 42 | | --- 43 | | ... 44 | -------------------------------------------------------------------------------- /test/test-tarantool/worker_hang_when_gc_triggered_inside_colorer.test.lua: -------------------------------------------------------------------------------- 1 | -- regression test for the problem fixed in [1] that was a part 2 | -- of more common issue [2]. It's worth to mention that original 3 | -- problem has more chances to reproduce with applied patch to 4 | -- multiprocessing source code (multiprocessing/connection.py). 5 | -- 6 | -- 1. https://github.com/tarantool/test-run/pull/275 7 | -- 2. https://github.com/tarantool/tarantool-qa/issues/96 8 | 9 | -- Setup 10 | box.schema.user.grant('guest', 'replication') 11 | 12 | -- Setup and teardown cluster, manage separate instances. 13 | test_run = require('test_run').new() 14 | test_run:cmd('create server replica with rpl_master=default, script="test-tarantool/replica.lua"') 15 | test_run:cmd('start server replica') 16 | test_run:cmd('stop server replica') 17 | test_run:cmd('cleanup server replica') 18 | test_run:cmd('delete server replica') 19 | 20 | -- Teardown 21 | box.schema.user.revoke('guest', 'replication') 22 | -------------------------------------------------------------------------------- /test/test-unit/broken_unicode.result: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tarantool/test-run/5f82c70394b4cc35198a723c0f330f16de701043/test/test-unit/broken_unicode.result -------------------------------------------------------------------------------- /test/test-unit/broken_unicode.test: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Test is needed to check how test-run handle 4 | # tests with result files. Since commit 5 | # 395edeb6b743c4479a62dd2183062124973d2b2a 6 | # 'python3: decouple bytes and strings' test-run 7 | # reads test output and result files in byte mode. 8 | # Output of this test contains broken UTF-8 sequences 9 | # and test-run will fail if it will try to decode output. 
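# Note: '\302\302' below is an invalid UTF-8 sequence (the lead byte 0xC2
# must be followed by a continuation byte in the 0x80-0xBF range), which is
# exactly what makes a strict text decode of this output raise an error.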
10 | 11 | printf 'TAP version 13\n' 12 | printf '1..3\n' 13 | printf 'ok 1 - \302\302\n' 14 | printf 'ok 2 - \302\302\n' 15 | printf 'ok 3 - \342\230\240\n' 16 | -------------------------------------------------------------------------------- /test/test-unit/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = unittest 3 | description = unit tests 4 | is_parallel = True 5 | -------------------------------------------------------------------------------- /test/unittest/00000000000000000003.snap: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tarantool/test-run/5f82c70394b4cc35198a723c0f330f16de701043/test/unittest/00000000000000000003.snap -------------------------------------------------------------------------------- /test/unittest/box-cc0544b6afd1.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | box.cfg { 4 | listen = os.getenv('LISTEN'), 5 | } 6 | 7 | require('console').listen(os.getenv('ADMIN')) 8 | -------------------------------------------------------------------------------- /test/unittest/hang.result: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tarantool/test-run/5f82c70394b4cc35198a723c0f330f16de701043/test/unittest/hang.result -------------------------------------------------------------------------------- /test/unittest/hang.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | 3 | -- This test should hang: we are unable to bootstrap the replica, because it is 4 | -- unable to join the master because of lack of granting user permissions. 
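-- The suite's instance script (box-cc0544b6afd1.lua above) never grants the
-- 'replication' privilege to the guest user, so the replica's join request
-- cannot be served and the start below never completes.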
5 | test_run:cmd('create server replica with rpl_master=default, script="unittest/replica-7f4d4895ff58.lua"') 6 | test_run:cmd('start server replica') 7 | -------------------------------------------------------------------------------- /test/unittest/replica-7f4d4895ff58.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | box.cfg { 4 | listen = os.getenv('LISTEN'), 5 | replication = os.getenv('MASTER'), 6 | } 7 | 8 | require('console').listen(os.getenv('ADMIN')) 9 | -------------------------------------------------------------------------------- /test/unittest/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = unit tests 4 | script = box-cc0544b6afd1.lua 5 | use_unix_sockets_iproto = True 6 | -------------------------------------------------------------------------------- /test/unittest/test_lib_utils.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import lib.utils as utils 4 | 5 | 6 | class TestUtils(unittest.TestCase): 7 | def test_extract_schema_from_snapshot(self): 8 | snapshot_path = 'test/unittest/00000000000000000003.snap' 9 | v = utils.extract_schema_from_snapshot(snapshot_path) 10 | self.assertEqual(v, (2, 3, 1)) 11 | 12 | 13 | if __name__ == "__main__": 14 | unittest.main() 15 | -------------------------------------------------------------------------------- /test/unittest/test_tarantool_server.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import sys 4 | import unittest 5 | 6 | 7 | class TestTarantoolServer(unittest.TestCase): 8 | def test_tarantool_server_not_hanging(self): 9 | env = os.environ.copy() 10 | env['SERVER_START_TIMEOUT'] = '5' 11 | 12 | cmd = [sys.executable, 'test/test-run.py', 'unittest/hang.test.lua'] 13 | 14 | # File names intentionally have hashes to find exactly these processes 15 | # using 'ps' command. 16 | box_instance = 'box-cc0544b6afd1' 17 | replica_instance = 'replica-7f4d4895ff58' 18 | 19 | err_msg_1 = ('[Instance "%s"] Failed to start tarantool ' 20 | 'instance "%s"' % (box_instance, replica_instance)) 21 | err_msg_2 = ('[Instance "%s"] Failed to start within %s seconds' 22 | % (replica_instance, env['SERVER_START_TIMEOUT'])) 23 | 24 | try: 25 | subprocess.check_output(cmd, env=env, universal_newlines=True) 26 | self.fail("Command `%s` did not return non-zero exit code" 27 | % ' '.join(cmd)) 28 | except subprocess.CalledProcessError as exc: 29 | err_obj = exc 30 | 31 | self.assertIn(err_msg_1, err_obj.output) 32 | self.assertIn(err_msg_2, err_obj.output) 33 | 34 | ps_lines = subprocess.check_output( 35 | ['ps', '-o', 'command'], universal_newlines=True 36 | ).splitlines() 37 | proc_lines = [line.strip() for line in ps_lines 38 | if 'tarantool %s.lua' % box_instance in line or 39 | 'tarantool %s.lua' % replica_instance in line] 40 | 41 | self.assertFalse( 42 | proc_lines, 'There are some hanging tarantool processes!' 43 | ) 44 | 45 | 46 | if __name__ == '__main__': 47 | unittest.main() 48 | --------------------------------------------------------------------------------
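
As an aside on test/test-unit/broken_unicode.test above: the minimal Python sketch below (illustration only, not part of test-run; the variable names are made up for the example) shows why reading test output and .result files in byte mode matters. Strictly decoding the '\302\302' bytes raises UnicodeDecodeError, while a byte-for-byte comparison of the same data succeeds.

# Illustration only (not test-run code): the TAP line printed by
# broken_unicode.test cannot be decoded as strict UTF-8, but comparing
# raw bytes against the expected .result content still works.
expected = b"ok 1 - \xc2\xc2\n"   # '\302\302' from the test above
observed = b"ok 1 - \xc2\xc2\n"

try:
    observed.decode("utf-8")      # 0xC2 followed by 0xC2 is not valid UTF-8
except UnicodeDecodeError as exc:
    print("strict decoding fails:", exc)

assert observed == expected       # byte-level comparison avoids decoding
print("byte-level comparison succeeds")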