├── .gitignore ├── .gitmodules ├── .travis.yml ├── CHANGES.md ├── CMakeLists.txt ├── Jenkinsfile ├── LICENSE ├── README.md ├── cmake ├── FindMsgPuck.cmake └── FindTarantool.cmake ├── debian ├── .gitignore ├── changelog ├── compat ├── control ├── copyright ├── docs ├── rules └── source │ └── format ├── debug ├── bench.sh ├── master.lua └── master1.lua ├── demo.lua ├── rpm └── tarantool-shard.spec ├── shard-scm-1.rockspec ├── shard ├── CMakeLists.txt ├── connpool.lua ├── driver.c ├── heap.h ├── ibuf.h └── init.lua ├── test.sh └── test ├── .tarantoolctl ├── CMakeLists.txt ├── box └── proxy.lua ├── join ├── join1.lua ├── join2.lua ├── master.lua ├── master1.lua ├── master2.lua ├── master3.lua ├── master4.lua ├── master5.lua ├── multi_node.result ├── multi_node.test.lua ├── multi_pair.result ├── multi_pair.test.lua ├── nd_single.result ├── nd_single.test.lua ├── pair.result ├── pair.test.lua ├── single.result ├── single.test.lua └── suite.ini ├── node_down ├── auto_increment.result ├── auto_increment.test.lua ├── basic.result ├── basic.test.lua ├── batch.result ├── batch.test.lua ├── master.lua ├── master1.lua ├── node_down.result ├── node_down.test.lua ├── q_basic.result ├── q_basic.test.lua └── suite.ini ├── redundancy1 ├── auto_increment.result ├── auto_increment.test.lua ├── basic.result ├── basic.test.lua ├── batch.result ├── batch.test.lua ├── master.lua ├── master1.lua ├── master2.lua ├── monitoring.result ├── monitoring.test.lua ├── node_down.result ├── node_down.test.lua ├── q_basic.result ├── q_basic.test.lua ├── shard.result ├── shard.test.lua └── suite.ini ├── redundancy2 ├── auto_increment.result ├── auto_increment.test.lua ├── basic.result ├── basic.test.lua ├── batch.result ├── batch.test.lua ├── master.lua ├── master1.lua ├── master2.lua ├── monitoring.result ├── monitoring.test.lua ├── node_down.result ├── node_down.test.lua ├── q_basic.result ├── q_basic.test.lua ├── shard.result ├── shard.test.lua └── suite.ini ├── redundancy3 ├── auto_increment.result ├── auto_increment.test.lua ├── basic.result ├── basic.test.lua ├── batch.result ├── batch.test.lua ├── master.lua ├── master1.lua ├── master2.lua ├── monitoring.result ├── monitoring.test.lua ├── node_down.result ├── node_down.test.lua ├── q_basic.result ├── q_basic.test.lua ├── shard.result ├── shard.test.lua └── suite.ini └── test-run.py /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | .*.swp 3 | *.snap 4 | *.xlog 5 | *.log 6 | *.vylog 7 | test/var/ 8 | VERSION 9 | 10 | CMakeCache.txt 11 | CMakeFiles 12 | Makefile 13 | cmake_install.cmake 14 | 15 | *.so 16 | *.dylib 17 | 18 | packpack 19 | build 20 | *.reject 21 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "test-run"] 2 | path = test-run 3 | url = https://github.com/tarantool/test-run.git 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | services: 3 | - docker 4 | 5 | language: python 6 | 7 | cache: 8 | directories: 9 | - $HOME/.cache 10 | 11 | git: 12 | depth: 100500 13 | 14 | env: 15 | global: 16 | - PRODUCT=tarantool-shard 17 | matrix: 18 | - TARGET=test 19 | - OS=el DIST=6 20 | - OS=el DIST=7 21 | - OS=fedora DIST=26 22 | - OS=fedora DIST=27 23 | - OS=fedora DIST=28 24 | - OS=fedora DIST=29 25 | - OS=fedora DIST=30 26 | 
- OS=ubuntu DIST=trusty 27 | - OS=ubuntu DIST=xenial 28 | - OS=ubuntu DIST=bionic 29 | - OS=ubuntu DIST=cosmic 30 | - OS=ubuntu DIST=disco 31 | - OS=debian DIST=jessie 32 | - OS=debian DIST=stretch 33 | - OS=debian DIST=buster 34 | 35 | script: 36 | - git describe --long 37 | - | 38 | if [ "${TARGET}" = "test" ]; then 39 | ./test.sh; 40 | else 41 | git clone https://github.com/packpack/packpack.git packpack; 42 | packpack/packpack; 43 | fi; 44 | 45 | before_deploy: 46 | - ls -l build/ 47 | 48 | deploy: 49 | # Deploy packages to PackageCloud 50 | - provider: packagecloud 51 | username: tarantool 52 | repository: "1_9" 53 | token: ${PACKAGECLOUD_TOKEN} 54 | dist: ${OS}/${DIST} 55 | package_glob: build/*.{rpm,deb} 56 | skip_cleanup: true 57 | on: 58 | branch: master 59 | condition: -n "${OS}" && -n "${DIST}" && -n "${PACKAGECLOUD_TOKEN}" 60 | - provider: packagecloud 61 | username: tarantool 62 | repository: "1_10" 63 | token: ${PACKAGECLOUD_TOKEN} 64 | dist: ${OS}/${DIST} 65 | package_glob: build/*.{rpm,deb} 66 | skip_cleanup: true 67 | on: 68 | branch: master 69 | condition: -n "${OS}" && -n "${DIST}" && -n "${PACKAGECLOUD_TOKEN}" 70 | - provider: packagecloud 71 | username: tarantool 72 | repository: "2x" 73 | token: ${PACKAGECLOUD_TOKEN} 74 | dist: ${OS}/${DIST} 75 | package_glob: build/*.{rpm,deb} 76 | skip_cleanup: true 77 | on: 78 | branch: master 79 | condition: -n "${OS}" && -n "${DIST}" && -n "${PACKAGECLOUD_TOKEN}" 80 | - provider: packagecloud 81 | username: tarantool 82 | repository: "2_2" 83 | token: ${PACKAGECLOUD_TOKEN} 84 | dist: ${OS}/${DIST} 85 | package_glob: build/*.{rpm,deb} 86 | skip_cleanup: true 87 | on: 88 | branch: master 89 | condition: -n "${OS}" && -n "${DIST}" && -n "${PACKAGECLOUD_TOKEN}" 90 | 91 | notifications: 92 | email: 93 | recipients: 94 | - build@tarantool.org 95 | on_success: change 96 | on_failure: always 97 | -------------------------------------------------------------------------------- /CHANGES.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## Version 2.2 (unstable) 4 | 5 | This release contains a bunch of bugfixes, revives q_select and allows 6 | secondary_select requests to be fired in parallel. 7 | 8 | * q_select: revive and use C merger; 9 | * CI/CD fixes (#75); 10 | * resharding: fix a bug where tuples were not moved to another shard (#79); 11 | * bootstrapping: keep attempting to connect after access denied (guard UUID 12 | request); 13 | * secondary_select: make requests in parallel on tarantool-1.10+. 14 | 15 | ## Version 2.1 (unstable) 16 | 17 | Improved compatibility with shard-1.2 (see PR #67 for details): 18 | 19 | * Fixed undefined variable references to work under strict mode / debug 20 | tarantool build. 21 | * Fixed a bug in connpool.wait_connection(). 22 | * Brought back shard.wait_connection() function. 23 | * Brought back mr_select's sort_index_id parameter. 24 | 25 | ## Version 2.0 (unstable) 26 | 27 | This version is not compatible with previous ones. 28 | 29 | Requires `Tarantool 1.9` or higher. 30 | 31 | ### Breaking changes 32 | 33 | * Return values for `*_call` functions, `secondary_select`, `shard()`, `lookup()`, `truncate()` have changed: 34 | now they have multireturn `result`, `error`. If `result == nil`, `error` will contain 35 | either an error object `{ errno, message }` or just `message` 36 | * `append_shard` no longer requires zones; they are retrieved from configs 37 | * Resharding does not start on append. It is **required** to call `start_resharding` after a successful append (see the sketch after this list) 38 | * Shard's `wait_connection()` is replaced by a new function `wait_for_shards_to_go_online(...)` 39 | * Connection pool method `one` is renamed to `get_any_active_server` 40 | * Connection pool method `all` is renamed to `get_all_active_servers` 41 | * `check_connection` is removed
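To illustrate the conventions above, here is a minimal sketch; it assumes an already initialized cluster, placeholder URIs, and that `append_shard`, `wait_for_shards_to_go_online` and `start_resharding` are reachable through the `shard` module table:

```lua
-- Hedged sketch of the 2.0 conventions: multireturn error handling,
-- URI-only append_shard(), and the explicit resharding start.
local ok, err = shard.append_shard({'localhost:3305', 'localhost:3306'})
if ok == nil then
    -- err is either an { errno, message } object or a plain message
    error(type(err) == 'table' and err.message or err)
end
-- Resharding no longer starts on append; trigger it explicitly.
shard.wait_for_shards_to_go_online(10, 0.1)
shard.start_resharding()
```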
42 | 43 | ### API changes 44 | 45 | #### New functions 46 | 47 | * `start_resharding()` -- toggles the flag that turns on resharding (global) 48 | * `remote_rotate(shard_id)` -- calls `rotate_shard` on each cluster server one by one (global) 49 | * `get_server_list()` -- returns the list of servers from shard's configuration (global) 50 | 51 | * `start_monitoring()` -- turns on fibers that monitor shards 52 | * `init_synchronizer()` -- starts the synchronizer fiber, which checks resharding status 53 | * `disable_resharding()` -- gracefully stops resharding fibers (pairs with `enable_resharding`) 54 | * `rotate_shard(shard_id)` -- switches the places of master and replica in the shard table 55 | * `reload_schema()` -- loads the new schema and invalidates mergers 56 | * `synchronize_shards_object(zones_object)` -- performs `rotate_shard` on a particular shard if its entry in 57 | the shard table differs from `zones_object` 58 | * `get_zones` -- returns the `zones_object` from the server 59 | * `truncate(self, space)` -- truncates a space on the whole cluster 60 | * `truncate_local_space(space)` -- performs truncate on a single server 61 | * `wait_for_shards_to_go_online(timeout, delay)` -- blocks the fiber until all shards are in the `connected` 62 | state. If `timeout` is reached, returns an error. 63 | 64 | New callbacks in the connection pool: 65 | * `on_connection_failure` 66 | * `on_monitor_failure` 67 | * `on_dead_disconnected` 68 | * `on_dead_connected` 69 | 70 | The following callbacks are used for failover: 71 | * `on_server_fail` 72 | * `on_server_return` 73 | 74 | #### Reworked functions 75 | 76 | * `append_shard(servers)` -- accepts only a table of servers' URIs 77 | 78 | ### New features 79 | 80 | * Adds one more option, `rsd_max_tuple_transfer`, which defines how many tuples 81 | will be transferred in one operation (default: 1000); see the sketch after this section 82 | * A lot of improvements for error handling 83 | * `shard_status` gathers the UUIDs of all servers in the connection pool 84 | 85 | * Implemented a state machine for the server object (`server.state`). One of 5 states is possible: 86 | - connecting 87 | - connected 88 | - failed 89 | - is_dead_connected 90 | - is_dead_disconnected 91 |
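A minimal init sketch passing the new resharding option; it mirrors the README's single-node usage example, and the exact value and the assumption that the option lives in the regular `cfg` table (next to `rsd_max_rps`) are illustrative:

```lua
-- Hypothetical single-node init that caps resharding batches at 500 tuples.
local shard = require('shard')
shard.init({
    servers = { { uri = 'localhost:33021', zone = '0' } },
    login = 'guest',
    password = '',
    redundancy = 1,
    rsd_max_tuple_transfer = 500, -- at most 500 tuples moved per operation
})
```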
92 | ### Bug Fixes 93 | 94 | * Correct sorting order in `mr_select` 95 | 96 | ## Version 1.2 (stable) 97 | 98 | Refer to `README.md` for details 99 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8 FATAL_ERROR) 2 | 3 | set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH}) 4 | include(ExternalProject) 5 | 6 | project(shard C) 7 | if(NOT CMAKE_BUILD_TYPE) 8 | set(CMAKE_BUILD_TYPE Debug) 9 | endif() 10 | 11 | # Find Tarantool 12 | set(TARANTOOL_FIND_REQUIRED ON) 13 | set(CMAKE_INSTALL_DATADIR "" ) 14 | find_package(Tarantool) 15 | include_directories(${TARANTOOL_INCLUDE_DIRS}) 16 | 17 | # Find MsgPuck library 18 | set(MsgPuck_FIND_REQUIRED ON) 19 | find_package(MsgPuck) 20 | include_directories(${MSGPUCK_INCLUDE_DIRS}) 21 | 22 | add_definitions("-D_GNU_SOURCE") 23 | 24 | # Set CFLAGS 25 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") 26 | set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wall -Wextra") 27 | 28 | # Build module 29 | add_subdirectory(shard) 30 | # Enable tests 31 | add_subdirectory(test) 32 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | stage('Build'){ 2 | packpack = new org.tarantool.packpack() 3 | 4 | matrix = packpack.filterMatrix( 5 | packpack.default_matrix, 6 | {!(it['OS'] == 'fedora' && it['DIST'] == 'rawhide')}) 7 | 8 | node { 9 | checkout scm 10 | packpack.prepareSources() 11 | } 12 | packpack.packpackBuildMatrix('result', matrix) 13 | } 14 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2015-2016 Tarantool AUTHORS: 2 | please see AUTHORS file in tarantool/tarantool repository. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, 5 | are permitted provided that the following conditions are met: 6 | 7 | Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | Redistributions in binary form must reproduce the above copyright notice, this 11 | list of conditions and the following disclaimer in the documentation and/or 12 | other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 15 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 16 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR 18 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 19 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 20 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 21 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 23 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DEPRECATED 2 | 3 | ### Please note: this project is deprecated and no longer maintained; please use [vshard](https://github.com/tarantool/vshard). 4 | 5 | # Tarantool sharding module 6 | [![Tests status](https://travis-ci.org/tarantool/shard.svg?branch=master)](https://travis-ci.org/tarantool/shard) 7 | 8 | An application-level library that provides sharding and client-side 9 | reliable replication for [tarantool](http://tarantool.org). Implements 10 | single-phase and two-phase protocol operations (with batching 11 | support), monitors availability of nodes, and automatically expels 12 | failed nodes from the cluster. 13 | 14 | To shard data across nodes, a variant of consistent hashing is 15 | used. The shard key is determined automatically based on the sharded space 16 | description. 17 | 18 | ## Installation 19 | 1. Add the [tarantool repository](http://tarantool.org/download.html) for 20 | yum or apt 21 | 2. Install 22 | ```bash 23 | $ sudo [yum|apt-get] install tarantool tarantool-shard tarantool-pool 24 | ``` 25 | 26 | ## Terminology 27 | 28 | * redundancy - the redundancy factor. How many copies of each tuple to 29 | maintain in the cluster 30 | * zone - a redundancy zone. May represent a single machine or a single 31 | data center. The number of zones must be greater than or equal to the 32 | redundancy factor: duplicating data in the same zone doesn't 33 | increase availability 34 | 35 | ## Usage example 36 | 37 | This example starts a tarantool instance that connects to itself, 38 | creating a sharding configuration with a single zone and a single 39 | server. 40 | 41 | If you need more servers, add entries to the `servers` part of the 42 | configuration. See "Configuration" below for details.
43 | 44 | ```lua 45 | local shard = require('shard') 46 | local json = require('json') 47 | 48 | -- tarantool configuration 49 | box.cfg { 50 | wal_mode = 'none', 51 | listen = 33021 52 | } 53 | 54 | box.schema.create_space('demo', {if_not_exists = true}) 55 | box.space.demo:create_index('primary', 56 | {parts={1, 'unsigned'}, if_not_exists=true}) 57 | 58 | box.schema.user.grant('guest', 'read,write,execute', 59 | 'universe', nil, {if_not_exists = true}) 60 | 61 | box.schema.user.grant('guest', 'replication', 62 | nil, nil, {if_not_exists = true}) 63 | 64 | 65 | -- sharding configuration 66 | shard.init { 67 | servers = { 68 | { uri = 'localhost:33021', zone = '0' }, 69 | }, 70 | 71 | login = 'guest', 72 | password = '', 73 | redundancy = 1 74 | } 75 | 76 | shard.demo:insert({1, 'test'}) 77 | shard.demo:replace({1, 'test2'}) 78 | shard.demo:update({1}, {{'=', 2, 'test3'}}) 79 | shard.demo:insert({2, 'test4'}) 80 | shard.demo:insert({3, 'test5'}) 81 | shard.demo:delete({3}) 82 | 83 | print(json.encode(shard.demo:select({1}))) 84 | print(json.encode(shard.demo:select({2}))) 85 | ``` 86 | 87 | ## Testing 88 | 89 | The sharding module can be tested with the [Tarantool functional testing framework](https://github.com/tarantool/test-run): 90 | ```bash 91 | pip install -r test-run/requirements.txt 92 | python test/test-run.py 93 | ``` 94 | 95 | ## Configuration 96 | 97 | ```lua 98 | cfg = { 99 | servers = { 100 | { uri = 'localhost:33130', zone = '0' }, 101 | { uri = 'localhost:33131', zone = '1' }, 102 | { uri = 'localhost:33132', zone = '2' } 103 | }, 104 | login = 'tester', 105 | password = 'pass', 106 | monitor = true, 107 | pool_name = "default", 108 | redundancy = 3, 109 | rsd_max_rps = 1000, 110 | replication = true 111 | } 112 | ``` 113 | 114 | Where: 115 | 116 | * `servers`: a list of dictionaries {uri = '', zone = ''} that describe 117 | individual servers in the sharding configuration 118 | * `login` and `password`: credentials that will be used to connect to 119 | `servers` 120 | * `monitor`: whether to do active checks on the servers and remove 121 | them from sharding if they become unreachable (default `true`) 122 | * `pool_name`: display name of the connection pool created for the 123 | group of `servers`. This only matters if you 124 | use the [connpool](https://github.com/tarantool/connpool) module in 125 | parallel to the sharding module for other purposes. Otherwise you 126 | may skip this option. (default `'default'`) 127 | * `redundancy`: How many copies of each tuple to maintain in the 128 | cluster. (defaults to the number of zones) 129 | * `replication`: Set to `true` if redundancy is handled by replication 130 | (default is `false`) 131 | 132 | Timeout options are global, and can be set before calling the `init()` 133 | function, like this: 134 | 135 | ```lua 136 | shard = require 'shard' 137 | 138 | local cfg = {...} 139 | 140 | shard.REMOTE_TIMEOUT = 210 141 | shard.HEARTBEAT_TIMEOUT = 500 142 | shard.DEAD_TIMEOUT = 10 143 | shard.RECONNECT_AFTER = 30 144 | 145 | shard.init(cfg) 146 | ``` 147 | 148 | Where: 149 | 150 | * `REMOTE_TIMEOUT` is a timeout in seconds for data access operations, 151 | like insert/update/delete. (default is `210`) 152 | * `HEARTBEAT_TIMEOUT` is a timeout in seconds before a heartbeat call 153 | will fail. (default is `500`) 154 | * `DEAD_TIMEOUT` is a timeout in seconds after which the 155 | non-responding node will be expelled from the cluster (default is 156 | 10) 157 | * `RECONNECT_AFTER` allows you to ignore transient failures in remote 158 | operations. Terminated connections will be re-established after a 159 | specified timeout in seconds. Under the hood, it uses the 160 | `reconnect_after` option for `net.box`. (disabled by default, 161 | i.e. `msgpack.NULL`) 162 |
163 | ## API 164 | 165 | ### Configuration and cluster management 166 | 167 | #### `shard.init(cfg)` 168 | 169 | Initializes the sharding module, connects to all nodes and starts monitoring them. 170 | 171 | * cfg - sharding configuration (see Configuration above) 172 | 173 | Note that the sharding configuration can be changed dynamically, and it 174 | is your job to make sure that the changes get reflected in this 175 | configuration: when you restart your cluster, the topology 176 | will be read from whatever you pass to `init()`. 177 | 178 | #### `shard.get_heartbeat()` 179 | 180 | Returns the status of the cluster from the point of view of each node. 181 | 182 | Example output: 183 | 184 | ```yaml 185 | --- 186 | - localhost:3302: 187 | localhost:3302: {'try': 0, 'ts': 1499270503.9233} 188 | localhost:3301: {'try': 0, 'ts': 1499270507.0284} 189 | localhost:3301: 190 | localhost:3302: {'try': 0, 'ts': 1499270504.9097} 191 | localhost:3301: {'try': 0, 'ts': 1499270506.8166} 192 | ... 193 | ``` 194 | 195 | #### `shard.is_table_filled()` 196 | 197 | Returns `true` if the heartbeat table contains data about each node, 198 | from the point of view of each other node. If the sharding module 199 | hasn't yet filled in the heartbeats, or there are dead nodes, this 200 | function will return `false`. 201 | 202 | #### `shard.is_connected()` 203 | 204 | Returns `true` if all shards are connected and operational. 205 | 206 | #### `shard.wait_connection()` 207 | 208 | Waits until all shards are connected and operational. 209 | 210 | #### `shard_status()` 211 | 212 | Returns the status of all shards: whether they are online, offline or 213 | in maintenance. 214 | 215 | Example output: 216 | 217 | ```yaml 218 | --- 219 | - maintenance: [] 220 | offline: [] 221 | online: 222 | - uri: localhost:3301 223 | id: 1 224 | - uri: localhost:3302 225 | id: 2 226 | - uri: localhost:3303 227 | id: 3 228 | ... 229 | ``` 230 | 231 | #### `remote_append(servers)` 232 | 233 | Appends a pair of redundant instances to the cluster, and initiates 234 | resharding. 235 | 236 | * `servers` - a table of servers in the same format as in the config 237 | 238 | This function should be called on one node and will propagate changes 239 | everywhere. 240 | 241 | Example: 242 | 243 | ```lua 244 | remote_append({{uri="localhost:3305", zone='2'}, 245 | {uri="localhost:3306", zone='2'}}) 246 | ``` 247 | 248 | Returns: `true` on success 249 | 250 | #### `remote_join(id)` 251 | 252 | If a node got expelled from the cluster, you may bring it back by 253 | using `remote_join()`. It will reconnect to the node and allow write 254 | access to it. 255 | 256 | There are 2 reasons why it may happen: either the node has died, or 257 | you've called `remote_unjoin()` on it. 258 | 259 | Example: 260 | 261 | ```lua 262 | remote_join(2) 263 | ``` 264 | 265 | Returns: `true` on success 266 | 267 | #### `remote_unjoin(id)` 268 | 269 | Puts the node identified by `id` into maintenance mode. It will not 270 | receive writes, and will not be returned by the `shard()` function. 271 |
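The cluster-management calls above compose naturally. A hedged maintenance sketch, assuming the three-node cluster from the `shard_status()` example output and an already completed `shard.init()`:

```lua
local yaml = require('yaml')
-- Take node 2 out of rotation (e.g. before an upgrade), then bring it back.
shard.wait_connection()             -- block until all shards are operational
remote_unjoin(2)                    -- node 2 stops receiving writes
print(yaml.encode(shard_status()))  -- node 2 is now listed under 'maintenance'
remote_join(2)                      -- reconnect the node and re-enable writes
```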
272 | ### Single-phase operations 273 | 274 | #### `shard.space.insert(tuple)` 275 | 276 | Inserts `tuple` into the shard space. 277 | 278 | `tuple[1]` is treated as shard key. 279 | 280 | Returns: table with results of individual `insert()` calls on each 281 | redundant node. 282 | 283 | #### `shard.space.replace(tuple)` 284 | 285 | Replaces `tuple` across the shard space. 286 | 287 | `tuple[1]` is treated as shard key. 288 | 289 | Returns: table with results of individual `replace()` calls on each 290 | redundant node. 291 | 292 | #### `shard.space.delete(key)` 293 | 294 | Deletes tuples with primary key `key` across the shard space. 295 | 296 | `key[1]` is treated as shard key. 297 | 298 | Returns: table with results of individual `delete()` calls on each 299 | redundant node. 300 | 301 | 302 | #### `shard.space.update(key, {{operator, field_no, value}, ...})` 303 | 304 | Updates `tuple` across the shard space. Behaves the same way as Tarantool's [update()](http://tarantool.org/doc/book/box/box_space.html?highlight=insert#lua-function.space_object.update). 305 | 306 | `key[1]` is treated as shard key. 307 | 308 | Returns: table with results of individual `update()` calls on each 309 | redundant node. 310 | 311 | 312 | #### `shard.space.auto_increment(tuple)` 313 | 314 | Inserts `tuple` into the shard space, automatically incrementing its primary key. 315 | 316 | If the primary key is numeric, `auto_increment()` will use the next integer number. 317 | If the primary key is a string, `auto_increment()` will generate a new UUID. 318 | 319 | The shard key is determined from the space schema, unlike in the `insert()` operation. 320 | 321 | Returns: table with results of individual `auto_increment()` calls on 322 | each redundant node. Return value of each `auto_increment()` is the 323 | same as in the `insert()` call. 324 | 325 | 326 | ### Two-phase operations 327 | 328 | Two-phase operations work, well, in two phases. The first phase pushes 329 | the operation into an auxiliary space "operations" on all the involved 330 | shards, according to the redundancy factor. As soon as the operation 331 | is propagated to the shards, a separate call triggers execution of the 332 | operation on all shards. If the caller dies before invoking the second 333 | phase, the shards figure out by themselves that the operation has been 334 | propagated and execute it anyway (it may just take a while, since the 335 | check is done only periodically). The operation id is 336 | necessary to avoid double execution of the same operation (at-most-once 337 | execution semantics) and must be provided by the user. The status 338 | of the operation can always be checked, given its operation id, and 339 | provided that it has not been pruned from the operations space. 340 |
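A hedged end-to-end sketch of the protocol just described, using the `demo` space from the usage example; the operation id `1` is an arbitrary caller-chosen value, and `shard.check_operation()` is documented below:

```lua
-- Phase one: propagate the insert to all involved shards under operation id 1.
shard.demo:q_insert(1, {100, 'two-phase'})
-- Verify that operation 1 was executed for tuple 100 everywhere
-- (waits up to 5 seconds for completion).
if shard.check_operation(1, 100) then
    print('operation applied on all redundant nodes')
end
```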
341 | #### `shard.space.q_insert(operation_id, tuple)` 342 | 343 | Inserts `tuple` into the shard space. 344 | 345 | * `operation_id` is a unique operation identifier (see "Two-phase operations") 346 | * `tuple[1]` is treated as shard key. 347 | 348 | Returns: `tuple` 349 | 350 | #### `shard.space.q_replace(operation_id, tuple)` 351 | 352 | Replaces `tuple` across the shard space. 353 | 354 | * `operation_id` is a unique operation identifier (see "Two-phase operations") 355 | * `tuple[1]` is treated as shard key. 356 | 357 | Returns: `tuple` 358 | 359 | #### `shard.space.q_delete(operation_id, key)` 360 | 361 | Deletes tuples with primary key `key` across the shard space. 362 | 363 | * `operation_id` is a unique operation identifier (see "Two-phase operations") 364 | * `key` is treated as a shard key. 365 | 366 | Returns: nothing 367 | 368 | #### `shard.space.q_update(operation_id, key, {{operator, field_no, value}, ...})` 369 | 370 | Updates `tuple` across the shard space. Behaves the same way as Tarantool's [update()](http://tarantool.org/doc/book/box/box_space.html?highlight=insert#lua-function.space_object.update). 371 | 372 | * `operation_id` is a unique operation identifier (see "Two-phase operations") 373 | * `key` is treated as shard key. 374 | 375 | Returns: nothing 376 | 377 | #### `shard.space.q_auto_increment(operation_id, tuple)` 378 | 379 | Inserts `tuple` into the shard space, automatically incrementing its primary key. 380 | 381 | * `operation_id` is a unique operation identifier (see "Two-phase operations") 382 | 383 | If the primary key is numeric, `auto_increment()` will use the next integer number. 384 | If the primary key is a string, `auto_increment()` will generate a new UUID. 385 | 386 | The shard key is determined from the space schema, unlike in the `insert()` operation. 387 | 388 | Returns: `tuple` 389 | 390 | #### `shard.check_operation(operation_id, tuple_id)` 391 | 392 | Checks the operation status on all nodes. If the operation 393 | hasn't finished yet, waits for its completion for up to 5 seconds. 394 | 395 | * `operation_id` - unique operation identifier 396 | * `tuple_id` - tuple primary key 397 | 398 | Returns: `true` if the operation has completed, `false` otherwise. 399 | 400 | #### `shard.q_begin()|batch_obj.q_end()` 401 | 402 | `q_begin()` returns an object that wraps multiple sequential two-phase 403 | operations into one batch. You can use it the same way you use the 404 | shard object: 405 | 406 | ```lua 407 | batch_obj = shard.q_begin() 408 | batch_obj.demo:q_insert(1, {0, 'test'}) 409 | batch_obj.demo:q_replace(2, {0, 'test2'}) 410 | batch_obj:q_end() 411 | ``` 412 | 413 | When you call `q_end()`, the batch will be executed in one shot. 414 | 415 | #### `wait_operations()` 416 | 417 | If there are pending two-phase operations, waits until they complete.
418 | -------------------------------------------------------------------------------- /cmake/FindMsgPuck.cmake: -------------------------------------------------------------------------------- 1 | # - Find libmsgpuck header-only library 2 | # The module defines the following variables: 3 | # 4 | # MSGPUCK_FOUND - true if MsgPuck was found 5 | # MSGPUCK_INCLUDE_DIRS - the directory of the MsgPuck headers 6 | # MSGPUCK_LIBRARIES - the MsgPuck static library needed for linking 7 | # 8 | 9 | find_path(MSGPUCK_INCLUDE_DIR msgpuck.h PATH_SUFFIXES msgpuck) 10 | find_library(MSGPUCK_LIBRARY NAMES libmsgpuck.a) 11 | 12 | include(FindPackageHandleStandardArgs) 13 | find_package_handle_standard_args(MsgPuck 14 | REQUIRED_VARS MSGPUCK_INCLUDE_DIR MSGPUCK_LIBRARY) 15 | set(MSGPUCK_INCLUDE_DIRS ${MSGPUCK_INCLUDE_DIR}) 16 | set(MSGPUCK_LIBRARIES ${MSGPUCK_LIBRARY}) 17 | mark_as_advanced(MSGPUCK_INCLUDE_DIR MSGPUCK_INCLUDE_DIRS 18 | MSGPUCK_LIBRARY MSGPUCK_LIBRARIES) 19 | -------------------------------------------------------------------------------- /cmake/FindTarantool.cmake: -------------------------------------------------------------------------------- 1 | # Define GNU standard installation directories 2 | include(GNUInstallDirs) 3 | 4 | macro(extract_definition name output input) 5 | string(REGEX MATCH "#define[\t ]+${name}[\t ]+\"([^\"]*)\"" 6 | _t "${input}") 7 | string(REGEX REPLACE "#define[\t ]+${name}[\t ]+\"(.*)\"" "\\1" 8 | ${output} "${_t}") 9 | endmacro() 10 | 11 | find_path(TARANTOOL_INCLUDE_DIR tarantool/module.h 12 | HINTS ${TARANTOOL_DIR} ENV TARANTOOL_DIR 13 | PATH_SUFFIXES include 14 | ) 15 | 16 | if(TARANTOOL_INCLUDE_DIR) 17 | set(_config "-") 18 | file(READ "${TARANTOOL_INCLUDE_DIR}/tarantool/module.h" _config0) 19 | string(REPLACE "\\" "\\\\" _config ${_config0}) 20 | unset(_config0) 21 | extract_definition(PACKAGE_VERSION TARANTOOL_VERSION ${_config}) 22 | extract_definition(INSTALL_PREFIX _install_prefix ${_config}) 23 | unset(_config) 24 | endif() 25 | 26 | include(FindPackageHandleStandardArgs) 27 | find_package_handle_standard_args(TARANTOOL 28 | REQUIRED_VARS TARANTOOL_INCLUDE_DIR VERSION_VAR TARANTOOL_VERSION) 29 | if(TARANTOOL_FOUND) 30 | set(TARANTOOL_INCLUDE_DIRS "${TARANTOOL_INCLUDE_DIR}" 31 | "${TARANTOOL_INCLUDE_DIR}/tarantool/" 32 | CACHE PATH "Include directories for Tarantool") 33 | set(TARANTOOL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/tarantool" 34 | CACHE PATH "Directory for storing Lua modules written in C") 35 | set(TARANTOOL_INSTALL_LUADIR "${CMAKE_INSTALL_DATADIR}/tarantool" 36 | CACHE PATH "Directory for storing Lua modules written in Lua") 37 | 38 | if (NOT TARANTOOL_FIND_QUIETLY AND NOT FIND_TARANTOOL_DETAILS) 39 | set(FIND_TARANTOOL_DETAILS ON CACHE INTERNAL "Details about TARANTOOL") 40 | message(STATUS "Tarantool LUADIR is ${TARANTOOL_INSTALL_LUADIR}") 41 | message(STATUS "Tarantool LIBDIR is ${TARANTOOL_INSTALL_LIBDIR}") 42 | endif () 43 | endif() 44 | mark_as_advanced(TARANTOOL_INCLUDE_DIRS TARANTOOL_INSTALL_LIBDIR 45 | TARANTOOL_INSTALL_LUADIR) 46 | -------------------------------------------------------------------------------- /debian/.gitignore: -------------------------------------------------------------------------------- 1 | tarantool-shard/ 2 | files 3 | stamp-* 4 | *.substvars 5 | *.log 6 | -------------------------------------------------------------------------------- /debian/changelog: -------------------------------------------------------------------------------- 1 | tarantool-shard (1.1.0-1) unstable; urgency=medium 2 | 3 | * Initial
release. 4 | 5 | -- Roman Tsisyk Fri, 19 Feb 2016 14:34:49 +0300 6 | -------------------------------------------------------------------------------- /debian/compat: -------------------------------------------------------------------------------- 1 | 9 2 | -------------------------------------------------------------------------------- /debian/control: -------------------------------------------------------------------------------- 1 | Source: tarantool-shard 2 | Priority: optional 3 | Section: database 4 | Maintainer: Roman Tsisyk 5 | Build-Depends: debhelper (>= 9), 6 | tarantool (>= 1.7.2.0), 7 | tarantool-dev (>= 1.7.2.0), 8 | libmsgpuck-dev (>= 1.0.0) 9 | Standards-Version: 3.9.6 10 | Homepage: https://github.com/tarantool/shard 11 | Vcs-Git: git://github.com/tarantool/shard.git 12 | Vcs-Browser: https://github.com/tarantool/shard 13 | 14 | Package: tarantool-shard 15 | Architecture: all 16 | Depends: tarantool (>= 1.7.2.0), ${misc:Depends} 17 | Description: Tarantool sharding module 18 | An application-level library that provides sharding and client-side reliable 19 | replication for tarantool 1.6. Implements single-phase and two-phase 20 | protocol operations (with batching support), monitors availability of nodes 21 | and automatically expels failed nodes from the cluster. 22 | -------------------------------------------------------------------------------- /debian/copyright: -------------------------------------------------------------------------------- 1 | Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ 2 | Debianized-By: Roman Tsisyk 3 | Upstream-Name: tarantool-shard 4 | Upstream-Contact: support@tarantool.org 5 | Source: https://github.com/tarantool/shard 6 | 7 | Files: * 8 | Copyright: 2015-2016 Tarantool AUTHORS 9 | License: BSD-2-Clause 10 | Redistribution and use in source and binary forms, with or without 11 | modification, are permitted provided that the following conditions 12 | are met: 13 | 1. Redistributions of source code must retain the above copyright 14 | notice, this list of conditions and the following disclaimer. 15 | 2. Redistributions in binary form must reproduce the above copyright 16 | notice, this list of conditions and the following disclaimer in the 17 | documentation and/or other materials provided with the distribution. 18 | . 19 | THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 | ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 | OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 | OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 | SUCH DAMAGE.
30 | -------------------------------------------------------------------------------- /debian/docs: -------------------------------------------------------------------------------- 1 | README.md 2 | -------------------------------------------------------------------------------- /debian/rules: -------------------------------------------------------------------------------- 1 | #!/usr/bin/make -f 2 | 3 | DEB_CMAKE_EXTRA_FLAGS := -DCMAKE_INSTALL_LIBDIR=lib/$(DEB_HOST_MULTIARCH) \ 4 | -DCMAKE_BUILD_TYPE=RelWithDebInfo 5 | # temporary disable tests - Debian has old gevent 6 | DEB_MAKE_CHECK_TARGET := 7 | 8 | include /usr/share/cdbs/1/rules/debhelper.mk 9 | include /usr/share/cdbs/1/class/cmake.mk 10 | -------------------------------------------------------------------------------- /debian/source/format: -------------------------------------------------------------------------------- 1 | 3.0 (quilt) 2 | -------------------------------------------------------------------------------- /debug/bench.sh: -------------------------------------------------------------------------------- 1 | killall tarantool 2 | rm *.snap 3 | rm m1.log 4 | rm work/m2.log 5 | rm work/*.snap 6 | 7 | tarantool master.lua & 8 | tarantool master1.lua & 9 | 10 | -------------------------------------------------------------------------------- /debug/master.lua: -------------------------------------------------------------------------------- 1 | shard = require('shard') 2 | log = require('log') 3 | yaml = require('yaml') 4 | 5 | local cfg = { 6 | servers = { 7 | { uri = 'localhost:3313', zone = '0' }; 8 | { uri = 'localhost:3314', zone = '0' }; 9 | }; 10 | http = 8080; 11 | login = 'tester'; 12 | password = 'pass'; 13 | redundancy = 1; 14 | binary = 3313; 15 | my_uri = 'localhost:3313' 16 | } 17 | 18 | box.cfg { 19 | slab_alloc_arena = 1.0; 20 | slab_alloc_factor = 1.06; 21 | slab_alloc_minimal = 16; 22 | wal_mode = 'none'; 23 | logger = 'm1.log'; 24 | log_level = 5; 25 | listen = cfg.binary; 26 | } 27 | if not box.space.demo then 28 | box.schema.user.create(cfg.login, { password = cfg.password }) 29 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 30 | 31 | local demo = box.schema.create_space('demo') 32 | demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 33 | end 34 | 35 | -- init shards 36 | shard.init(cfg) 37 | 38 | -- wait for operations 39 | require('fiber').sleep(3) 40 | 41 | -- show results 42 | log.info(yaml.encode(box.space.demo:select{})) 43 | -------------------------------------------------------------------------------- /debug/master1.lua: -------------------------------------------------------------------------------- 1 | shard = require('shard') 2 | log = require('log') 3 | yaml = require('yaml') 4 | 5 | local cfg = { 6 | servers = { 7 | { uri = 'localhost:3313', zone = '0' }; 8 | { uri = 'localhost:3314', zone = '0' }; 9 | }; 10 | http = 8080; 11 | login = 'tester'; 12 | password = 'pass'; 13 | redundancy = 1; 14 | binary = 3314; 15 | my_uri = 'localhost:3314' 16 | } 17 | 18 | box.cfg { 19 | slab_alloc_arena = 1.0; 20 | slab_alloc_factor = 1.06; 21 | slab_alloc_minimal = 16; 22 | wal_mode = 'none'; 23 | logger = 'm2.log'; 24 | log_level = 5; 25 | work_dir='work'; 26 | listen = cfg.binary; 27 | } 28 | if not box.space.demo then 29 | box.schema.user.create(cfg.login, { password = cfg.password }) 30 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 31 | 32 | local demo = box.schema.create_space('demo') 33 | demo:create_index('primary', {type = 'tree', parts = 
{1, 'num'}}) 34 | end 35 | 36 | -- init shards 37 | shard.init(cfg) 38 | 39 | -- do insert, replace, update operations 40 | shard.demo:q_auto_increment(1, {'second', 'third'}) 41 | shard.demo:q_auto_increment(2, {'second'}) 42 | test_id = shard.demo:q_auto_increment(3, {'test'})[1] 43 | shard.demo:q_replace(4, {test_id, 'test2'}) 44 | shard.demo:q_update(5, test_id, {{'=', 2, 'test3'}}) 45 | shard.demo:q_auto_increment(6, {'test_incr'}) 46 | 47 | --batching 48 | batch = shard.q_begin() 49 | batch.demo:q_auto_increment(7, {'batch1'}) 50 | batch.demo:q_auto_increment(8, {'batch2'}) 51 | batch.demo:q_auto_increment(9, {'batch3'}) 52 | batch:q_end() 53 | 54 | -- wait and show results 55 | require('fiber').sleep(3) 56 | log.info(yaml.encode(box.space.demo:select{})) 57 | log.info(yaml.encode(shard.demo.check_operation(4, test_id))) 58 | -------------------------------------------------------------------------------- /demo.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | shard = require('shard') 4 | log = require('log') 5 | yaml = require('yaml') 6 | 7 | -- check that demo space exists 8 | shard.check_shard = function(conn) 9 | return conn.space.demo ~= nil 10 | end 11 | 12 | local cfg = { 13 | -- shards config 14 | servers = { 15 | { uri = [[localhost:33021]]; zone = [[1]]}; 16 | }; 17 | -- shard login/password 18 | login = 'tester'; 19 | password = 'pass'; 20 | monitor = false; 21 | redundancy = 1; 22 | binary = 33021 23 | } 24 | 25 | -- tarantool configuration 26 | box.cfg { 27 | slab_alloc_arena = 1; 28 | slab_alloc_factor = 1.06; 29 | slab_alloc_minimal = 16; 30 | wal_mode = 'none'; 31 | listen = cfg.binary 32 | } 33 | 34 | if not box.space.demo then 35 | box.schema.user.create(cfg.login, { password = cfg.password }) 36 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 37 | 38 | local demo = box.schema.space.create('demo') 39 | demo:create_index('primary', {type = 'hash', parts = {1, 'num'}}) 40 | end 41 | 42 | -- run sharding 43 | shard.init(cfg) 44 | 45 | --test shard insert 46 | print('insert') 47 | shard.demo.insert({0, 'test'}) 48 | 49 | --test shard select 50 | print('select') 51 | data = shard.demo.select{0} 52 | print(yaml.encode(data)) 53 | 54 | print('replace') 55 | shard.demo.replace({0, 'test2'}) 56 | data = shard.demo.select() 57 | print(yaml.encode(data)) 58 | 59 | print('update') 60 | shard.demo.update(0, {{'=', 2, 'test3'}}) 61 | result = shard.demo.select{0} 62 | print(yaml.encode(result)) 63 | 64 | print('auto_increment') 65 | shard.demo.auto_increment{'test3'} 66 | shard.demo.auto_increment{'test4'} 67 | result = box.space.demo:select() 68 | print(yaml.encode(result)) 69 | 70 | 71 | shard.demo.delete(0) 72 | 73 | -- vim: ts=4:sw=4:sts=4:et 74 | -------------------------------------------------------------------------------- /rpm/tarantool-shard.spec: -------------------------------------------------------------------------------- 1 | Name: tarantool-shard 2 | Version: 1.1.0 3 | Release: 1%{?dist} 4 | Summary: Tarantool sharding module 5 | Group: Applications/Databases 6 | License: BSD 7 | URL: https://github.com/tarantool/shard 8 | Source0: https://github.com/tarantool/shard/archive/%{version}/shard-%{version}.tar.gz 9 | BuildRequires: cmake >= 2.8 10 | BuildRequires: gcc >= 4.5 11 | BuildRequires: tarantool >= 1.7.2.0 12 | BuildRequires: tarantool-devel 13 | BuildRequires: msgpuck-devel >= 1.0.0 14 | Requires: tarantool >= 1.7.2.0 15 | 16 | # For tests 17 | %if (0%{?fedora} >= 22)
18 | BuildRequires: python >= 2.7 19 | BuildRequires: python-six >= 1.9.0 20 | BuildRequires: python-gevent >= 1.0 21 | BuildRequires: python-yaml >= 3.0.9 22 | # Temporary for old test-run 23 | # https://github.com/tarantool/shard/issues/1 24 | BuildRequires: python-daemon 25 | %endif 26 | 27 | %description 28 | An application-level library that provides sharding and client-side reliable 29 | replication for tarantool 1.6. Implements single-phase and two-phase 30 | protocol operations (with batching support), monitors availability of nodes 31 | and automatically expels failed nodes from the cluster. 32 | 33 | %prep 34 | %setup -q -n shard-%{version} 35 | 36 | %build 37 | %cmake . -DCMAKE_BUILD_TYPE=RelWithDebInfo 38 | make %{?_smp_mflags} 39 | 40 | # tests are disabled till they are fixed (see https://github.com/tarantool/shard/issues/71) 41 | # %check 42 | # %if (0%{?fedora} >= 22) 43 | # make test 44 | # %endif 45 | 46 | %install 47 | %make_install 48 | 49 | %files 50 | %{_libdir}/tarantool/shard/ 51 | %{_datarootdir}/tarantool/shard/ 52 | %doc README.md 53 | %{!?_licensedir:%global license %doc} 54 | %license LICENSE 55 | 56 | %changelog 57 | * Fri Feb 19 2016 Roman Tsisyk 1.1.0-1 58 | - Initial version of the RPM spec 59 | -------------------------------------------------------------------------------- /shard-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = 'shard' 2 | version = 'scm-1' 3 | source = { 4 | url = 'git://github.com/tarantool/shard.git', 5 | branch = 'master', 6 | } 7 | description = { 8 | summary = "Lua sharding for Tarantool", 9 | homepage = 'https://github.com/tarantool/shard.git', 10 | license = 'BSD', 11 | } 12 | dependencies = { 13 | 'lua >= 5.1'; 14 | } 15 | external_dependencies = { 16 | TARANTOOL = { 17 | header = 'tarantool/module.h'; 18 | }; 19 | } 20 | build = { 21 | type = 'cmake'; 22 | variables = { 23 | CMAKE_BUILD_TYPE="RelWithDebInfo"; 24 | TARANTOOL_DIR="$(TARANTOOL_DIR)"; 25 | TARANTOOL_INSTALL_LIBDIR="$(LIBDIR)"; 26 | TARANTOOL_INSTALL_LUADIR="$(LUADIR)"; 27 | }; 28 | } 29 | 30 | -- vim: syntax=lua 31 | -------------------------------------------------------------------------------- /shard/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | if(APPLE) 2 | set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -undefined suppress -flat_namespace") 3 | endif(APPLE) 4 | 5 | # Add C library 6 | add_library(driver SHARED driver.c) 7 | set_target_properties(driver PROPERTIES PREFIX "" OUTPUT_NAME "driver") 8 | target_link_libraries(driver ${MSGPUCK_LIBRARIES}) 9 | 10 | # Install module 11 | install(FILES init.lua DESTINATION ${TARANTOOL_INSTALL_LUADIR}/shard) 12 | install(FILES connpool.lua DESTINATION ${TARANTOOL_INSTALL_LUADIR}/shard) 13 | install(TARGETS driver LIBRARY DESTINATION ${TARANTOOL_INSTALL_LIBDIR}/shard) 14 | -------------------------------------------------------------------------------- /shard/driver.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Redistribution and use in source and binary forms, with or 3 | * without modification, are permitted provided that the following 4 | * conditions are met: 5 | * 6 | * 1. Redistributions of source code must retain the above 7 | * copyright notice, this list of conditions and the 8 | * following disclaimer. 9 | * 10 | * 2.
Redistributions in binary form must reproduce the above 11 | * copyright notice, this list of conditions and the following 12 | * disclaimer in the documentation and/or other materials 13 | * provided with the distribution. 14 | * 15 | * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND 16 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 17 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 19 | * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 20 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 23 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 24 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF 26 | * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 | * SUCH DAMAGE. 28 | */ 29 | #include 30 | 31 | #include 32 | #include 33 | 34 | #include 35 | #include 36 | 37 | #include "ibuf.h" 38 | #include "msgpuck.h" 39 | 40 | #define HEAP_FORWARD_DECLARATION 41 | #include "heap.h" 42 | 43 | #define IPROTO_DATA 0x30 44 | 45 | struct source { 46 | struct heap_node hnode; 47 | struct ibuf *buf; 48 | struct tuple *tuple; 49 | }; 50 | 51 | static uint32_t merger_type_id = 0; 52 | 53 | struct merger { 54 | heap_t heap; 55 | uint32_t count; 56 | uint32_t capacity; 57 | struct source **sources; 58 | struct key_def *key_def; 59 | box_tuple_format_t *format; 60 | int order; 61 | }; 62 | 63 | static bool 64 | source_less(const heap_t *heap, const struct heap_node *a, 65 | const struct heap_node *b) 66 | { 67 | struct source *left = container_of(a, struct source, hnode); 68 | struct source *right = container_of(b, struct source, hnode); 69 | if (left->tuple == NULL && right->tuple == NULL) 70 | return false; 71 | if (left->tuple == NULL) 72 | return false; 73 | if (right->tuple == NULL) 74 | return true; 75 | struct merger *merger = container_of(heap, struct merger, heap); 76 | return merger->order * 77 | box_tuple_compare(left->tuple, right->tuple, merger->key_def) < 0; 78 | } 79 | 80 | #define HEAP_NAME merger_heap 81 | #define HEAP_LESS source_less 82 | #include "heap.h" 83 | 84 | static inline void 85 | source_fetch(struct source *source, box_tuple_format_t *format) 86 | { 87 | source->tuple = NULL; 88 | if (ibuf_used(source->buf) == 0) 89 | return; 90 | const char *tuple_beg = source->buf->rpos; 91 | const char *tuple_end = tuple_beg; 92 | mp_next(&tuple_end); 93 | assert(tuple_end <= source->buf->wpos); 94 | source->buf->rpos = (char *)tuple_end; 95 | source->tuple = box_tuple_new(format, tuple_beg, tuple_end); 96 | box_tuple_ref(source->tuple); 97 | } 98 | 99 | static void 100 | free_sources(struct merger *merger) 101 | { 102 | for (uint32_t i = 0; i < merger->count; ++i) { 103 | if (merger->sources[i]->tuple != NULL) 104 | box_tuple_unref(merger->sources[i]->tuple); 105 | free(merger->sources[i]); 106 | } 107 | merger->count = 0; 108 | free(merger->sources); 109 | merger->capacity = 0; 110 | merger_heap_destroy(&merger->heap); 111 | merger_heap_create(&merger->heap); 112 | } 113 | 114 | static int 115 | lbox_merger_start(struct lua_State *L) 116 | { 117 | struct merger **merger_ptr; 118 | uint32_t cdata_type; 119 | if (lua_gettop(L) != 3 || lua_istable(L, 2) != 1 || 120 | lua_isnumber(L, 3) != 1 || 121 | (merger_ptr = luaL_checkcdata(L, 1, &cdata_type)) == NULL || 
122 | cdata_type != merger_type_id) { 123 | return luaL_error(L, "Bad params, use: start(merger, {buffers}, " 124 | "order)"); 125 | } 126 | struct merger *merger = *merger_ptr; 127 | merger->order = lua_tointeger(L, 3) >= 0? 1: -1; 128 | free_sources(merger); 129 | 130 | merger->capacity = 8; 131 | merger->sources = (struct source **)malloc(merger->capacity * 132 | sizeof(struct source *)); 133 | if (merger->sources == NULL) 134 | return luaL_error(L, "Can't alloc sources buffer"); 135 | /* Fetch all sources */ 136 | while (true) { 137 | lua_pushinteger(L, merger->count + 1); 138 | lua_gettable(L, 2); 139 | if (lua_isnil(L, -1)) 140 | break; 141 | struct ibuf *buf = (struct ibuf *)lua_topointer(L, -1); 142 | if (buf == NULL) 143 | break; 144 | if (ibuf_used(buf) == 0) 145 | continue; 146 | if (merger->count == merger->capacity) { 147 | merger->capacity *= 2; 148 | struct source **new_sources; 149 | new_sources = 150 | (struct source **)realloc(merger->sources, 151 | merger->capacity * sizeof(struct source *)); 152 | if (new_sources == NULL) { 153 | free_sources(merger); 154 | return luaL_error(L, "Can't alloc sources buffer"); 155 | } 156 | merger->sources = new_sources; 157 | } 158 | merger->sources[merger->count] = 159 | (struct source *)malloc(sizeof(struct source)); 160 | if (merger->sources[merger->count] == NULL) { 161 | free_sources(merger); 162 | return luaL_error(L, "Can't alloc merge source"); 163 | } 164 | if (mp_typeof(*buf->rpos) != MP_MAP || 165 | mp_decode_map((const char **)&buf->rpos) != 1 || 166 | mp_typeof(*buf->rpos) != MP_UINT || 167 | mp_decode_uint((const char **)&buf->rpos) != IPROTO_DATA || 168 | mp_typeof(*buf->rpos) != MP_ARRAY) { 169 | free_sources(merger); 170 | return luaL_error(L, "Invalid merge source"); 171 | } 172 | mp_decode_array((const char **)&buf->rpos); 173 | merger->sources[merger->count]->buf = buf; 174 | merger->sources[merger->count]->tuple = NULL; 175 | source_fetch(merger->sources[merger->count], merger->format); 176 | if (merger->sources[merger->count]->tuple != NULL) 177 | merger_heap_insert(&merger->heap, 178 | &merger->sources[merger->count]->hnode); 179 | ++merger->count; 180 | } 181 | lua_pushboolean(L, true); 182 | return 1; 183 | } 184 | 185 | static int 186 | lbox_merge_next(struct lua_State *L) 187 | { 188 | struct merger **merger_ptr; 189 | uint32_t cdata_type; 190 | if (lua_gettop(L) != 1 || 191 | (merger_ptr = luaL_checkcdata(L, 1, &cdata_type)) == NULL || 192 | cdata_type != merger_type_id) { 193 | return luaL_error(L, "Bad params, use: next(merger)"); 194 | } 195 | struct merger *merger = *merger_ptr; 196 | struct heap_node *hnode = merger_heap_top(&merger->heap); 197 | if (hnode == NULL) { 198 | lua_pushnil(L); 199 | return 1; 200 | } 201 | struct source *source = container_of(hnode, struct source, hnode); 202 | luaT_pushtuple(L, source->tuple); 203 | box_tuple_unref(source->tuple); 204 | source_fetch(source, merger->format); 205 | if (source->tuple == NULL) 206 | merger_heap_delete(&merger->heap, hnode); 207 | else 208 | merger_heap_update(&merger->heap, hnode); 209 | return 1; 210 | } 211 | 212 | static int 213 | lbox_merger_new(struct lua_State *L) 214 | { 215 | if (lua_gettop(L) != 1 || lua_istable(L, 1) != 1) { 216 | return luaL_error(L, "Bad params, use: new({" 217 | "{fieldno = fieldno, type = type}, ...}"); 218 | } 219 | uint16_t count = 0, capacity = 8; 220 | uint32_t *fieldno = NULL; 221 | enum field_type *type = NULL; 222 | fieldno = (uint32_t *)malloc(sizeof(*fieldno) * capacity); 223 | if (fieldno == NULL) 224 | 
return luaL_error(L, "Can not alloc fieldno buffer"); 225 | type = (enum field_type *)malloc(sizeof(*type) * capacity); 226 | if (type == NULL) { 227 | free(fieldno); 228 | return luaL_error(L, "Can not alloc type buffer"); 229 | } 230 | while (true) { 231 | lua_pushinteger(L, count + 1); 232 | lua_gettable(L, 1); 233 | if (lua_isnil(L, -1)) 234 | break; 235 | if (count == capacity) { 236 | capacity *= 2; 237 | uint32_t *old_fieldno = fieldno; 238 | fieldno = (uint32_t *)realloc(fieldno, 239 | sizeof(*fieldno) * capacity); 240 | if (fieldno == NULL) { 241 | free(old_fieldno); 242 | free(type); 243 | return luaL_error(L, "Can not alloc fieldno buffer"); 244 | } 245 | enum field_type *old_type = type; 246 | type = (enum field_type *)realloc(type, 247 | sizeof(*type) * capacity); 248 | if (type == NULL) { 249 | free(fieldno); 250 | free(old_type); 251 | return luaL_error(L, "Can not alloc type buffer"); 252 | } 253 | } 254 | lua_pushstring(L, "fieldno"); 255 | lua_gettable(L, -2); 256 | if (lua_isnil(L, -1)) 257 | break; 258 | fieldno[count] = lua_tointeger(L, -1); 259 | lua_pop(L, 1); 260 | lua_pushstring(L, "type"); 261 | lua_gettable(L, -2); 262 | if (lua_isnil(L, -1)) 263 | break; 264 | type[count] = lua_tointeger(L, -1); 265 | lua_pop(L, 1); 266 | ++count; 267 | } 268 | 269 | struct merger *merger = calloc(1, sizeof(*merger)); 270 | if (merger == NULL) { 271 | free(fieldno); 272 | free(type); 273 | return luaL_error(L, "Can not alloc merger"); 274 | } 275 | merger->key_def = box_key_def_new(fieldno, type, count); 276 | if (merger->key_def == NULL) { 277 | free(fieldno); 278 | free(type); 279 | return luaL_error(L, "Can not alloc key_def"); 280 | } 281 | free(fieldno); 282 | free(type); 283 | 284 | merger->format = box_tuple_format_new(&merger->key_def, 1); 285 | if (merger->format == NULL) { 286 | box_key_def_delete(merger->key_def); 287 | free(merger); 288 | return luaL_error(L, "Can not create tuple format"); 289 | } 290 | 291 | *(struct merger **)luaL_pushcdata(L, merger_type_id) = merger; 292 | return 1; 293 | } 294 | 295 | static int 296 | lbox_merger_cmp(lua_State *L) 297 | { 298 | struct merger **merger_ptr; 299 | uint32_t cdata_type; 300 | if (lua_gettop(L) != 2 || 301 | (merger_ptr = luaL_checkcdata(L, 1, &cdata_type)) == NULL || 302 | cdata_type != merger_type_id) 303 | return luaL_error(L, "Bad params, use: cmp(merger, key)"); 304 | const char *key = lua_tostring(L, 2); 305 | struct merger *merger = *merger_ptr; 306 | struct heap_node *hnode = merger_heap_top(&merger->heap); 307 | if (hnode == NULL) { 308 | lua_pushnil(L); 309 | return 1; 310 | } 311 | struct source *source = container_of(hnode, struct source, hnode); 312 | lua_pushinteger(L, box_tuple_compare_with_key(source->tuple, key, 313 | merger->key_def) * 314 | merger->order); 315 | return 1; 316 | } 317 | 318 | static int 319 | lbox_merger_del(lua_State *L) 320 | { 321 | struct merger **merger_ptr; 322 | uint32_t cdata_type; 323 | if ((merger_ptr = luaL_checkcdata(L, 1, &cdata_type)) == NULL || 324 | cdata_type != merger_type_id) 325 | return 0; 326 | struct merger *merger = *merger_ptr; 327 | free_sources(merger); 328 | box_key_def_delete(merger->key_def); 329 | box_tuple_format_unref(merger->format); 330 | free(merger); 331 | return 0; 332 | } 333 | 334 | 335 | LUA_API int 336 | luaopen_shard_driver(lua_State *L) 337 | { 338 | luaL_cdef(L, "struct merger;"); 339 | merger_type_id = luaL_ctypeid(L, "struct merger&"); 340 | lua_newtable(L); 341 | static const struct luaL_Reg meta [] = { 342 | {"merge_new", 
lbox_merger_new}, 343 | {"merge_start", lbox_merger_start}, 344 | {"merge_cmp", lbox_merger_cmp}, 345 | {"merge_next", lbox_merge_next}, 346 | {"merge_del", lbox_merger_del}, 347 | {NULL, NULL} 348 | }; 349 | luaL_register(L, NULL, meta); 350 | return 1; 351 | } 352 | -------------------------------------------------------------------------------- /shard/heap.h: -------------------------------------------------------------------------------- 1 | /* 2 | * *No header guard*: the header is allowed to be included twice 3 | * with different sets of defines. 4 | */ 5 | /* 6 | * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 7 | * 8 | * Redistribution and use in source and binary forms, with or 9 | * without modification, are permitted provided that the following 10 | * conditions are met: 11 | * 12 | * 1. Redistributions of source code must retain the above 13 | * copyright notice, this list of conditions and the 14 | * following disclaimer. 15 | * 16 | * 2. Redistributions in binary form must reproduce the above 17 | * copyright notice, this list of conditions and the following 18 | * disclaimer in the documentation and/or other materials 19 | * provided with the distribution. 20 | * 21 | * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND 22 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 25 | * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 26 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 30 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF 32 | * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 | * SUCH DAMAGE. 34 | */ 35 | 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | 42 | /** 43 | * Additional user defined name that appended to prefix 'heap' 44 | * for all names of structs and functions in this header file. 45 | * All names use pattern: heap_ 46 | * May be empty, but still have to be defined (just #define HEAP_NAME) 47 | * Example: 48 | * #define HEAP_NAME test_ 49 | * ... 50 | * test_heap_create(&some_heap); 51 | * test_heap_destroy(&some_heap); 52 | */ 53 | 54 | /* For predefinition of structures and type non specific functions just make: 55 | * #define HEAP_FORWARD_DECLARATION 56 | * #inlude "heap.h" 57 | */ 58 | #ifndef HEAP_FORWARD_DECLARATION 59 | 60 | #ifndef HEAP_NAME 61 | #error "HEAP_NAME must be defined" 62 | #endif /* HEAP_NAME */ 63 | 64 | 65 | /** 66 | * Data comparing function. Takes 3 parameters - heap, node1, node2, 67 | * where heap is pointer onto heap_t structure and node1, node2 68 | * are two pointers on nodes in your structure. 69 | * For example you have such type: 70 | * struct my_type { 71 | * int value; 72 | * struct heap_node vnode; 73 | * }; 74 | * Then node1 and node2 will be pointers on field vnode of two 75 | * my_type instances. 
76 | * The function below is an example of a valid by-value comparator: 77 | * 78 | * int test_type_less(const heap_t *heap, 79 | * const struct heap_node *a, 80 | * const struct heap_node *b) { 81 | * 82 | * const struct my_type *left = (struct my_type *)((char *)a - 83 | * offsetof(struct my_type, vnode)); 84 | * const struct my_type *right = (struct my_type *)((char *)b - 85 | * offsetof(struct my_type, vnode)); 86 | * return left->value < right->value; 87 | * } 88 | * 89 | * HEAP_LESS is the 'less' comparison function used by every operation below; it must be defined! 90 | */ 91 | 92 | #ifndef HEAP_LESS 93 | #error "HEAP_LESS must be defined" 94 | #endif 95 | 96 | 97 | /** 98 | * Tools for name substitution: 99 | */ 100 | #ifndef CONCAT3 101 | #define CONCAT3_R(a, b, c) a##b##c 102 | #define CONCAT3(a, b, c) CONCAT3_R(a, b, c) 103 | #endif 104 | 105 | #ifdef _ 106 | #error '_' must be undefined! 107 | #endif 108 | #ifndef HEAP 109 | #define HEAP(name) CONCAT3(HEAP_NAME, _, name) 110 | #endif 111 | 112 | #endif /* HEAP_FORWARD_DECLARATION */ 113 | 114 | /* Structures. */ 115 | 116 | #ifndef HEAP_STRUCTURES /* Include guard for structures */ 117 | 118 | #define HEAP_STRUCTURES 119 | 120 | enum { 121 | HEAP_INITIAL_CAPACITY = 8 122 | }; 123 | 124 | typedef uint32_t heap_off_t; 125 | 126 | /** 127 | * Main structure for holding the heap. 128 | */ 129 | struct heap_core_structure { 130 | heap_off_t size; 131 | heap_off_t capacity; 132 | struct heap_node **harr; /* array of heap node pointers */ 133 | }; 134 | 135 | typedef struct heap_core_structure heap_t; 136 | 137 | /** 138 | * Heap entry structure. 139 | */ 140 | struct heap_node { 141 | heap_off_t pos; 142 | }; 143 | 144 | /** 145 | * Heap iterator structure. 146 | */ 147 | struct heap_iterator { 148 | heap_t *heap; 149 | heap_off_t curr_pos; 150 | }; 151 | 152 | #endif /* HEAP_STRUCTURES */ 153 | 154 | #ifndef HEAP_FORWARD_DECLARATION 155 | 156 | /* Extern API. This is the part most users need. */ 157 | 158 | /** 159 | * Initialize the heap. 160 | */ 161 | static inline void 162 | HEAP(create)(heap_t *heap); 163 | 164 | /** 165 | * Destroy the current heap. 166 | */ 167 | static inline void 168 | HEAP(destroy)(heap_t *heap); 169 | 170 | /** 171 | * Return the min value. 172 | */ 173 | static inline struct heap_node * 174 | HEAP(top)(heap_t *heap); 175 | 176 | /** 177 | * Erase the min value. 178 | */ 179 | static inline struct heap_node * 180 | HEAP(pop)(heap_t *heap); 181 | 182 | /** 183 | * Insert a value. 184 | */ 185 | static inline int 186 | HEAP(insert)(heap_t *heap, struct heap_node *nd); 187 | 188 | /** 189 | * Delete a node from the heap. 190 | */ 191 | static inline void 192 | HEAP(delete)(heap_t *heap, struct heap_node *value_node); 193 | 194 | /** 195 | * Heapify the tree after an update of the value under the value_node pointer. 196 | */ 197 | static inline void 198 | HEAP(update)(heap_t *heap, struct heap_node *value_node); 199 | 200 | /** 201 | * Heapify the tree after updating all values. 202 | */ 203 | static inline void 204 | HEAP(update_all)(heap_t *heap); 205 | 206 | /** 207 | * Heap iterator init. 208 | */ 209 | static inline void 210 | HEAP(iterator_init)(heap_t *heap, struct heap_iterator *it); 211 | 212 | /** 213 | * Heap iterator next. 214 | */ 215 | static inline struct heap_node * 216 | HEAP(iterator_next) (struct heap_iterator *it); 217 | 218 | /* Routines. The functions below are internal and not intended for the ordinary user. */ 219 | 220 | /* 221 | * Update the backlink in the given heap_node structure.
222 | */ 223 | static inline void 224 | HEAP(update_link)(heap_t *heap, heap_off_t pos); 225 | 226 | /** 227 | * Sift up the current node. 228 | */ 229 | static inline void 230 | HEAP(sift_up)(heap_t *heap, struct heap_node *node); 231 | 232 | /** 233 | * Sift down the current node. 234 | */ 235 | static inline void 236 | HEAP(sift_down)(heap_t *heap, struct heap_node *node); 237 | 238 | /* Debug functions */ 239 | 240 | /** 241 | * Check that the heap invariants hold. 242 | */ 243 | static inline int /* inline to suppress unused-function warnings */ 244 | HEAP(check)(heap_t *heap); 245 | 246 | 247 | /* Function definitions. */ 248 | 249 | /** 250 | * Init heap. 251 | */ 252 | static inline void 253 | HEAP(create)(heap_t *heap) 254 | { 255 | heap->size = 0; 256 | heap->capacity = 0; 257 | heap->harr = NULL; 258 | } 259 | 260 | /** 261 | * Destroy the current heap. 262 | */ 263 | static inline void 264 | HEAP(destroy)(heap_t *heap) 265 | { 266 | free(heap->harr); 267 | } 268 | 269 | /* 270 | * Update the backlink in the given heap_node structure. 271 | */ 272 | static inline void 273 | HEAP(update_link)(heap_t *heap, heap_off_t pos) 274 | { 275 | heap->harr[pos]->pos = pos; 276 | } 277 | 278 | /** 279 | * Sift up the current node. 280 | */ 281 | static inline void 282 | HEAP(sift_up)(heap_t *heap, struct heap_node *node) 283 | { 284 | heap_off_t curr_pos = node->pos, parent = (curr_pos - 1) / 2; 285 | 286 | while (curr_pos > 0 && HEAP_LESS(heap, node, heap->harr[parent])) { 287 | 288 | node = heap->harr[curr_pos]; 289 | heap->harr[curr_pos] = heap->harr[parent]; 290 | HEAP(update_link)(heap, curr_pos); 291 | heap->harr[parent] = node; 292 | HEAP(update_link)(heap, parent); 293 | 294 | curr_pos = parent; 295 | parent = (curr_pos - 1) / 2; 296 | /* overflow can occur here, but it is harmless: parent is only used while curr_pos > 0 */ 297 | } 298 | } 299 | 300 | /** 301 | * Sift down the current node. 302 | */ 303 | static inline void 304 | HEAP(sift_down)(heap_t *heap, struct heap_node *node) 305 | { 306 | heap_off_t curr_pos = node->pos, left, right; 307 | heap_off_t min_child; 308 | 309 | while (true) { 310 | left = 2 * curr_pos + 1; 311 | right = 2 * curr_pos + 2; 312 | min_child = left; 313 | if (right < heap->size && 314 | HEAP_LESS(heap, heap->harr[right], heap->harr[left])) 315 | min_child = right; 316 | 317 | if (left >= heap->size || 318 | HEAP_LESS(heap, 319 | heap->harr[curr_pos], 320 | heap->harr[min_child]) ) 321 | return; 322 | 323 | node = heap->harr[curr_pos]; 324 | heap->harr[curr_pos] = heap->harr[min_child]; 325 | heap->harr[min_child] = node; 326 | HEAP(update_link)(heap, curr_pos); 327 | HEAP(update_link)(heap, min_child); 328 | 329 | curr_pos = min_child; 330 | } 331 | } 332 | 333 | /** 334 | * Increase capacity. 335 | */ 336 | static inline int 337 | HEAP(reserve)(heap_t *heap) 338 | { 339 | heap_off_t capacity = heap->capacity == 0 ? HEAP_INITIAL_CAPACITY : 340 | heap->capacity << 1; 341 | void *harr = realloc(heap->harr, sizeof(struct heap_node *) * capacity); 342 | if (harr == NULL) 343 | return -1; 344 | heap->harr = harr; 345 | heap->capacity = capacity; 346 | return 0; 347 | } 348 | 349 | /** 350 | * Insert value.
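 * A note on behaviour (as implemented below): when the backing array
 * is full it is grown by HEAP(reserve), which doubles the capacity
 * starting from HEAP_INITIAL_CAPACITY; the new node is then sifted
 * up. Returns 0 on success and -1 if realloc() fails.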
351 | */ 352 | static inline int 353 | HEAP(insert)(heap_t *heap, struct heap_node *node) 354 | { 355 | (void) heap; 356 | assert(heap); 357 | 358 | if (heap->size + 1 > heap->capacity) { 359 | if (HEAP(reserve)(heap)) 360 | return -1; 361 | } 362 | 363 | heap->harr[heap->size] = node; 364 | HEAP(update_link)(heap, heap->size++); 365 | HEAP(sift_up)(heap, node); /* heapify */ 366 | 367 | return 0; 368 | } 369 | 370 | /** 371 | * Return the min value without removing it from the heap. 372 | * If the heap is empty, return NULL. 373 | */ 374 | static inline struct heap_node * 375 | HEAP(top)(heap_t *heap) 376 | { 377 | if (heap->size == 0) 378 | return NULL; 379 | return heap->harr[0]; 380 | } 381 | 382 | /** 383 | * Erase the min value. Returns the deleted value, or NULL if the heap is empty. 384 | */ 385 | static inline struct heap_node * 386 | HEAP(pop)(heap_t *heap) 387 | { 388 | if (heap->size == 0) 389 | return NULL; 390 | 391 | struct heap_node *res = heap->harr[0]; 392 | HEAP(delete)(heap, heap->harr[0]); 393 | return res; 394 | } 395 | 396 | /* 397 | * Delete a node from the heap. 398 | */ 399 | static inline void 400 | HEAP(delete)(heap_t *heap, struct heap_node *value_node) 401 | { 402 | if (heap->size == 0) 403 | return; 404 | 405 | heap->size--; 406 | 407 | heap_off_t curr_pos = value_node->pos; 408 | 409 | if (curr_pos == heap->size) 410 | return; 411 | 412 | heap->harr[curr_pos] = heap->harr[heap->size]; 413 | HEAP(update_link)(heap, curr_pos); 414 | HEAP(update)(heap, heap->harr[curr_pos]); 415 | } 416 | 417 | /** 418 | * Heapify the tree after an update of the value under the value_node pointer. 419 | */ 420 | static inline void 421 | HEAP(update)(heap_t *heap, struct heap_node *value_node) 422 | { 423 | /* heapify */ 424 | HEAP(sift_down)(heap, value_node); 425 | HEAP(sift_up)(heap, value_node); 426 | } 427 | 428 | /** 429 | * Heapify the tree after updating all values. 430 | */ 431 | static inline void 432 | HEAP(update_all)(heap_t *heap) 433 | { 434 | if (heap->size <= 1) 435 | return; 436 | 437 | /* Find the parent of the last element. */ 438 | heap_off_t curr_pos = (heap->size - 2) / 2; 439 | 440 | do { 441 | HEAP(sift_down)(heap, heap->harr[curr_pos]); 442 | } while (curr_pos-- > 0); 443 | } 444 | 445 | /** 446 | * Heap iterator init. 447 | */ 448 | static inline void 449 | HEAP(iterator_init)(heap_t *heap, struct heap_iterator *it) 450 | { 451 | it->curr_pos = 0; 452 | it->heap = heap; 453 | } 454 | 455 | /** 456 | * Heap iterator next. 457 | */ 458 | static inline struct heap_node * 459 | HEAP(iterator_next)(struct heap_iterator *it) 460 | { 461 | if (it->curr_pos == it->heap->size) 462 | return NULL; 463 | return it->heap->harr[it->curr_pos++]; 464 | } 465 | 466 | /** 467 | * Check that the heap invariants hold.
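 * That is, verify that no element compares HEAP_LESS than its parent;
 * returns 0 if the heap is valid and -1 on the first violation found.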
468 | */ 469 | static inline int 470 | HEAP(check)(heap_t *heap) 471 | { 472 | heap_off_t left, right, min_child; 473 | for (heap_off_t curr_pos = 0; 474 | 2 * curr_pos + 1 < heap->size; 475 | ++curr_pos) { 476 | 477 | left = 2 * curr_pos + 1; 478 | right = 2 * curr_pos + 2; 479 | min_child = left; 480 | if (right < heap->size && 481 | HEAP_LESS(heap, heap->harr[right], heap->harr[left])) 482 | min_child = right; 483 | 484 | if (HEAP_LESS(heap, 485 | heap->harr[min_child], 486 | heap->harr[curr_pos])) 487 | return -1; 488 | } 489 | 490 | return 0; 491 | } 492 | 493 | #endif /* HEAP_FORWARD_DECLARATION */ 494 | 495 | #undef HEAP_FORWARD_DECLARATION 496 | -------------------------------------------------------------------------------- /shard/ibuf.h: -------------------------------------------------------------------------------- 1 | #ifndef TARANTOOL_SMALL_IBUF_H_INCLUDED 2 | #define TARANTOOL_SMALL_IBUF_H_INCLUDED 3 | /* 4 | * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 5 | * 6 | * Redistribution and use in source and binary forms, with or 7 | * without modification, are permitted provided that the following 8 | * conditions are met: 9 | * 10 | * 1. Redistributions of source code must retain the above 11 | * copyright notice, this list of conditions and the 12 | * following disclaimer. 13 | * 14 | * 2. Redistributions in binary form must reproduce the above 15 | * copyright notice, this list of conditions and the following 16 | * disclaimer in the documentation and/or other materials 17 | * provided with the distribution. 18 | * 19 | * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND 20 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 23 | * <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 24 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 27 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 28 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF 30 | * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 | * SUCH DAMAGE. 32 | */ 33 | #include <stddef.h> 34 | #include <assert.h> 35 | 36 | #if defined(__cplusplus) 37 | extern "C" { 38 | #endif /* defined(__cplusplus) */ 39 | 40 | /** @module Input buffer. */ 41 | 42 | struct slab_cache; 43 | 44 | /* 45 | * Continuous piece of memory to store input. 46 | * Allocated in multiples of 'start_capacity'. 47 | * Maintains position of the data "to be processed". 48 | * 49 | * Typical use case: 50 | * 51 | * struct ibuf *in; 52 | * coio_bread(coio, in, request_len); 53 | * if (ibuf_used(in) >= request_len) { 54 | * process_request(in->rpos, request_len); 55 | * in->rpos += request_len; 56 | * } 57 | */ 58 | struct ibuf 59 | { 60 | struct slab_cache *slabc; 61 | char *buf; 62 | /** Start of input. */ 63 | char *rpos; 64 | /** End of useful input */ 65 | char *wpos; 66 | /** End of buffer. */ 67 | char *end; 68 | size_t start_capacity; 69 | }; 70 | 71 | void 72 | ibuf_create(struct ibuf *ibuf, struct slab_cache *slabc, size_t start_capacity); 73 | 74 | void 75 | ibuf_destroy(struct ibuf *ibuf); 76 | 77 | void 78 | ibuf_reinit(struct ibuf *ibuf); 79 | 80 | /** How much data has been read but not yet parsed.
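 * The buffer maintains the invariant buf <= rpos <= wpos <= end
 * (see the asserts below), so this is simply wpos - rpos.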
*/ 81 | static inline size_t 82 | ibuf_used(struct ibuf *ibuf) 83 | { 84 | assert(ibuf->wpos >= ibuf->rpos); 85 | return ibuf->wpos - ibuf->rpos; 86 | } 87 | 88 | /** How much data can we fit beyond buf->wpos */ 89 | static inline size_t 90 | ibuf_unused(struct ibuf *ibuf) 91 | { 92 | assert(ibuf->wpos <= ibuf->end); 93 | return ibuf->end - ibuf->wpos; 94 | } 95 | 96 | /** How much memory is allocated */ 97 | static inline size_t 98 | ibuf_capacity(struct ibuf *ibuf) 99 | { 100 | return ibuf->end - ibuf->buf; 101 | } 102 | 103 | /** 104 | * Integer value of the position in the buffer - stable 105 | * in case of realloc. 106 | */ 107 | static inline size_t 108 | ibuf_pos(struct ibuf *ibuf) 109 | { 110 | assert(ibuf->buf <= ibuf->rpos); 111 | return ibuf->rpos - ibuf->buf; 112 | } 113 | 114 | /** Forget all cached input. */ 115 | static inline void 116 | ibuf_reset(struct ibuf *ibuf) 117 | { 118 | ibuf->rpos = ibuf->wpos = ibuf->buf; 119 | } 120 | 121 | void * 122 | ibuf_reserve_slow(struct ibuf *ibuf, size_t size); 123 | 124 | static inline void * 125 | ibuf_reserve(struct ibuf *ibuf, size_t size) 126 | { 127 | if (ibuf->wpos + size <= ibuf->end) 128 | return ibuf->wpos; 129 | return ibuf_reserve_slow(ibuf, size); 130 | } 131 | 132 | static inline void * 133 | ibuf_alloc(struct ibuf *ibuf, size_t size) 134 | { 135 | void *ptr; 136 | if (ibuf->wpos + size <= ibuf->end) 137 | ptr = ibuf->wpos; 138 | else { 139 | ptr = ibuf_reserve_slow(ibuf, size); 140 | if (ptr == NULL) 141 | return NULL; 142 | } 143 | ibuf->wpos += size; 144 | return ptr; 145 | } 146 | 147 | static inline void * 148 | ibuf_reserve_cb(void *ctx, size_t *size) 149 | { 150 | struct ibuf *buf = (struct ibuf *) ctx; 151 | void *p = ibuf_reserve(buf, *size ? *size : buf->start_capacity); 152 | *size = ibuf_unused(buf); 153 | return p; 154 | } 155 | 156 | static inline void * 157 | ibuf_alloc_cb(void *ctx, size_t size) 158 | { 159 | return ibuf_alloc((struct ibuf *) ctx, size); 160 | } 161 | 162 | #if defined(__cplusplus) 163 | } /* extern "C" */ 164 | 165 | #include "exception.h" 166 | 167 | /** Reserve space for sz bytes in the input buffer. 
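 * Unlike the plain ibuf_reserve() above, the _xc ("exception")
 * variants below raise OutOfMemory via tnt_raise() instead of
 * returning NULL, which is why they are only compiled under C++.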
*/ 168 | static inline void * 169 | ibuf_reserve_xc(struct ibuf *ibuf, size_t size) 170 | { 171 | void *ptr = ibuf_reserve(ibuf, size); 172 | if (ptr == NULL) 173 | tnt_raise(OutOfMemory, size, "ibuf", "reserve"); 174 | return ptr; 175 | } 176 | 177 | static inline void * 178 | ibuf_alloc_xc(struct ibuf *ibuf, size_t size) 179 | { 180 | void *ptr = ibuf_alloc(ibuf, size); 181 | if (ptr == NULL) 182 | tnt_raise(OutOfMemory, size, "ibuf", "alloc"); 183 | return ptr; 184 | } 185 | 186 | static inline void * 187 | ibuf_reserve_xc_cb(void *ctx, size_t *size) 188 | { 189 | void *ptr = ibuf_reserve_cb(ctx, size); 190 | if (ptr == NULL) 191 | tnt_raise(OutOfMemory, *size, "ibuf", "reserve"); 192 | return ptr; 193 | } 194 | 195 | static inline void * 196 | ibuf_alloc_xc_cb(void *ctx, size_t size) 197 | { 198 | void *ptr = ibuf_alloc_cb(ctx, size); 199 | if (ptr == NULL) 200 | tnt_raise(OutOfMemory, size, "ibuf", "alloc"); 201 | return ptr; 202 | } 203 | 204 | #endif /* defined(__cplusplus) */ 205 | 206 | #endif /* TARANTOOL_SMALL_IBUF_H_INCLUDED */ 207 | -------------------------------------------------------------------------------- /test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail # Strict shell 4 | 5 | curl -s https://packagecloud.io/install/repositories/tarantool/1_7/script.deb.sh | sudo bash 6 | sudo apt-get update > /dev/null 7 | sudo apt-get -q -y install tarantool tarantool-dev libmsgpuck-dev 8 | 9 | git submodule update --init --recursive 10 | pip install -r test-run/requirements.txt 11 | pip install git+https://github.com/tarantool/tarantool-python.git 12 | sudo chown $USER:$USER /var/lib/tarantool/ 13 | cmake . 14 | make test 15 | -------------------------------------------------------------------------------- /test/.tarantoolctl: -------------------------------------------------------------------------------- 1 | -- Options for test-run tarantoolctl 2 | local ver = string.sub(require('tarantool').version, 1,3) 3 | local storage = 'sophia_dir' 4 | if ver ~= '1.6' then 5 | storage = 'vinyl_dir' 6 | end 7 | 8 | local workdir = os.getenv('TEST_WORKDIR') 9 | default_cfg = { 10 | pid_file = workdir, 11 | wal_dir = workdir, 12 | snap_dir = workdir, 13 | logger = workdir, 14 | background = false, 15 | } 16 | default_cfg[storage] = workdir 17 | 18 | instance_dir = workdir 19 | 20 | -- vim: set ft=lua : 21 | -------------------------------------------------------------------------------- /test/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | if(POLICY CMP0037) 2 | cmake_policy(SET CMP0037 OLD) 3 | endif(POLICY CMP0037) 4 | 5 | add_custom_target(test 6 | COMMAND ${PROJECT_SOURCE_DIR}/test/test-run.py -j -1 7 | --builddir=${PROJECT_BINARY_DIR} 8 | --vardir=${PROJECT_BINARY_DIR}/test/var) 9 | -------------------------------------------------------------------------------- /test/box/proxy.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | os = require('os') 3 | 4 | box.cfg{ 5 | listen = os.getenv("LISTEN"), 6 | slab_alloc_arena = 0.1, 7 | pid_file = "tarantool.pid", 8 | rows_per_wal = 50 9 | } 10 | 11 | require('console').listen(os.getenv('ADMIN')) 12 | -------------------------------------------------------------------------------- /test/join/join1.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = 
require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '0' }; 10 | { uri = 'localhost:33132', zone = '0' }; 11 | { uri = 'localhost:33133', zone = '1' }; 12 | { uri = 'localhost:33134', zone = '1' }; 13 | { uri = 'localhost:33135', zone = '1' }; 14 | }; 15 | login = 'tester'; 16 | password = 'pass'; 17 | monitor = false; 18 | redundancy = 2; 19 | replication = true; 20 | binary = 33131; 21 | } 22 | 23 | box.cfg { 24 | slab_alloc_arena = 0.1; 25 | listen = cfg.binary; 26 | custom_proc_title = "joined_replica"; 27 | replication_source="localhost:33134"; 28 | } 29 | 30 | require('console').listen(os.getenv('ADMIN')) 31 | 32 | -- init shards 33 | fiber.create(function() 34 | shard.init(cfg) 35 | end) 36 | 37 | -------------------------------------------------------------------------------- /test/join/join2.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '0' }; 10 | { uri = 'localhost:33132', zone = '0' }; 11 | { uri = 'localhost:33133', zone = '1' }; 12 | { uri = 'localhost:33134', zone = '1' }; 13 | { uri = 'localhost:33135', zone = '1' }; 14 | }; 15 | login = 'tester'; 16 | password = 'pass'; 17 | redundancy = 2; 18 | replication = true; 19 | binary = 33132; 20 | } 21 | 22 | box.cfg { 23 | slab_alloc_arena = 0.1; 24 | listen = cfg.binary; 25 | custom_proc_title = "joined_replica"; 26 | replication_source="localhost:33135"; 27 | } 28 | 29 | require('console').listen(os.getenv('ADMIN')) 30 | 31 | -- init shards 32 | fiber.create(function() 33 | shard.init(cfg) 34 | end) 35 | 36 | -------------------------------------------------------------------------------- /test/join/master.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '0' }; 10 | { uri = 'localhost:33132', zone = '0' }; 11 | { uri = 'localhost:33133', zone = '1' }; 12 | { uri = 'localhost:33134', zone = '1' }; 13 | { uri = 'localhost:33135', zone = '1' }; 14 | }; 15 | login = 'tester'; 16 | password = 'pass'; 17 | monitor = false; 18 | redundancy = 2; 19 | replication = true; 20 | binary = 33130; 21 | } 22 | 23 | box.cfg { 24 | slab_alloc_arena = 0.1; 25 | listen = cfg.binary; 26 | custom_proc_title = "master" 27 | } 28 | 29 | function cluster(operation) 30 | for i=1, #cfg.servers - 1 do 31 | operation(tostring(i)) 32 | end 33 | end 34 | 35 | require('console').listen(os.getenv('ADMIN')) 36 | 37 | if not box.space.demo then 38 | box.schema.user.create(cfg.login, { password = cfg.password }) 39 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 40 | box.schema.user.grant('guest', 'read,write,execute', 'universe') 41 | 42 | local demo = box.schema.create_space('demo') 43 | demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 44 | end 45 | 46 | -- init shards 47 | fiber.create(function() 48 | shard.init(cfg) 49 | end) 50 | 51 | -------------------------------------------------------------------------------- /test/join/master1.lua: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '0' }; 10 | { uri = 'localhost:33132', zone = '0' }; 11 | { uri = 'localhost:33133', zone = '1' }; 12 | { uri = 'localhost:33134', zone = '1' }; 13 | { uri = 'localhost:33135', zone = '1' }; 14 | }; 15 | login = 'tester'; 16 | password = 'pass'; 17 | monitor = false; 18 | redundancy = 2; 19 | replication = true; 20 | binary = 33131; 21 | } 22 | 23 | box.cfg { 24 | slab_alloc_arena = 0.1; 25 | listen = cfg.binary; 26 | custom_proc_title = "master" 27 | } 28 | 29 | require('console').listen(os.getenv('ADMIN')) 30 | 31 | if not box.space.demo then 32 | box.schema.user.create(cfg.login, { password = cfg.password }) 33 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 34 | box.schema.user.grant('guest', 'read,write,execute', 'universe') 35 | 36 | local demo = box.schema.create_space('demo') 37 | demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 38 | end 39 | 40 | -- init shards 41 | fiber.create(function() 42 | shard.init(cfg) 43 | end) 44 | 45 | -------------------------------------------------------------------------------- /test/join/master2.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '0' }; 10 | { uri = 'localhost:33132', zone = '0' }; 11 | { uri = 'localhost:33133', zone = '1' }; 12 | { uri = 'localhost:33134', zone = '1' }; 13 | { uri = 'localhost:33135', zone = '1' }; 14 | }; 15 | login = 'tester'; 16 | password = 'pass'; 17 | redundancy = 2; 18 | monitor = false; 19 | replication = true; 20 | binary = 33132; 21 | } 22 | 23 | box.cfg { 24 | slab_alloc_arena = 0.1; 25 | listen = cfg.binary; 26 | custom_proc_title = "master" 27 | } 28 | 29 | require('console').listen(os.getenv('ADMIN')) 30 | 31 | if not box.space.demo then 32 | box.schema.user.create(cfg.login, { password = cfg.password }) 33 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 34 | box.schema.user.grant('guest', 'read,write,execute', 'universe') 35 | 36 | local demo = box.schema.create_space('demo') 37 | demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 38 | end 39 | 40 | -- init shards 41 | fiber.create(function() 42 | shard.init(cfg) 43 | end) 44 | 45 | -------------------------------------------------------------------------------- /test/join/master3.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '0' }; 10 | { uri = 'localhost:33132', zone = '0' }; 11 | { uri = 'localhost:33133', zone = '1' }; 12 | { uri = 'localhost:33134', zone = '1' }; 13 | { uri = 'localhost:33135', zone = '1' }; 14 | }; 15 | login = 'tester'; 16 | password = 'pass'; 17 | redundancy = 2; 18 | monitor = false; 19 | replication = true; 20 | binary = 33133; 21 | } 22 | 23 | box.cfg { 24 | slab_alloc_arena = 0.1; 25 | listen = cfg.binary; 26 | 
custom_proc_title = "replica"; 27 | replication_source="localhost:33130"; 28 | } 29 | 30 | require('console').listen(os.getenv('ADMIN')) 31 | 32 | -- if not box.space.demo then 33 | -- box.schema.user.create(cfg.login, { password = cfg.password }) 34 | -- box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 35 | -- 36 | -- local demo = box.schema.create_space('demo') 37 | -- demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 38 | --end 39 | 40 | -- init shards 41 | fiber.create(function() 42 | shard.init(cfg) 43 | end) 44 | 45 | -------------------------------------------------------------------------------- /test/join/master4.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '0' }; 10 | { uri = 'localhost:33132', zone = '0' }; 11 | { uri = 'localhost:33133', zone = '1' }; 12 | { uri = 'localhost:33134', zone = '1' }; 13 | { uri = 'localhost:33135', zone = '1' }; 14 | }; 15 | login = 'tester'; 16 | password = 'pass'; 17 | monitor = false; 18 | redundancy = 2; 19 | replication = true; 20 | binary = 33134; 21 | } 22 | 23 | box.cfg { 24 | slab_alloc_arena = 0.1; 25 | listen = cfg.binary; 26 | custom_proc_title = "replica"; 27 | replication_source="localhost:33131"; 28 | } 29 | 30 | require('console').listen(os.getenv('ADMIN')) 31 | 32 | -- init shards 33 | fiber.create(function() 34 | shard.init(cfg) 35 | end) 36 | 37 | -------------------------------------------------------------------------------- /test/join/master5.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '0' }; 10 | { uri = 'localhost:33132', zone = '0' }; 11 | { uri = 'localhost:33133', zone = '1' }; 12 | { uri = 'localhost:33134', zone = '1' }; 13 | { uri = 'localhost:33135', zone = '1' }; 14 | }; 15 | login = 'tester'; 16 | password = 'pass'; 17 | redundancy = 2; 18 | monitor = false; 19 | replication = true; 20 | binary = 33135; 21 | } 22 | 23 | box.cfg { 24 | slab_alloc_arena = 0.1; 25 | listen = cfg.binary; 26 | custom_proc_title = "replica"; 27 | replication_source = "localhost:33132" 28 | } 29 | 30 | require('console').listen(os.getenv('ADMIN')) 31 | 32 | -- init shards 33 | fiber.create(function() 34 | shard.init(cfg) 35 | end) 36 | 37 | -------------------------------------------------------------------------------- /test/join/multi_node.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("setopt delimiter ';'") 8 | --- 9 | - true 10 | ... 11 | -- start shards 12 | cluster(function(id) 13 | test_run:cmd("create server master"..id.." with script='join/master"..id..".lua'") 14 | test_run:cmd("start server master"..id) 15 | end); 16 | --- 17 | ... 18 | test_run:cmd("setopt delimiter ''"); 19 | --- 20 | - true 21 | ... 22 | shard.wait_connection() 23 | --- 24 | ... 25 | for i=1, 10 do shard.demo:insert{i, 'test'} end 26 | --- 27 | ... 
28 | -- check data 29 | box.space.demo:select() 30 | --- 31 | - - [1, 'test'] 32 | - [2, 'test'] 33 | - [7, 'test'] 34 | - [8, 'test'] 35 | ... 36 | test_run:cmd("switch master3") 37 | --- 38 | - true 39 | ... 40 | box.space.demo:select() 41 | --- 42 | - - [1, 'test'] 43 | - [2, 'test'] 44 | - [7, 'test'] 45 | - [8, 'test'] 46 | ... 47 | test_run:cmd("switch master1") 48 | --- 49 | - true 50 | ... 51 | box.space.demo:select() 52 | --- 53 | - - [4, 'test'] 54 | - [5, 'test'] 55 | ... 56 | test_run:cmd("switch master4") 57 | --- 58 | - true 59 | ... 60 | box.space.demo:select() 61 | --- 62 | - - [4, 'test'] 63 | - [5, 'test'] 64 | ... 65 | test_run:cmd("switch master2") 66 | --- 67 | - true 68 | ... 69 | box.space.demo:select() 70 | --- 71 | - - [3, 'test'] 72 | - [6, 'test'] 73 | - [9, 'test'] 74 | - [10, 'test'] 75 | ... 76 | test_run:cmd("switch master5") 77 | --- 78 | - true 79 | ... 80 | box.space.demo:select() 81 | --- 82 | - - [3, 'test'] 83 | - [6, 'test'] 84 | - [9, 'test'] 85 | - [10, 'test'] 86 | ... 87 | test_run:cmd("switch default") 88 | --- 89 | - true 90 | ... 91 | -- stop replica 92 | test_run:cmd("stop server master1") 93 | --- 94 | - true 95 | ... 96 | test_run:cmd("stop server master2") 97 | --- 98 | - true 99 | ... 100 | -- add tuples 101 | for i=11, 20 do shard.demo:insert{i, 'join_test'} end 102 | --- 103 | ... 104 | -- join replica 105 | test_run:cmd("create server join1 with script='join/join1.lua'") 106 | --- 107 | - true 108 | ... 109 | test_run:cmd("start server join1") 110 | --- 111 | - true 112 | ... 113 | test_run:cmd("create server join2 with script='join/join2.lua'") 114 | --- 115 | - true 116 | ... 117 | test_run:cmd("start server join2") 118 | --- 119 | - true 120 | ... 121 | status = shard_status() 122 | --- 123 | ... 124 | status 125 | --- 126 | - maintenance: [] 127 | offline: 128 | - uri: localhost:33131 129 | id: 2 130 | - uri: localhost:33132 131 | id: 3 132 | online: 133 | - uri: localhost:33133 134 | id: 4 135 | - uri: localhost:33130 136 | id: 1 137 | - uri: localhost:33134 138 | id: 5 139 | - uri: localhost:33135 140 | id: 6 141 | ... 142 | _ = remote_join(status.offline[1].id) 143 | --- 144 | ... 145 | _ = remote_join(status.offline[2].id) 146 | --- 147 | ... 148 | shard_status() 149 | --- 150 | - maintenance: [] 151 | offline: [] 152 | online: 153 | - uri: localhost:33133 154 | id: 4 155 | - uri: localhost:33130 156 | id: 1 157 | - uri: localhost:33131 158 | id: 2 159 | - uri: localhost:33134 160 | id: 5 161 | - uri: localhost:33132 162 | id: 3 163 | - uri: localhost:33135 164 | id: 6 165 | ... 166 | -- check joined replica 167 | box.space.demo:select() 168 | --- 169 | - - [1, 'test'] 170 | - [2, 'test'] 171 | - [7, 'test'] 172 | - [8, 'test'] 173 | - [13, 'join_test'] 174 | - [14, 'join_test'] 175 | - [15, 'join_test'] 176 | - [20, 'join_test'] 177 | ... 178 | test_run:cmd("switch master3") 179 | --- 180 | - true 181 | ... 182 | box.space.demo:select() 183 | --- 184 | - - [1, 'test'] 185 | - [2, 'test'] 186 | - [7, 'test'] 187 | - [8, 'test'] 188 | - [13, 'join_test'] 189 | - [14, 'join_test'] 190 | - [15, 'join_test'] 191 | - [20, 'join_test'] 192 | ... 193 | test_run:cmd("switch master4") 194 | --- 195 | - true 196 | ... 197 | box.space.demo:select() 198 | --- 199 | - - [4, 'test'] 200 | - [5, 'test'] 201 | - [12, 'join_test'] 202 | - [17, 'join_test'] 203 | ... 204 | test_run:cmd("switch join1") 205 | --- 206 | - true 207 | ... 
208 | box.space.demo:select() 209 | --- 210 | - - [4, 'test'] 211 | - [5, 'test'] 212 | - [12, 'join_test'] 213 | - [17, 'join_test'] 214 | ... 215 | test_run:cmd("switch master5") 216 | --- 217 | - true 218 | ... 219 | box.space.demo:select() 220 | --- 221 | - - [3, 'test'] 222 | - [6, 'test'] 223 | - [9, 'test'] 224 | - [10, 'test'] 225 | - [11, 'join_test'] 226 | - [16, 'join_test'] 227 | - [18, 'join_test'] 228 | - [19, 'join_test'] 229 | ... 230 | test_run:cmd("switch join2") 231 | --- 232 | - true 233 | ... 234 | box.space.demo:select() 235 | --- 236 | - - [3, 'test'] 237 | - [6, 'test'] 238 | - [9, 'test'] 239 | - [10, 'test'] 240 | - [11, 'join_test'] 241 | - [16, 'join_test'] 242 | - [18, 'join_test'] 243 | - [19, 'join_test'] 244 | ... 245 | test_run:cmd("switch default") 246 | --- 247 | - true 248 | ... 249 | -- cleanup 250 | test_run:cmd("setopt delimiter ';'") 251 | --- 252 | - true 253 | ... 254 | cluster(function(id) 255 | if id ~= '1' and id ~= '2' then 256 | _ = test_run:cmd("stop server master"..id) 257 | end 258 | test_run:cmd("cleanup server master"..id) 259 | end); 260 | --- 261 | ... 262 | _ = test_run:cmd("stop server join1") 263 | test_run:cmd("cleanup server join1") 264 | _ = test_run:cmd("stop server join2") 265 | test_run:cmd("cleanup server join2") 266 | test_run:cmd("setopt delimiter ''"); 267 | --- 268 | ... 269 | test_run:cmd("restart server default with cleanup=1") 270 | -------------------------------------------------------------------------------- /test/join/multi_node.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | 4 | test_run:cmd("setopt delimiter ';'") 5 | -- start shards 6 | cluster(function(id) 7 | test_run:cmd("create server master"..id.." 
with script='join/master"..id..".lua'") 8 | test_run:cmd("start server master"..id) 9 | end); 10 | test_run:cmd("setopt delimiter ''"); 11 | shard.wait_connection() 12 | 13 | for i=1, 10 do shard.demo:insert{i, 'test'} end 14 | 15 | -- check data 16 | box.space.demo:select() 17 | test_run:cmd("switch master3") 18 | box.space.demo:select() 19 | 20 | test_run:cmd("switch master1") 21 | box.space.demo:select() 22 | test_run:cmd("switch master4") 23 | box.space.demo:select() 24 | 25 | test_run:cmd("switch master2") 26 | box.space.demo:select() 27 | test_run:cmd("switch master5") 28 | box.space.demo:select() 29 | test_run:cmd("switch default") 30 | 31 | -- stop replica 32 | test_run:cmd("stop server master1") 33 | test_run:cmd("stop server master2") 34 | -- add tuples 35 | for i=11, 20 do shard.demo:insert{i, 'join_test'} end 36 | 37 | -- join replica 38 | test_run:cmd("create server join1 with script='join/join1.lua'") 39 | test_run:cmd("start server join1") 40 | test_run:cmd("create server join2 with script='join/join2.lua'") 41 | test_run:cmd("start server join2") 42 | 43 | status = shard_status() 44 | status 45 | _ = remote_join(status.offline[1].id) 46 | _ = remote_join(status.offline[2].id) 47 | shard_status() 48 | 49 | -- check joined replica 50 | box.space.demo:select() 51 | test_run:cmd("switch master3") 52 | box.space.demo:select() 53 | 54 | test_run:cmd("switch master4") 55 | box.space.demo:select() 56 | test_run:cmd("switch join1") 57 | box.space.demo:select() 58 | 59 | test_run:cmd("switch master5") 60 | box.space.demo:select() 61 | test_run:cmd("switch join2") 62 | box.space.demo:select() 63 | test_run:cmd("switch default") 64 | 65 | -- cleanup 66 | test_run:cmd("setopt delimiter ';'") 67 | cluster(function(id) 68 | if id ~= '1' and id ~= '2' then 69 | _ = test_run:cmd("stop server master"..id) 70 | end 71 | test_run:cmd("cleanup server master"..id) 72 | end); 73 | 74 | _ = test_run:cmd("stop server join1") 75 | test_run:cmd("cleanup server join1") 76 | _ = test_run:cmd("stop server join2") 77 | test_run:cmd("cleanup server join2") 78 | test_run:cmd("setopt delimiter ''"); 79 | test_run:cmd("restart server default with cleanup=1") 80 | -------------------------------------------------------------------------------- /test/join/multi_pair.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("setopt delimiter ';'") 8 | --- 9 | - true 10 | ... 11 | -- start shards 12 | cluster(function(id) 13 | test_run:cmd("create server master"..id.." with script='join/master"..id..".lua'") 14 | test_run:cmd("start server master"..id) 15 | end); 16 | --- 17 | ... 18 | test_run:cmd("setopt delimiter ''"); 19 | --- 20 | - true 21 | ... 22 | shard.wait_connection() 23 | --- 24 | ... 25 | for i=1, 10 do shard.demo:insert{i, 'test'} end 26 | --- 27 | ... 28 | -- check data 29 | box.space.demo:select() 30 | --- 31 | - - [1, 'test'] 32 | - [2, 'test'] 33 | - [7, 'test'] 34 | - [8, 'test'] 35 | ... 36 | test_run:cmd("switch master3") 37 | --- 38 | - true 39 | ... 40 | box.space.demo:select() 41 | --- 42 | - - [1, 'test'] 43 | - [2, 'test'] 44 | - [7, 'test'] 45 | - [8, 'test'] 46 | ... 47 | test_run:cmd("switch master1") 48 | --- 49 | - true 50 | ... 51 | box.space.demo:select() 52 | --- 53 | - - [4, 'test'] 54 | - [5, 'test'] 55 | ... 56 | test_run:cmd("switch master4") 57 | --- 58 | - true 59 | ... 
60 | box.space.demo:select() 61 | --- 62 | - - [4, 'test'] 63 | - [5, 'test'] 64 | ... 65 | test_run:cmd("switch master2") 66 | --- 67 | - true 68 | ... 69 | box.space.demo:select() 70 | --- 71 | - - [3, 'test'] 72 | - [6, 'test'] 73 | - [9, 'test'] 74 | - [10, 'test'] 75 | ... 76 | test_run:cmd("switch master5") 77 | --- 78 | - true 79 | ... 80 | box.space.demo:select() 81 | --- 82 | - - [3, 'test'] 83 | - [6, 'test'] 84 | - [9, 'test'] 85 | - [10, 'test'] 86 | ... 87 | test_run:cmd("switch default") 88 | --- 89 | - true 90 | ... 91 | -- stop 2 and 3 pairs 92 | test_run:cmd("stop server master1") 93 | --- 94 | - true 95 | ... 96 | test_run:cmd("stop server master4") 97 | --- 98 | - true 99 | ... 100 | test_run:cmd("stop server master2") 101 | --- 102 | - true 103 | ... 104 | test_run:cmd("stop server master5") 105 | --- 106 | - true 107 | ... 108 | status = shard_status() 109 | --- 110 | ... 111 | _ = remote_unjoin(status.offline[1].id) 112 | --- 113 | ... 114 | _ = remote_unjoin(status.offline[2].id) 115 | --- 116 | ... 117 | _ = remote_unjoin(status.offline[3].id) 118 | --- 119 | ... 120 | _ = remote_unjoin(status.offline[4].id) 121 | --- 122 | ... 123 | status = shard_status() 124 | --- 125 | ... 126 | status 127 | --- 128 | - maintenance: 129 | - null 130 | - true 131 | - true 132 | - null 133 | - true 134 | - true 135 | offline: 136 | - uri: localhost:33134 137 | id: 5 138 | - uri: localhost:33131 139 | id: 2 140 | - uri: localhost:33135 141 | id: 6 142 | - uri: localhost:33132 143 | id: 3 144 | online: 145 | - uri: localhost:33133 146 | id: 4 147 | - uri: localhost:33130 148 | id: 1 149 | ... 150 | -- add tuples 151 | result = shard.demo:insert{12, 'test_pair'} 152 | --- 153 | ... 154 | result 155 | --- 156 | - [] 157 | ... 158 | result = shard.demo:insert{19, 'test_pair'} 159 | --- 160 | ... 161 | result 162 | --- 163 | - [] 164 | ... 165 | -- start servers 166 | test_run:cmd("start server master1") 167 | --- 168 | - true 169 | ... 170 | test_run:cmd("start server master4") 171 | --- 172 | - true 173 | ... 174 | test_run:cmd("start server master2") 175 | --- 176 | - true 177 | ... 178 | test_run:cmd("start server master5") 179 | --- 180 | - true 181 | ... 182 | _ = remote_join(status.offline[2].id) 183 | --- 184 | ... 185 | _ = remote_join(status.offline[1].id) 186 | --- 187 | ... 188 | _ = remote_join(status.offline[4].id) 189 | --- 190 | ... 191 | _ = remote_join(status.offline[3].id) 192 | --- 193 | ... 194 | shard_status() 195 | --- 196 | - maintenance: [] 197 | offline: [] 198 | online: 199 | - uri: localhost:33133 200 | id: 4 201 | - uri: localhost:33130 202 | id: 1 203 | - uri: localhost:33134 204 | id: 5 205 | - uri: localhost:33131 206 | id: 2 207 | - uri: localhost:33135 208 | id: 6 209 | - uri: localhost:33132 210 | id: 3 211 | ... 212 | shard.demo:insert{12, 'test_pair'} 213 | --- 214 | - - [12, 'test_pair'] 215 | ... 216 | shard.demo:insert{19, 'test_pair'} 217 | --- 218 | - - [19, 'test_pair'] 219 | ... 220 | -- check joined replica 221 | box.space.demo:select() 222 | --- 223 | - - [1, 'test'] 224 | - [2, 'test'] 225 | - [7, 'test'] 226 | - [8, 'test'] 227 | ... 228 | test_run:cmd("switch master3") 229 | --- 230 | - true 231 | ... 232 | box.space.demo:select() 233 | --- 234 | - - [1, 'test'] 235 | - [2, 'test'] 236 | - [7, 'test'] 237 | - [8, 'test'] 238 | ... 239 | test_run:cmd("switch master1") 240 | --- 241 | - true 242 | ... 243 | box.space.demo:select() 244 | --- 245 | - - [4, 'test'] 246 | - [5, 'test'] 247 | - [12, 'test_pair'] 248 | ... 
249 | test_run:cmd("switch master4") 250 | --- 251 | - true 252 | ... 253 | box.space.demo:select() 254 | --- 255 | - - [4, 'test'] 256 | - [5, 'test'] 257 | - [12, 'test_pair'] 258 | ... 259 | test_run:cmd("switch master2") 260 | --- 261 | - true 262 | ... 263 | box.space.demo:select() 264 | --- 265 | - - [3, 'test'] 266 | - [6, 'test'] 267 | - [9, 'test'] 268 | - [10, 'test'] 269 | - [19, 'test_pair'] 270 | ... 271 | test_run:cmd("switch master5") 272 | --- 273 | - true 274 | ... 275 | box.space.demo:select() 276 | --- 277 | - - [3, 'test'] 278 | - [6, 'test'] 279 | - [9, 'test'] 280 | - [10, 'test'] 281 | - [19, 'test_pair'] 282 | ... 283 | test_run:cmd("switch default") 284 | --- 285 | - true 286 | ... 287 | -- cleanup 288 | test_run:cmd("setopt delimiter ';'") 289 | --- 290 | - true 291 | ... 292 | cluster(function(id) 293 | _ = test_run:cmd("stop server master"..id) 294 | test_run:cmd("cleanup server master"..id) 295 | end); 296 | --- 297 | ... 298 | test_run:cmd("setopt delimiter ''"); 299 | --- 300 | - true 301 | ... 302 | test_run:cmd("restart server default with cleanup=1") 303 | -------------------------------------------------------------------------------- /test/join/multi_pair.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | 4 | test_run:cmd("setopt delimiter ';'") 5 | -- start shards 6 | cluster(function(id) 7 | test_run:cmd("create server master"..id.." with script='join/master"..id..".lua'") 8 | test_run:cmd("start server master"..id) 9 | end); 10 | test_run:cmd("setopt delimiter ''"); 11 | shard.wait_connection() 12 | 13 | 14 | for i=1, 10 do shard.demo:insert{i, 'test'} end 15 | 16 | -- check data 17 | box.space.demo:select() 18 | test_run:cmd("switch master3") 19 | box.space.demo:select() 20 | 21 | test_run:cmd("switch master1") 22 | box.space.demo:select() 23 | test_run:cmd("switch master4") 24 | box.space.demo:select() 25 | 26 | test_run:cmd("switch master2") 27 | box.space.demo:select() 28 | test_run:cmd("switch master5") 29 | box.space.demo:select() 30 | test_run:cmd("switch default") 31 | 32 | -- stop 2 and 3 pairs 33 | test_run:cmd("stop server master1") 34 | test_run:cmd("stop server master4") 35 | test_run:cmd("stop server master2") 36 | test_run:cmd("stop server master5") 37 | status = shard_status() 38 | _ = remote_unjoin(status.offline[1].id) 39 | _ = remote_unjoin(status.offline[2].id) 40 | _ = remote_unjoin(status.offline[3].id) 41 | _ = remote_unjoin(status.offline[4].id) 42 | status = shard_status() 43 | status 44 | 45 | -- add tuples 46 | result = shard.demo:insert{12, 'test_pair'} 47 | result 48 | result = shard.demo:insert{19, 'test_pair'} 49 | result 50 | 51 | -- start servers 52 | test_run:cmd("start server master1") 53 | test_run:cmd("start server master4") 54 | test_run:cmd("start server master2") 55 | test_run:cmd("start server master5") 56 | 57 | _ = remote_join(status.offline[2].id) 58 | _ = remote_join(status.offline[1].id) 59 | _ = remote_join(status.offline[4].id) 60 | _ = remote_join(status.offline[3].id) 61 | shard_status() 62 | shard.demo:insert{12, 'test_pair'} 63 | shard.demo:insert{19, 'test_pair'} 64 | 65 | -- check joined replica 66 | box.space.demo:select() 67 | test_run:cmd("switch master3") 68 | box.space.demo:select() 69 | 70 | test_run:cmd("switch master1") 71 | box.space.demo:select() 72 | test_run:cmd("switch master4") 73 | box.space.demo:select() 74 | 75 | test_run:cmd("switch master2") 76 | box.space.demo:select() 77 | 
test_run:cmd("switch master5") 78 | box.space.demo:select() 79 | test_run:cmd("switch default") 80 | -- cleanup 81 | test_run:cmd("setopt delimiter ';'") 82 | cluster(function(id) 83 | _ = test_run:cmd("stop server master"..id) 84 | test_run:cmd("cleanup server master"..id) 85 | end); 86 | 87 | test_run:cmd("setopt delimiter ''"); 88 | test_run:cmd("restart server default with cleanup=1") 89 | -------------------------------------------------------------------------------- /test/join/nd_single.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | executed = false 8 | --- 9 | ... 10 | test_run:cmd("setopt delimiter ';'") 11 | --- 12 | - true 13 | ... 14 | -- start shards 15 | cluster(function(id) 16 | test_run:cmd("create server master"..id.." with script='join/master"..id..".lua'") 17 | test_run:cmd("start server master"..id) 18 | end); 19 | --- 20 | ... 21 | function on_join(self) 22 | if not executed then 23 | test_run:cmd('stop server master2') 24 | executed = true 25 | end 26 | end; 27 | --- 28 | ... 29 | test_run:cmd("setopt delimiter ''"); 30 | --- 31 | - true 32 | ... 33 | shard.wait_connection() 34 | --- 35 | ... 36 | _ = rawset(shard, 'on_action', on_join) 37 | --- 38 | ... 39 | for i=1, 10 do shard.demo:insert{i, 'test'} end 40 | --- 41 | ... 42 | -- check data 43 | box.space.demo:select() 44 | --- 45 | - - [1, 'test'] 46 | - [2, 'test'] 47 | - [7, 'test'] 48 | - [8, 'test'] 49 | ... 50 | test_run:cmd("switch master3") 51 | --- 52 | - true 53 | ... 54 | box.space.demo:select() 55 | --- 56 | - - [1, 'test'] 57 | - [2, 'test'] 58 | - [7, 'test'] 59 | - [8, 'test'] 60 | ... 61 | test_run:cmd("switch master1") 62 | --- 63 | - true 64 | ... 65 | box.space.demo:select() 66 | --- 67 | - - [4, 'test'] 68 | - [5, 'test'] 69 | ... 70 | test_run:cmd("switch master4") 71 | --- 72 | - true 73 | ... 74 | box.space.demo:select() 75 | --- 76 | - - [4, 'test'] 77 | - [5, 'test'] 78 | ... 79 | test_run:cmd("switch master2") 80 | --- 81 | - true 82 | ... 83 | box.space.demo:select() 84 | --- 85 | - - [3, 'test'] 86 | - [6, 'test'] 87 | - [9, 'test'] 88 | - [10, 'test'] 89 | ... 90 | test_run:cmd("switch master5") 91 | --- 92 | - true 93 | ... 94 | box.space.demo:select() 95 | --- 96 | - - [3, 'test'] 97 | - [6, 'test'] 98 | - [9, 'test'] 99 | - [10, 'test'] 100 | ... 101 | test_run:cmd("switch default") 102 | --- 103 | - true 104 | ... 105 | -- stop replica 106 | test_run:cmd("stop server master1") 107 | --- 108 | - true 109 | ... 110 | -- add tuples 111 | for i=11, 20 do shard.demo:insert{i, 'join_test'} end 112 | --- 113 | ... 114 | -- join replica 115 | test_run:cmd("create server join1 with script='join/join1.lua'") 116 | --- 117 | - true 118 | ... 119 | test_run:cmd("start server join1") 120 | --- 121 | - true 122 | ... 123 | status = shard_status() 124 | --- 125 | ... 126 | status 127 | --- 128 | - maintenance: [] 129 | offline: 130 | - uri: localhost:33131 131 | id: 2 132 | online: 133 | - uri: localhost:33133 134 | id: 4 135 | - uri: localhost:33130 136 | id: 1 137 | - uri: localhost:33134 138 | id: 5 139 | - uri: localhost:33135 140 | id: 6 141 | - uri: localhost:33132 142 | id: 3 143 | ... 144 | _ = remote_join(status.offline[1].id) 145 | --- 146 | ... 
147 | shard_status() 148 | --- 149 | - maintenance: [] 150 | offline: 151 | - uri: localhost:33132 152 | id: 3 153 | online: 154 | - uri: localhost:33133 155 | id: 4 156 | - uri: localhost:33130 157 | id: 1 158 | - uri: localhost:33131 159 | id: 2 160 | - uri: localhost:33134 161 | id: 5 162 | - uri: localhost:33135 163 | id: 6 164 | ... 165 | -- check joined replica 166 | box.space.demo:select() 167 | --- 168 | - - [1, 'test'] 169 | - [2, 'test'] 170 | - [7, 'test'] 171 | - [8, 'test'] 172 | - [13, 'join_test'] 173 | - [14, 'join_test'] 174 | - [15, 'join_test'] 175 | - [20, 'join_test'] 176 | ... 177 | test_run:cmd("switch master3") 178 | --- 179 | - true 180 | ... 181 | box.space.demo:select() 182 | --- 183 | - - [1, 'test'] 184 | - [2, 'test'] 185 | - [7, 'test'] 186 | - [8, 'test'] 187 | - [13, 'join_test'] 188 | - [14, 'join_test'] 189 | - [15, 'join_test'] 190 | - [20, 'join_test'] 191 | ... 192 | test_run:cmd("switch master4") 193 | --- 194 | - true 195 | ... 196 | box.space.demo:select() 197 | --- 198 | - - [4, 'test'] 199 | - [5, 'test'] 200 | - [12, 'join_test'] 201 | - [17, 'join_test'] 202 | ... 203 | test_run:cmd("switch join1") 204 | --- 205 | - true 206 | ... 207 | box.space.demo:select() 208 | --- 209 | - - [4, 'test'] 210 | - [5, 'test'] 211 | - [12, 'join_test'] 212 | - [17, 'join_test'] 213 | ... 214 | test_run:cmd("switch master5") 215 | --- 216 | - true 217 | ... 218 | box.space.demo:select() 219 | --- 220 | - - [3, 'test'] 221 | - [6, 'test'] 222 | - [9, 'test'] 223 | - [10, 'test'] 224 | - [11, 'join_test'] 225 | - [16, 'join_test'] 226 | - [18, 'join_test'] 227 | - [19, 'join_test'] 228 | ... 229 | test_run:cmd("switch default") 230 | --- 231 | - true 232 | ... 233 | -- join one more replica 234 | test_run:cmd("create server join2 with script='join/join2.lua'") 235 | --- 236 | - true 237 | ... 238 | test_run:cmd("start server join2") 239 | --- 240 | - true 241 | ... 242 | status = shard_status() 243 | --- 244 | ... 245 | status 246 | --- 247 | - maintenance: [] 248 | offline: 249 | - uri: localhost:33132 250 | id: 3 251 | online: 252 | - uri: localhost:33133 253 | id: 4 254 | - uri: localhost:33130 255 | id: 1 256 | - uri: localhost:33131 257 | id: 2 258 | - uri: localhost:33134 259 | id: 5 260 | - uri: localhost:33135 261 | id: 6 262 | ... 263 | _ = remote_join(status.offline[1].id) 264 | --- 265 | ... 266 | shard_status() 267 | --- 268 | - maintenance: [] 269 | offline: [] 270 | online: 271 | - uri: localhost:33133 272 | id: 4 273 | - uri: localhost:33130 274 | id: 1 275 | - uri: localhost:33131 276 | id: 2 277 | - uri: localhost:33134 278 | id: 5 279 | - uri: localhost:33132 280 | id: 3 281 | - uri: localhost:33135 282 | id: 6 283 | ... 284 | -- check joined replica 285 | test_run:cmd("switch join2") 286 | --- 287 | - true 288 | ... 289 | box.space.demo:select() 290 | --- 291 | - - [3, 'test'] 292 | - [6, 'test'] 293 | - [9, 'test'] 294 | - [10, 'test'] 295 | - [11, 'join_test'] 296 | - [16, 'join_test'] 297 | - [18, 'join_test'] 298 | - [19, 'join_test'] 299 | ... 300 | test_run:cmd("switch master5") 301 | --- 302 | - true 303 | ... 304 | box.space.demo:select() 305 | --- 306 | - - [3, 'test'] 307 | - [6, 'test'] 308 | - [9, 'test'] 309 | - [10, 'test'] 310 | - [11, 'join_test'] 311 | - [16, 'join_test'] 312 | - [18, 'join_test'] 313 | - [19, 'join_test'] 314 | ... 315 | test_run:cmd("switch default") 316 | --- 317 | - true 318 | ... 319 | -- cleanup 320 | test_run:cmd("setopt delimiter ';'") 321 | --- 322 | - true 323 | ... 
324 | cluster(function(id) 325 | if id ~= '1' and id ~= '2' then 326 | _ = test_run:cmd("stop server master"..id) 327 | end 328 | test_run:cmd("cleanup server master"..id) 329 | end); 330 | --- 331 | ... 332 | _ = test_run:cmd("stop server join1") 333 | test_run:cmd("cleanup server join1") 334 | _ = test_run:cmd("stop server join2") 335 | test_run:cmd("cleanup server join2") 336 | test_run:cmd("setopt delimiter ''"); 337 | --- 338 | ... 339 | test_run:cmd("restart server default with cleanup=1") 340 | -------------------------------------------------------------------------------- /test/join/nd_single.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | executed = false 4 | 5 | test_run:cmd("setopt delimiter ';'") 6 | -- start shards 7 | cluster(function(id) 8 | test_run:cmd("create server master"..id.." with script='join/master"..id..".lua'") 9 | test_run:cmd("start server master"..id) 10 | end); 11 | 12 | function on_join(self) 13 | if not executed then 14 | test_run:cmd('stop server master2') 15 | executed = true 16 | end 17 | end; 18 | 19 | test_run:cmd("setopt delimiter ''"); 20 | shard.wait_connection() 21 | 22 | _ = rawset(shard, 'on_action', on_join) 23 | 24 | 25 | for i=1, 10 do shard.demo:insert{i, 'test'} end 26 | 27 | -- check data 28 | box.space.demo:select() 29 | test_run:cmd("switch master3") 30 | box.space.demo:select() 31 | 32 | test_run:cmd("switch master1") 33 | box.space.demo:select() 34 | test_run:cmd("switch master4") 35 | box.space.demo:select() 36 | 37 | test_run:cmd("switch master2") 38 | box.space.demo:select() 39 | test_run:cmd("switch master5") 40 | box.space.demo:select() 41 | test_run:cmd("switch default") 42 | 43 | -- stop replica 44 | test_run:cmd("stop server master1") 45 | -- add tuples 46 | for i=11, 20 do shard.demo:insert{i, 'join_test'} end 47 | 48 | -- join replica 49 | test_run:cmd("create server join1 with script='join/join1.lua'") 50 | test_run:cmd("start server join1") 51 | 52 | status = shard_status() 53 | status 54 | _ = remote_join(status.offline[1].id) 55 | shard_status() 56 | 57 | -- check joined replica 58 | box.space.demo:select() 59 | test_run:cmd("switch master3") 60 | box.space.demo:select() 61 | 62 | test_run:cmd("switch master4") 63 | box.space.demo:select() 64 | test_run:cmd("switch join1") 65 | box.space.demo:select() 66 | 67 | test_run:cmd("switch master5") 68 | box.space.demo:select() 69 | test_run:cmd("switch default") 70 | 71 | -- join one more replica 72 | test_run:cmd("create server join2 with script='join/join2.lua'") 73 | test_run:cmd("start server join2") 74 | 75 | status = shard_status() 76 | status 77 | _ = remote_join(status.offline[1].id) 78 | shard_status() 79 | 80 | -- check joined replica 81 | test_run:cmd("switch join2") 82 | box.space.demo:select() 83 | test_run:cmd("switch master5") 84 | box.space.demo:select() 85 | test_run:cmd("switch default") 86 | 87 | -- cleanup 88 | test_run:cmd("setopt delimiter ';'") 89 | cluster(function(id) 90 | if id ~= '1' and id ~= '2' then 91 | _ = test_run:cmd("stop server master"..id) 92 | end 93 | test_run:cmd("cleanup server master"..id) 94 | end); 95 | 96 | _ = test_run:cmd("stop server join1") 97 | test_run:cmd("cleanup server join1") 98 | _ = test_run:cmd("stop server join2") 99 | test_run:cmd("cleanup server join2") 100 | test_run:cmd("setopt delimiter ''"); 101 | test_run:cmd("restart server default with cleanup=1") 102 | 
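The join suites above all follow the same administrative pattern; the sketch below restates it in plain Lua, using shard_status(), remote_unjoin() and remote_join() exactly as the tests call them (the surrounding loops are illustrative):

    shard.wait_connection()
    -- some servers went down: move every offline node to maintenance
    local status = shard_status()
    for _, node in ipairs(status.offline) do remote_unjoin(node.id) end
    -- ...restart the failed servers, then re-attach them...
    status = shard_status()
    for _, node in ipairs(status.offline) do remote_join(node.id) end
    -- on success, shard_status().offline is empty again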
-------------------------------------------------------------------------------- /test/join/pair.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("setopt delimiter ';'") 8 | --- 9 | - true 10 | ... 11 | -- start shards 12 | cluster(function(id) 13 | test_run:cmd("create server master"..id.." with script='join/master"..id..".lua'") 14 | test_run:cmd("start server master"..id) 15 | end); 16 | --- 17 | ... 18 | test_run:cmd("setopt delimiter ''"); 19 | --- 20 | - true 21 | ... 22 | shard.wait_connection() 23 | --- 24 | ... 25 | for i=1, 10 do shard.demo:insert{i, 'test'} end 26 | --- 27 | ... 28 | -- check data 29 | box.space.demo:select() 30 | --- 31 | - - [1, 'test'] 32 | - [2, 'test'] 33 | - [7, 'test'] 34 | - [8, 'test'] 35 | ... 36 | test_run:cmd("switch master3") 37 | --- 38 | - true 39 | ... 40 | box.space.demo:select() 41 | --- 42 | - - [1, 'test'] 43 | - [2, 'test'] 44 | - [7, 'test'] 45 | - [8, 'test'] 46 | ... 47 | test_run:cmd("switch master1") 48 | --- 49 | - true 50 | ... 51 | box.space.demo:select() 52 | --- 53 | - - [4, 'test'] 54 | - [5, 'test'] 55 | ... 56 | test_run:cmd("switch master4") 57 | --- 58 | - true 59 | ... 60 | box.space.demo:select() 61 | --- 62 | - - [4, 'test'] 63 | - [5, 'test'] 64 | ... 65 | test_run:cmd("switch master2") 66 | --- 67 | - true 68 | ... 69 | box.space.demo:select() 70 | --- 71 | - - [3, 'test'] 72 | - [6, 'test'] 73 | - [9, 'test'] 74 | - [10, 'test'] 75 | ... 76 | test_run:cmd("switch master5") 77 | --- 78 | - true 79 | ... 80 | box.space.demo:select() 81 | --- 82 | - - [3, 'test'] 83 | - [6, 'test'] 84 | - [9, 'test'] 85 | - [10, 'test'] 86 | ... 87 | test_run:cmd("switch default") 88 | --- 89 | - true 90 | ... 91 | -- stop replica 92 | test_run:cmd("stop server master1") 93 | --- 94 | - true 95 | ... 96 | test_run:cmd("stop server master4") 97 | --- 98 | - true 99 | ... 100 | status = shard_status() 101 | --- 102 | ... 103 | _ = remote_unjoin(status.offline[1].id) 104 | --- 105 | ... 106 | _ = remote_unjoin(status.offline[2].id) 107 | --- 108 | ... 109 | status = shard_status() 110 | --- 111 | ... 112 | status 113 | --- 114 | - maintenance: 115 | - null 116 | - true 117 | - null 118 | - null 119 | - true 120 | offline: 121 | - uri: localhost:33134 122 | id: 5 123 | - uri: localhost:33131 124 | id: 2 125 | online: 126 | - uri: localhost:33133 127 | id: 4 128 | - uri: localhost:33130 129 | id: 1 130 | - uri: localhost:33135 131 | id: 6 132 | - uri: localhost:33132 133 | id: 3 134 | ... 135 | -- add tuples 136 | result = shard.demo:insert{12, 'test_pair'} 137 | --- 138 | ... 139 | result 140 | --- 141 | - [] 142 | ... 143 | -- start servers 144 | test_run:cmd("start server master1") 145 | --- 146 | - true 147 | ... 148 | test_run:cmd("start server master4") 149 | --- 150 | - true 151 | ... 152 | _ = remote_join(status.offline[2].id) 153 | --- 154 | ... 155 | _ = remote_join(status.offline[1].id) 156 | --- 157 | ... 158 | shard_status() 159 | --- 160 | - maintenance: [] 161 | offline: [] 162 | online: 163 | - uri: localhost:33133 164 | id: 4 165 | - uri: localhost:33130 166 | id: 1 167 | - uri: localhost:33134 168 | id: 5 169 | - uri: localhost:33131 170 | id: 2 171 | - uri: localhost:33135 172 | id: 6 173 | - uri: localhost:33132 174 | id: 3 175 | ... 176 | shard.demo:insert{12, 'test_pair'} 177 | --- 178 | - - [12, 'test_pair'] 179 | ... 
180 | -- check joined replica 181 | box.space.demo:select() 182 | --- 183 | - - [1, 'test'] 184 | - [2, 'test'] 185 | - [7, 'test'] 186 | - [8, 'test'] 187 | ... 188 | test_run:cmd("switch master3") 189 | --- 190 | - true 191 | ... 192 | box.space.demo:select() 193 | --- 194 | - - [1, 'test'] 195 | - [2, 'test'] 196 | - [7, 'test'] 197 | - [8, 'test'] 198 | ... 199 | test_run:cmd("switch master1") 200 | --- 201 | - true 202 | ... 203 | box.space.demo:select() 204 | --- 205 | - - [4, 'test'] 206 | - [5, 'test'] 207 | - [12, 'test_pair'] 208 | ... 209 | test_run:cmd("switch master4") 210 | --- 211 | - true 212 | ... 213 | box.space.demo:select() 214 | --- 215 | - - [4, 'test'] 216 | - [5, 'test'] 217 | - [12, 'test_pair'] 218 | ... 219 | test_run:cmd("switch master2") 220 | --- 221 | - true 222 | ... 223 | box.space.demo:select() 224 | --- 225 | - - [3, 'test'] 226 | - [6, 'test'] 227 | - [9, 'test'] 228 | - [10, 'test'] 229 | ... 230 | test_run:cmd("switch master5") 231 | --- 232 | - true 233 | ... 234 | box.space.demo:select() 235 | --- 236 | - - [3, 'test'] 237 | - [6, 'test'] 238 | - [9, 'test'] 239 | - [10, 'test'] 240 | ... 241 | test_run:cmd("switch default") 242 | --- 243 | - true 244 | ... 245 | -- cleanup 246 | test_run:cmd("setopt delimiter ';'") 247 | --- 248 | - true 249 | ... 250 | cluster(function(id) 251 | _ = test_run:cmd("stop server master"..id) 252 | test_run:cmd("cleanup server master"..id) 253 | end); 254 | --- 255 | ... 256 | test_run:cmd("setopt delimiter ''"); 257 | --- 258 | - true 259 | ... 260 | test_run:cmd("restart server default with cleanup=1") 261 | -------------------------------------------------------------------------------- /test/join/pair.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | 4 | test_run:cmd("setopt delimiter ';'") 5 | -- start shards 6 | cluster(function(id) 7 | test_run:cmd("create server master"..id.." 
with script='join/master"..id..".lua'") 8 | test_run:cmd("start server master"..id) 9 | end); 10 | test_run:cmd("setopt delimiter ''"); 11 | shard.wait_connection() 12 | 13 | 14 | for i=1, 10 do shard.demo:insert{i, 'test'} end 15 | 16 | -- check data 17 | box.space.demo:select() 18 | test_run:cmd("switch master3") 19 | box.space.demo:select() 20 | 21 | test_run:cmd("switch master1") 22 | box.space.demo:select() 23 | test_run:cmd("switch master4") 24 | box.space.demo:select() 25 | 26 | test_run:cmd("switch master2") 27 | box.space.demo:select() 28 | test_run:cmd("switch master5") 29 | box.space.demo:select() 30 | test_run:cmd("switch default") 31 | 32 | -- stop replica 33 | test_run:cmd("stop server master1") 34 | test_run:cmd("stop server master4") 35 | status = shard_status() 36 | _ = remote_unjoin(status.offline[1].id) 37 | _ = remote_unjoin(status.offline[2].id) 38 | status = shard_status() 39 | status 40 | 41 | -- add tuples 42 | result = shard.demo:insert{12, 'test_pair'} 43 | result 44 | 45 | -- start servers 46 | test_run:cmd("start server master1") 47 | test_run:cmd("start server master4") 48 | 49 | _ = remote_join(status.offline[2].id) 50 | _ = remote_join(status.offline[1].id) 51 | shard_status() 52 | shard.demo:insert{12, 'test_pair'} 53 | 54 | -- check joined replica 55 | box.space.demo:select() 56 | test_run:cmd("switch master3") 57 | box.space.demo:select() 58 | 59 | test_run:cmd("switch master1") 60 | box.space.demo:select() 61 | test_run:cmd("switch master4") 62 | box.space.demo:select() 63 | 64 | test_run:cmd("switch master2") 65 | box.space.demo:select() 66 | test_run:cmd("switch master5") 67 | box.space.demo:select() 68 | test_run:cmd("switch default") 69 | -- cleanup 70 | test_run:cmd("setopt delimiter ';'") 71 | cluster(function(id) 72 | _ = test_run:cmd("stop server master"..id) 73 | test_run:cmd("cleanup server master"..id) 74 | end); 75 | 76 | test_run:cmd("setopt delimiter ''"); 77 | test_run:cmd("restart server default with cleanup=1") 78 | -------------------------------------------------------------------------------- /test/join/single.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("setopt delimiter ';'") 8 | --- 9 | - true 10 | ... 11 | -- start shards 12 | cluster(function(id) 13 | test_run:cmd("create server master"..id.." with script='join/master"..id..".lua'") 14 | test_run:cmd("start server master"..id) 15 | end); 16 | --- 17 | ... 18 | test_run:cmd("setopt delimiter ''"); 19 | --- 20 | - true 21 | ... 22 | shard.wait_connection() 23 | --- 24 | ... 25 | for i=1, 10 do shard.demo:insert{i, 'test'} end 26 | --- 27 | ... 28 | -- check data 29 | box.space.demo:select() 30 | --- 31 | - - [1, 'test'] 32 | - [2, 'test'] 33 | - [7, 'test'] 34 | - [8, 'test'] 35 | ... 36 | test_run:cmd("switch master3") 37 | --- 38 | - true 39 | ... 40 | box.space.demo:select() 41 | --- 42 | - - [1, 'test'] 43 | - [2, 'test'] 44 | - [7, 'test'] 45 | - [8, 'test'] 46 | ... 47 | test_run:cmd("switch master1") 48 | --- 49 | - true 50 | ... 51 | box.space.demo:select() 52 | --- 53 | - - [4, 'test'] 54 | - [5, 'test'] 55 | ... 56 | test_run:cmd("switch master4") 57 | --- 58 | - true 59 | ... 60 | box.space.demo:select() 61 | --- 62 | - - [4, 'test'] 63 | - [5, 'test'] 64 | ... 65 | test_run:cmd("switch master2") 66 | --- 67 | - true 68 | ... 
69 | box.space.demo:select() 70 | --- 71 | - - [3, 'test'] 72 | - [6, 'test'] 73 | - [9, 'test'] 74 | - [10, 'test'] 75 | ... 76 | test_run:cmd("switch master5") 77 | --- 78 | - true 79 | ... 80 | box.space.demo:select() 81 | --- 82 | - - [3, 'test'] 83 | - [6, 'test'] 84 | - [9, 'test'] 85 | - [10, 'test'] 86 | ... 87 | test_run:cmd("switch default") 88 | --- 89 | - true 90 | ... 91 | -- stop replica 92 | test_run:cmd("stop server master1") 93 | --- 94 | - true 95 | ... 96 | -- add tuples 97 | for i=11, 20 do shard.demo:insert{i, 'join_test'} end 98 | --- 99 | ... 100 | -- join replica 101 | test_run:cmd("create server join1 with script='join/join1.lua'") 102 | --- 103 | - true 104 | ... 105 | test_run:cmd("start server join1") 106 | --- 107 | - true 108 | ... 109 | status = shard_status() 110 | --- 111 | ... 112 | status 113 | --- 114 | - maintenance: [] 115 | offline: 116 | - uri: localhost:33131 117 | id: 2 118 | online: 119 | - uri: localhost:33133 120 | id: 4 121 | - uri: localhost:33130 122 | id: 1 123 | - uri: localhost:33134 124 | id: 5 125 | - uri: localhost:33135 126 | id: 6 127 | - uri: localhost:33132 128 | id: 3 129 | ... 130 | _ = remote_join(status.offline[1].id) 131 | --- 132 | ... 133 | shard_status() 134 | --- 135 | - maintenance: [] 136 | offline: [] 137 | online: 138 | - uri: localhost:33133 139 | id: 4 140 | - uri: localhost:33130 141 | id: 1 142 | - uri: localhost:33131 143 | id: 2 144 | - uri: localhost:33134 145 | id: 5 146 | - uri: localhost:33135 147 | id: 6 148 | - uri: localhost:33132 149 | id: 3 150 | ... 151 | -- check joined replica 152 | box.space.demo:select() 153 | --- 154 | - - [1, 'test'] 155 | - [2, 'test'] 156 | - [7, 'test'] 157 | - [8, 'test'] 158 | - [13, 'join_test'] 159 | - [14, 'join_test'] 160 | - [15, 'join_test'] 161 | - [20, 'join_test'] 162 | ... 163 | test_run:cmd("switch master3") 164 | --- 165 | - true 166 | ... 167 | box.space.demo:select() 168 | --- 169 | - - [1, 'test'] 170 | - [2, 'test'] 171 | - [7, 'test'] 172 | - [8, 'test'] 173 | - [13, 'join_test'] 174 | - [14, 'join_test'] 175 | - [15, 'join_test'] 176 | - [20, 'join_test'] 177 | ... 178 | test_run:cmd("switch master4") 179 | --- 180 | - true 181 | ... 182 | box.space.demo:select() 183 | --- 184 | - - [4, 'test'] 185 | - [5, 'test'] 186 | - [12, 'join_test'] 187 | - [17, 'join_test'] 188 | ... 189 | test_run:cmd("switch join1") 190 | --- 191 | - true 192 | ... 193 | box.space.demo:select() 194 | --- 195 | - - [4, 'test'] 196 | - [5, 'test'] 197 | - [12, 'join_test'] 198 | - [17, 'join_test'] 199 | ... 200 | test_run:cmd("switch master2") 201 | --- 202 | - true 203 | ... 204 | box.space.demo:select() 205 | --- 206 | - - [3, 'test'] 207 | - [6, 'test'] 208 | - [9, 'test'] 209 | - [10, 'test'] 210 | - [11, 'join_test'] 211 | - [16, 'join_test'] 212 | - [18, 'join_test'] 213 | - [19, 'join_test'] 214 | ... 215 | test_run:cmd("switch master5") 216 | --- 217 | - true 218 | ... 219 | box.space.demo:select() 220 | --- 221 | - - [3, 'test'] 222 | - [6, 'test'] 223 | - [9, 'test'] 224 | - [10, 'test'] 225 | - [11, 'join_test'] 226 | - [16, 'join_test'] 227 | - [18, 'join_test'] 228 | - [19, 'join_test'] 229 | ... 230 | test_run:cmd("switch default") 231 | --- 232 | - true 233 | ... 234 | -- cleanup 235 | test_run:cmd("setopt delimiter ';'") 236 | --- 237 | - true 238 | ... 239 | cluster(function(id) 240 | if id ~= '1' then 241 | _ = test_run:cmd("stop server master"..id) 242 | end 243 | test_run:cmd("cleanup server master"..id) 244 | end); 245 | --- 246 | ... 
247 | _ = test_run:cmd("stop server join1") 248 | test_run:cmd("cleanup server join1") 249 | test_run:cmd("setopt delimiter ''"); 250 | --- 251 | ... 252 | test_run:cmd("restart server default with cleanup=1") 253 | -------------------------------------------------------------------------------- /test/join/single.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | 4 | test_run:cmd("setopt delimiter ';'") 5 | -- start shards 6 | cluster(function(id) 7 | test_run:cmd("create server master"..id.." with script='join/master"..id..".lua'") 8 | test_run:cmd("start server master"..id) 9 | end); 10 | test_run:cmd("setopt delimiter ''"); 11 | shard.wait_connection() 12 | 13 | 14 | for i=1, 10 do shard.demo:insert{i, 'test'} end 15 | 16 | -- check data 17 | box.space.demo:select() 18 | test_run:cmd("switch master3") 19 | box.space.demo:select() 20 | 21 | test_run:cmd("switch master1") 22 | box.space.demo:select() 23 | test_run:cmd("switch master4") 24 | box.space.demo:select() 25 | 26 | test_run:cmd("switch master2") 27 | box.space.demo:select() 28 | test_run:cmd("switch master5") 29 | box.space.demo:select() 30 | test_run:cmd("switch default") 31 | 32 | -- stop replica 33 | test_run:cmd("stop server master1") 34 | -- add tuples 35 | for i=11, 20 do shard.demo:insert{i, 'join_test'} end 36 | 37 | -- join replica 38 | test_run:cmd("create server join1 with script='join/join1.lua'") 39 | test_run:cmd("start server join1") 40 | 41 | status = shard_status() 42 | status 43 | _ = remote_join(status.offline[1].id) 44 | shard_status() 45 | 46 | -- check joined replica 47 | box.space.demo:select() 48 | test_run:cmd("switch master3") 49 | box.space.demo:select() 50 | 51 | test_run:cmd("switch master4") 52 | box.space.demo:select() 53 | test_run:cmd("switch join1") 54 | box.space.demo:select() 55 | 56 | test_run:cmd("switch master2") 57 | box.space.demo:select() 58 | test_run:cmd("switch master5") 59 | box.space.demo:select() 60 | test_run:cmd("switch default") 61 | 62 | -- cleanup 63 | test_run:cmd("setopt delimiter ';'") 64 | cluster(function(id) 65 | if id ~= '1' then 66 | _ = test_run:cmd("stop server master"..id) 67 | end 68 | test_run:cmd("cleanup server master"..id) 69 | end); 70 | 71 | _ = test_run:cmd("stop server join1") 72 | test_run:cmd("cleanup server join1") 73 | test_run:cmd("setopt delimiter ''"); 74 | test_run:cmd("restart server default with cleanup=1") 75 | -------------------------------------------------------------------------------- /test/join/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = shard join tests 4 | script = master.lua 5 | -------------------------------------------------------------------------------- /test/node_down/auto_increment.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='node_down/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("start server master1") 12 | --- 13 | - true 14 | ... 15 | shard.wait_connection() 16 | --- 17 | ... 18 | shard.demo:auto_increment{'test'} 19 | --- 20 | - - [1, 'test'] 21 | - [1, 'test'] 22 | ... 23 | shard.demo:auto_increment{'test2'} 24 | --- 25 | - - [3, 'test2'] 26 | - [3, 'test2'] 27 | ...
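-- A note on the ids above: auto_increment appears to step the sequence by the
-- number of configured servers, so each node hands out ids from a disjoint
-- range. With the two-server node_down config the ids run 1, 3, 5, ...; the
-- three-server redundancy suites later show 1, 4, 7, ... A rough sketch of
-- that numbering, under that assumption:
local num_servers = 2                 -- node_down/master.lua configures two
local function next_id(last_id)
    return last_id + num_servers      -- each node steps by the server count
end
assert(next_id(1) == 3 and next_id(3) == 5)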
28 | shard.demo:auto_increment{'test3'} 29 | --- 30 | - - [5, 'test3'] 31 | - [5, 'test3'] 32 | ... 33 | _ = test_run:cmd("stop server master1") 34 | --- 35 | ... 36 | shard.demo:q_auto_increment(1, {'test4'}) 37 | --- 38 | - [7, 'test4'] 39 | ... 40 | batch = shard.q_begin() 41 | --- 42 | ... 43 | batch.demo:q_auto_increment(2, {'test5'}) 44 | --- 45 | - [9, 'test5'] 46 | ... 47 | batch.demo:q_auto_increment(3, {'test6'}) 48 | --- 49 | - [11, 'test6'] 50 | ... 51 | batch:q_end() 52 | --- 53 | ... 54 | shard.wait_operations() 55 | --- 56 | ... 57 | box.space.demo:select() 58 | --- 59 | - - [1, 'test'] 60 | - [3, 'test2'] 61 | - [5, 'test3'] 62 | - [7, 'test4'] 63 | - [9, 'test5'] 64 | - [11, 'test6'] 65 | ... 66 | box.space._shard_operations:select() 67 | --- 68 | - - ['1', 2, [[512, 'insert', [[7, 'test4']]]]] 69 | - ['3', 2, [[512, 'insert', [[9, 'test5']]], [512, 'insert', [[11, 'test6']]]]] 70 | ... 71 | test_run:cmd("cleanup server master1") 72 | --- 73 | - true 74 | ... 75 | test_run:cmd("restart server default with cleanup=1") 76 | -------------------------------------------------------------------------------- /test/node_down/auto_increment.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='node_down/master1.lua'") 4 | test_run:cmd("start server master1") 5 | shard.wait_connection() 6 | 7 | shard.demo:auto_increment{'test'} 8 | shard.demo:auto_increment{'test2'} 9 | shard.demo:auto_increment{'test3'} 10 | 11 | _ = test_run:cmd("stop server master1") 12 | 13 | shard.demo:q_auto_increment(1, {'test4'}) 14 | batch = shard.q_begin() 15 | batch.demo:q_auto_increment(2, {'test5'}) 16 | batch.demo:q_auto_increment(3, {'test6'}) 17 | batch:q_end() 18 | 19 | shard.wait_operations() 20 | box.space.demo:select() 21 | box.space._shard_operations:select() 22 | 23 | test_run:cmd("cleanup server master1") 24 | test_run:cmd("restart server default with cleanup=1") 25 | -------------------------------------------------------------------------------- /test/node_down/basic.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='node_down/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("start server master1") 12 | --- 13 | - true 14 | ... 15 | shard.wait_connection() 16 | --- 17 | ... 18 | shard.demo:insert{1, 'test'} 19 | --- 20 | - - [1, 'test'] 21 | - [1, 'test'] 22 | ... 23 | shard.demo:replace{1, 'test2'} 24 | --- 25 | - - [1, 'test2'] 26 | - [1, 'test2'] 27 | ... 28 | shard.demo:update({1}, {{'=', 2, 'test3'}}) 29 | --- 30 | - - [1, 'test3'] 31 | - [1, 'test3'] 32 | ... 33 | _ = test_run:cmd("stop server master1") 34 | --- 35 | ... 36 | shard.demo:insert{2, 'test4'} 37 | --- 38 | - - [2, 'test4'] 39 | ... 40 | shard.demo:insert{3, 'test5'}; 41 | --- 42 | - - [3, 'test5'] 43 | ... 44 | shard.demo:delete({3}) 45 | --- 46 | - - [3, 'test5'] 47 | ... 48 | box.space.demo:select() 49 | --- 50 | - - [1, 'test3'] 51 | - [2, 'test4'] 52 | ... 53 | test_run:cmd("cleanup server master1") 54 | --- 55 | - true 56 | ... 
57 | test_run:cmd("restart server default with cleanup=1") 58 | -------------------------------------------------------------------------------- /test/node_down/basic.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='node_down/master1.lua'") 4 | test_run:cmd("start server master1") 5 | shard.wait_connection() 6 | 7 | shard.demo:insert{1, 'test'} 8 | shard.demo:replace{1, 'test2'} 9 | shard.demo:update({1}, {{'=', 2, 'test3'}}) 10 | 11 | _ = test_run:cmd("stop server master1") 12 | 13 | shard.demo:insert{2, 'test4'} 14 | shard.demo:insert{3, 'test5'}; 15 | shard.demo:delete({3}) 16 | 17 | box.space.demo:select() 18 | 19 | test_run:cmd("cleanup server master1") 20 | test_run:cmd("restart server default with cleanup=1") 21 | -------------------------------------------------------------------------------- /test/node_down/batch.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='node_down/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("start server master1") 12 | --- 13 | - true 14 | ... 15 | shard.wait_connection() 16 | --- 17 | ... 18 | -- two-phase operations 19 | batch = shard.q_begin() 20 | --- 21 | ... 22 | batch.demo:q_insert(1, {0, 'test'}) 23 | --- 24 | - [0, 'test'] 25 | ... 26 | batch.demo:q_replace(2, {0, 'test2'}) 27 | --- 28 | - [0, 'test2'] 29 | ... 30 | batch.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 31 | --- 32 | ... 33 | batch.demo:q_insert(4, {1, 'test4'}) 34 | --- 35 | - [1, 'test4'] 36 | ... 37 | batch.demo:q_insert(5, {2, 'test_to_delete'}) 38 | --- 39 | - [2, 'test_to_delete'] 40 | ... 41 | batch.demo:q_delete(6, 2) 42 | --- 43 | ... 44 | batch:q_end() 45 | --- 46 | ... 47 | _ = test_run:cmd("stop server master1") 48 | --- 49 | ... 50 | shard.wait_operations() 51 | --- 52 | ... 53 | box.space.demo:select() 54 | --- 55 | - - [0, 'test3'] 56 | - [1, 'test4'] 57 | ... 58 | box.space._shard_operations:select() 59 | --- 60 | - - ['6', 2, [[512, 'insert', [[0, 'test']]], [512, 'replace', [[0, 'test2']]], [ 61 | 512, 'update', [0, [['=', 2, 'test3']]]], [512, 'insert', [[1, 'test4']]], 62 | [512, 'insert', [[2, 'test_to_delete']]], [512, 'delete', [2]]]] 63 | ... 64 | -- check that operation q_insert is in the shard 65 | shard.demo:check_operation(6, 0) 66 | --- 67 | - true 68 | ... 69 | -- check for a nonexistent operation 70 | shard.demo:check_operation('12345', 0) 71 | --- 72 | - false 73 | ... 74 | test_run:cmd("cleanup server master1") 75 | --- 76 | - true 77 | ...
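-- The queued ("q_") API exercised by batch.result, in one minimal sketch:
-- every q_* call takes a caller-chosen operation id as its first argument,
-- q_begin()/q_end() group calls into a single batch, and the delivered batch
-- is recorded in _shard_operations under its final operation id (see the
-- ['6', ...] tuple above). Ids and tuples here are illustrative only:
local batch = shard.q_begin()
batch.demo:q_insert(1, {100, 'queued insert'})
batch.demo:q_update(2, 100, {{'=', 2, 'queued update'}})
batch.demo:q_insert(3, {101, 'second row'})
batch:q_end()                         -- flush the whole batch to the shard
shard.wait_operations()               -- block until queued ops are applied
shard.demo:check_operation(3, 100)    -- true once the batch has landed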
78 | test_run:cmd("restart server default with cleanup=1") 79 | -------------------------------------------------------------------------------- /test/node_down/batch.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='node_down/master1.lua'") 4 | test_run:cmd("start server master1") 5 | shard.wait_connection() 6 | 7 | -- two-phase operations 8 | batch = shard.q_begin() 9 | batch.demo:q_insert(1, {0, 'test'}) 10 | batch.demo:q_replace(2, {0, 'test2'}) 11 | batch.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 12 | batch.demo:q_insert(4, {1, 'test4'}) 13 | batch.demo:q_insert(5, {2, 'test_to_delete'}) 14 | batch.demo:q_delete(6, 2) 15 | batch:q_end() 16 | 17 | _ = test_run:cmd("stop server master1") 18 | 19 | shard.wait_operations() 20 | box.space.demo:select() 21 | box.space._shard_operations:select() 22 | 23 | -- check that operation q_insert is in the shard 24 | shard.demo:check_operation(6, 0) 25 | -- check for a nonexistent operation 26 | shard.demo:check_operation('12345', 0) 27 | 28 | test_run:cmd("cleanup server master1") 29 | test_run:cmd("restart server default with cleanup=1") 30 | -------------------------------------------------------------------------------- /test/node_down/master.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '1' }; 10 | }; 11 | login = 'tester'; 12 | password = 'pass'; 13 | redundancy = 2; 14 | binary = 33130; 15 | } 16 | 17 | box.cfg { 18 | slab_alloc_arena = 0.1; 19 | wal_mode = 'none'; 20 | listen = cfg.binary; 21 | custom_proc_title = "master" 22 | } 23 | 24 | require('console').listen(os.getenv('ADMIN')) 25 | 26 | if not box.space.demo then 27 | box.schema.user.create(cfg.login, { password = cfg.password }) 28 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 29 | 30 | local demo = box.schema.create_space('demo') 31 | demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 32 | end 33 | 34 | function print_shard_map() 35 | local result = {} 36 | for uri, hb_table in pairs(shard.get_heartbeat()) do 37 | table.insert(result, uri) 38 | for server, data in pairs(hb_table) do 39 | table.insert(result, server) 40 | table.insert(result, data.try) 41 | end 42 | end 43 | return result 44 | end 45 | 46 | -- init shards 47 | fiber.create(function() 48 | shard.init(cfg) 49 | end) 50 | 51 | -------------------------------------------------------------------------------- /test/node_down/master1.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '1' }; 10 | }; 11 | login = 'tester'; 12 | password = 'pass'; 13 | redundancy = 2; 14 | binary = 33131; 15 | } 16 | 17 | box.cfg { 18 | slab_alloc_arena = 0.1; 19 | wal_mode = 'none'; 20 | listen = cfg.binary; 21 | custom_proc_title = "master1"; 22 | } 23 | 24 | require('console').listen(os.getenv('ADMIN')) 25 | 26 | if not box.space.demo then 27 | box.schema.user.create(cfg.login, { password = cfg.password }) 28 | 
box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 29 | 30 | local demo = box.schema.create_space('demo') 31 | demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 32 | end 33 | 34 | function print_shard_map() 35 | local result = {} 36 | for uri, hb_table in pairs(shard.get_heartbeat()) do 37 | table.insert(result, uri) 38 | for server, data in pairs(hb_table) do 39 | table.insert(result, server) 40 | table.insert(result, data.try) 41 | end 42 | end 43 | return result 44 | end 45 | 46 | -- init shards 47 | fiber.create(function() 48 | shard.init(cfg) 49 | end) 50 | -------------------------------------------------------------------------------- /test/node_down/node_down.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='node_down/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("start server master1") 12 | --- 13 | - true 14 | ... 15 | shard.wait_connection() 16 | --- 17 | ... 18 | shard.wait_table_fill() 19 | --- 20 | ... 21 | shard.is_table_filled() 22 | --- 23 | - true 24 | ... 25 | -- Kill the server and wait for the monitoring fibers to detect it 26 | _ = test_run:cmd("stop server master1") 27 | --- 28 | ... 29 | shard.wait_epoch(2) 30 | --- 31 | ... 32 | shard.is_table_filled() 33 | --- 34 | - true 35 | ... 36 | test_run:cmd("cleanup server master1") 37 | --- 38 | - true 39 | ... 40 | test_run:cmd("restart server default with cleanup=1") 41 | -------------------------------------------------------------------------------- /test/node_down/node_down.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='node_down/master1.lua'") 4 | test_run:cmd("start server master1") 5 | shard.wait_connection() 6 | shard.wait_table_fill() 7 | shard.is_table_filled() 8 | 9 | -- Kill the server and wait for the monitoring fibers to detect it 10 | _ = test_run:cmd("stop server master1") 11 | shard.wait_epoch(2) 12 | shard.is_table_filled() 13 | 14 | test_run:cmd("cleanup server master1") 15 | test_run:cmd("restart server default with cleanup=1") 16 | -------------------------------------------------------------------------------- /test/node_down/q_basic.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='node_down/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("start server master1") 12 | --- 13 | - true 14 | ... 15 | shard.wait_connection() 16 | --- 17 | ... 18 | -- two-phase operations 19 | shard.demo:q_insert(1, {0, 'test'}) 20 | --- 21 | - [0, 'test'] 22 | ... 23 | shard.demo:q_replace(2, {0, 'test2'}) 24 | --- 25 | - [0, 'test2'] 26 | ... 27 | shard.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 28 | --- 29 | ... 30 | _ = test_run:cmd("stop server master1") 31 | --- 32 | ... 33 | shard.demo:q_insert(4, {1, 'test4'}) 34 | --- 35 | - [1, 'test4'] 36 | ... 37 | shard.demo:q_insert(5, {2, 'test_to_delete'}) 38 | --- 39 | - [2, 'test_to_delete'] 40 | ... 41 | shard.demo:q_delete(6, 2) 42 | --- 43 | ... 44 | shard.wait_operations() 45 | --- 46 | ... 47 | box.space.demo:select() 48 | --- 49 | - - [0, 'test3'] 50 | - [1, 'test4'] 51 | ...
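-- Worth noting in q_basic.result so far: master1 was stopped midway, yet the
-- queued writes were still accepted and applied on the surviving node, with
-- _shard_operations keeping the record. A rough way to inspect what has been
-- recorded (field layout inferred from the fixtures, where each entry looks
-- like [operation id, state, payload]):
for _, op in box.space._shard_operations:pairs() do
    print(op[1], op[2])  -- operation id and what appears to be a state flag
end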
52 | -- check that operation q_insert is in the shard 53 | shard.demo:check_operation(1, 0) 54 | --- 55 | - true 56 | ... 57 | -- check for a nonexistent operation 58 | shard.demo:check_operation('12345', 0) 59 | --- 60 | - false 61 | ... 62 | test_run:cmd("cleanup server master1") 63 | --- 64 | - true 65 | ... 66 | test_run:cmd("restart server default with cleanup=1") 67 | -------------------------------------------------------------------------------- /test/node_down/q_basic.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='node_down/master1.lua'") 4 | test_run:cmd("start server master1") 5 | shard.wait_connection() 6 | 7 | -- two-phase operations 8 | shard.demo:q_insert(1, {0, 'test'}) 9 | shard.demo:q_replace(2, {0, 'test2'}) 10 | shard.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 11 | 12 | _ = test_run:cmd("stop server master1") 13 | 14 | shard.demo:q_insert(4, {1, 'test4'}) 15 | shard.demo:q_insert(5, {2, 'test_to_delete'}) 16 | shard.demo:q_delete(6, 2) 17 | 18 | shard.wait_operations() 19 | box.space.demo:select() 20 | 21 | -- check that operation q_insert is in the shard 22 | shard.demo:check_operation(1, 0) 23 | -- check for a nonexistent operation 24 | shard.demo:check_operation('12345', 0) 25 | 26 | test_run:cmd("cleanup server master1") 27 | test_run:cmd("restart server default with cleanup=1") 28 | -------------------------------------------------------------------------------- /test/node_down/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = node down shard tests 4 | script = master.lua 5 | -------------------------------------------------------------------------------- /test/redundancy1/auto_increment.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy1/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy1/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | shard.demo:auto_increment{'test'} 27 | --- 28 | - - [1, 'test'] 29 | ... 30 | shard.demo:auto_increment{'test2'} 31 | --- 32 | - - [4, 'test2'] 33 | ... 34 | shard.demo:auto_increment{'test3'} 35 | --- 36 | - - [7, 'test3'] 37 | ... 38 | shard.demo:q_auto_increment(1, {'test4'}) 39 | --- 40 | - [10, 'test4'] 41 | ... 42 | batch = shard.q_begin() 43 | --- 44 | ... 45 | batch.demo:q_auto_increment(2, {'test5'}) 46 | --- 47 | - [13, 'test5'] 48 | ... 49 | batch.demo:q_auto_increment(3, {'test6'}) 50 | --- 51 | - [16, 'test6'] 52 | ... 53 | batch:q_end() 54 | --- 55 | ... 56 | shard.wait_operations() 57 | --- 58 | ... 59 | box.space.demo:select() 60 | --- 61 | - - [10, 'test4'] 62 | - [16, 'test6'] 63 | ... 64 | test_run:cmd("switch master1") 65 | --- 66 | - true 67 | ... 68 | shard.wait_operations() 69 | --- 70 | ... 71 | box.space.demo:select() 72 | --- 73 | - - [4, 'test2'] 74 | ... 75 | test_run:cmd("switch master2") 76 | --- 77 | - true 78 | ... 79 | shard.wait_operations() 80 | --- 81 | ...
82 | box.space.demo:select() 83 | --- 84 | - - [1, 'test'] 85 | - [7, 'test3'] 86 | - [13, 'test5'] 87 | ... 88 | test_run:cmd("switch default") 89 | --- 90 | - true 91 | ... 92 | box.space._shard_operations:select() 93 | --- 94 | - - ['1', 2, [[512, 'insert', [[10, 'test4']]]]] 95 | - ['3', 2, [[512, 'insert', [[16, 'test6']]]]] 96 | ... 97 | _ = test_run:cmd("stop server master1") 98 | --- 99 | ... 100 | _ = test_run:cmd("stop server master2") 101 | --- 102 | ... 103 | test_run:cmd("cleanup server master1") 104 | --- 105 | - true 106 | ... 107 | test_run:cmd("cleanup server master2") 108 | --- 109 | - true 110 | ... 111 | test_run:cmd("restart server default with cleanup=1") 112 | -------------------------------------------------------------------------------- /test/redundancy1/auto_increment.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy1/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy1/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | 8 | shard.wait_connection() 9 | 10 | shard.demo:auto_increment{'test'} 11 | shard.demo:auto_increment{'test2'} 12 | shard.demo:auto_increment{'test3'} 13 | 14 | shard.demo:q_auto_increment(1, {'test4'}) 15 | batch = shard.q_begin() 16 | batch.demo:q_auto_increment(2, {'test5'}) 17 | batch.demo:q_auto_increment(3, {'test6'}) 18 | batch:q_end() 19 | 20 | shard.wait_operations() 21 | box.space.demo:select() 22 | 23 | test_run:cmd("switch master1") 24 | shard.wait_operations() 25 | box.space.demo:select() 26 | 27 | test_run:cmd("switch master2") 28 | shard.wait_operations() 29 | box.space.demo:select() 30 | 31 | test_run:cmd("switch default") 32 | 33 | box.space._shard_operations:select() 34 | 35 | _ = test_run:cmd("stop server master1") 36 | _ = test_run:cmd("stop server master2") 37 | test_run:cmd("cleanup server master1") 38 | test_run:cmd("cleanup server master2") 39 | test_run:cmd("restart server default with cleanup=1") 40 | -------------------------------------------------------------------------------- /test/redundancy1/basic.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy1/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy1/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | shard.demo:insert{1, 'test'} 27 | --- 28 | - - [1, 'test'] 29 | ... 30 | shard.demo:replace{1, 'test2'} 31 | --- 32 | - - [1, 'test2'] 33 | ... 34 | shard.demo:update({1}, {{'=', 2, 'test3'}}) 35 | --- 36 | - - [1, 'test3'] 37 | ... 38 | shard.demo:insert{2, 'test4'} 39 | --- 40 | - - [2, 'test4'] 41 | ... 42 | shard.demo:insert{3, 'test5'} 43 | --- 44 | - - [3, 'test5'] 45 | ... 46 | shard.demo:delete({3}) 47 | --- 48 | - - [3, 'test5'] 49 | ... 50 | shard.demo2:replace{1, 2, 10} 51 | --- 52 | - - [1, 2, 10] 53 | ... 54 | shard.demo2:replace{2, 2, 20} 55 | --- 56 | - - [2, 2, 20] 57 | ... 58 | shard.demo2:replace{3, 2, 30} 59 | --- 60 | - - [3, 2, 30] 61 | ... 
62 | shard.demo2:replace{4, 2, 40} 63 | --- 64 | - - [4, 2, 40] 65 | ... 66 | shard.demo2:replace{5, 2, 50} 67 | --- 68 | - - [5, 2, 50] 69 | ... 70 | shard.demo2:replace{6, 2, 60} 71 | --- 72 | - - [6, 2, 60] 73 | ... 74 | shard.demo2:replace{7, 2, 70} 75 | --- 76 | - - [7, 2, 70] 77 | ... 78 | shard.demo2:replace{8, 2, 80} 79 | --- 80 | - - [8, 2, 80] 81 | ... 82 | shard.demo2:replace{9, 2, 90} 83 | --- 84 | - - [9, 2, 90] 85 | ... 86 | shard.demo2:replace{10, 2, 100} 87 | --- 88 | - - [10, 2, 100] 89 | ... 90 | box.space.demo:select() 91 | --- 92 | - [] 93 | ... 94 | box.space.demo2:select() 95 | --- 96 | - - [3, 2, 30] 97 | - [6, 2, 60] 98 | - [9, 2, 90] 99 | - [10, 2, 100] 100 | ... 101 | test_run:cmd("switch master1") 102 | --- 103 | - true 104 | ... 105 | box.space.demo:select() 106 | --- 107 | - [] 108 | ... 109 | box.space.demo2:select() 110 | --- 111 | - - [4, 2, 40] 112 | - [5, 2, 50] 113 | ... 114 | test_run:cmd("switch master2") 115 | --- 116 | - true 117 | ... 118 | box.space.demo:select() 119 | --- 120 | - - [1, 'test3'] 121 | - [2, 'test4'] 122 | ... 123 | box.space.demo2:select() 124 | --- 125 | - - [1, 2, 10] 126 | - [2, 2, 20] 127 | - [7, 2, 70] 128 | - [8, 2, 80] 129 | ... 130 | test_run:cmd("switch default") 131 | --- 132 | - true 133 | ... 134 | shard.demo2:secondary_select(1, {}, {2}) 135 | --- 136 | - - [1, 2, 10] 137 | - [2, 2, 20] 138 | - [3, 2, 30] 139 | - [4, 2, 40] 140 | - [5, 2, 50] 141 | - [6, 2, 60] 142 | - [7, 2, 70] 143 | - [8, 2, 80] 144 | - [9, 2, 90] 145 | - [10, 2, 100] 146 | ... 147 | shard.demo2:secondary_select(1, {}, {2, 10}) 148 | --- 149 | - - [1, 2, 10] 150 | ... 151 | shard.demo2:secondary_select(1, {}, {2, 200}) 152 | --- 153 | - [] 154 | ... 155 | shard.demo2:secondary_select(1, {limit = 3}, {2, 80}) 156 | --- 157 | - - [8, 2, 80] 158 | ... 159 | _ = test_run:cmd("stop server master1") 160 | --- 161 | ... 162 | _ = test_run:cmd("stop server master2") 163 | --- 164 | ... 165 | test_run:cmd("cleanup server master1") 166 | --- 167 | - true 168 | ... 169 | test_run:cmd("cleanup server master2") 170 | --- 171 | - true 172 | ... 
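-- secondary_select(index_no, options, key), as used earlier in this file,
-- evidently runs the select on every shard's secondary index and merges the
-- results, which is why the bare {2} prefix returned tuples living on three
-- different masters. The call shapes this test exercises (index 1 is demo2's
-- 'sk' index):
shard.demo2:secondary_select(1, {}, {2})               -- whole key prefix
shard.demo2:secondary_select(1, {}, {2, 10})           -- exact composite key
shard.demo2:secondary_select(1, {limit = 3}, {2, 80})  -- capped result set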
173 | test_run:cmd("restart server default with cleanup=1") 174 | -------------------------------------------------------------------------------- /test/redundancy1/basic.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy1/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy1/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | shard.demo:insert{1, 'test'} 10 | shard.demo:replace{1, 'test2'} 11 | shard.demo:update({1}, {{'=', 2, 'test3'}}) 12 | shard.demo:insert{2, 'test4'} 13 | shard.demo:insert{3, 'test5'} 14 | shard.demo:delete({3}) 15 | 16 | shard.demo2:replace{1, 2, 10} 17 | shard.demo2:replace{2, 2, 20} 18 | shard.demo2:replace{3, 2, 30} 19 | shard.demo2:replace{4, 2, 40} 20 | shard.demo2:replace{5, 2, 50} 21 | shard.demo2:replace{6, 2, 60} 22 | shard.demo2:replace{7, 2, 70} 23 | shard.demo2:replace{8, 2, 80} 24 | shard.demo2:replace{9, 2, 90} 25 | shard.demo2:replace{10, 2, 100} 26 | 27 | box.space.demo:select() 28 | box.space.demo2:select() 29 | test_run:cmd("switch master1") 30 | box.space.demo:select() 31 | box.space.demo2:select() 32 | test_run:cmd("switch master2") 33 | box.space.demo:select() 34 | box.space.demo2:select() 35 | test_run:cmd("switch default") 36 | 37 | shard.demo2:secondary_select(1, {}, {2}) 38 | shard.demo2:secondary_select(1, {}, {2, 10}) 39 | shard.demo2:secondary_select(1, {}, {2, 200}) 40 | shard.demo2:secondary_select(1, {limit = 3}, {2, 80}) 41 | 42 | _ = test_run:cmd("stop server master1") 43 | _ = test_run:cmd("stop server master2") 44 | test_run:cmd("cleanup server master1") 45 | test_run:cmd("cleanup server master2") 46 | test_run:cmd("restart server default with cleanup=1") 47 | -------------------------------------------------------------------------------- /test/redundancy1/batch.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy1/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy1/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | -- two-phase operations 27 | batch = shard.q_begin() 28 | --- 29 | ... 30 | batch.demo:q_insert(1, {0, 'test'}) 31 | --- 32 | - [0, 'test'] 33 | ... 34 | batch.demo:q_replace(2, {0, 'test2'}) 35 | --- 36 | - [0, 'test2'] 37 | ... 38 | batch.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 39 | --- 40 | ... 41 | batch.demo:q_insert(4, {1, 'test4'}) 42 | --- 43 | - [1, 'test4'] 44 | ... 45 | batch.demo:q_insert(5, {2, 'test_to_delete'}) 46 | --- 47 | - [2, 'test_to_delete'] 48 | ... 49 | batch.demo:q_delete(6, 2) 50 | --- 51 | ... 52 | batch:q_end() 53 | --- 54 | ... 55 | shard.wait_operations() 56 | --- 57 | ... 58 | box.space.demo:select() 59 | --- 60 | - [] 61 | ... 62 | test_run:cmd("switch master1") 63 | --- 64 | - true 65 | ... 66 | shard.wait_operations() 67 | --- 68 | ... 69 | box.space.demo:select() 70 | --- 71 | - [] 72 | ... 73 | test_run:cmd("switch master2") 74 | --- 75 | - true 76 | ...
77 | shard.wait_operations() 78 | --- 79 | ... 80 | box.space.demo:select() 81 | --- 82 | - - [0, 'test3'] 83 | - [1, 'test4'] 84 | ... 85 | test_run:cmd("switch default") 86 | --- 87 | - true 88 | ... 89 | box.space._shard_operations:select() 90 | --- 91 | - [] 92 | ... 93 | -- check that operation q_insert is in the shard 94 | shard.demo:check_operation(6, 0) 95 | --- 96 | - true 97 | ... 98 | -- check for a nonexistent operation 99 | shard.demo:check_operation('12345', 0) 100 | --- 101 | - false 102 | ... 103 | _ = test_run:cmd("stop server master1") 104 | --- 105 | ... 106 | _ = test_run:cmd("stop server master2") 107 | --- 108 | ... 109 | test_run:cmd("cleanup server master1") 110 | --- 111 | - true 112 | ... 113 | test_run:cmd("cleanup server master2") 114 | --- 115 | - true 116 | ... 117 | test_run:cmd("restart server default with cleanup=1") 118 | -------------------------------------------------------------------------------- /test/redundancy1/batch.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy1/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy1/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | -- two-phase operations 10 | batch = shard.q_begin() 11 | batch.demo:q_insert(1, {0, 'test'}) 12 | batch.demo:q_replace(2, {0, 'test2'}) 13 | batch.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 14 | batch.demo:q_insert(4, {1, 'test4'}) 15 | batch.demo:q_insert(5, {2, 'test_to_delete'}) 16 | batch.demo:q_delete(6, 2) 17 | batch:q_end() 18 | 19 | shard.wait_operations() 20 | box.space.demo:select() 21 | test_run:cmd("switch master1") 22 | shard.wait_operations() 23 | box.space.demo:select() 24 | test_run:cmd("switch master2") 25 | shard.wait_operations() 26 | box.space.demo:select() 27 | test_run:cmd("switch default") 28 | 29 | box.space._shard_operations:select() 30 | 31 | -- check that operation q_insert is in the shard 32 | shard.demo:check_operation(6, 0) 33 | -- check for a nonexistent operation 34 | shard.demo:check_operation('12345', 0) 35 | 36 | _ = test_run:cmd("stop server master1") 37 | _ = test_run:cmd("stop server master2") 38 | test_run:cmd("cleanup server master1") 39 | test_run:cmd("cleanup server master2") 40 | test_run:cmd("restart server default with cleanup=1") 41 | -------------------------------------------------------------------------------- /test/redundancy1/master.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '1' }; 10 | { uri = 'localhost:33132', zone = '2' }; 11 | }; 12 | login = 'tester'; 13 | password = 'pass'; 14 | redundancy = 1; 15 | binary = 33130; 16 | } 17 | 18 | box.cfg { 19 | slab_alloc_arena = 0.1; 20 | wal_mode = 'none'; 21 | listen = cfg.binary; 22 | custom_proc_title = "master" 23 | } 24 | 25 | require('console').listen(os.getenv('ADMIN')) 26 | 27 | if not box.space.demo then 28 | box.schema.user.create(cfg.login, { password = cfg.password }) 29 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 30 | 31 | local demo = box.schema.create_space('demo') 32 | demo:create_index('primary', {type = 'tree', parts = {1, 
'num'}}) 33 | local demo2 = box.schema.create_space('demo2') 34 | demo2:create_index('pk') 35 | demo2:create_index('sk', {parts = {2, 'unsigned', 3, 'unsigned'}}) 36 | end 37 | 38 | function print_shard_map() 39 | local result = {} 40 | for uri, hb_table in pairs(shard.get_heartbeat()) do 41 | table.insert(result, uri) 42 | for server, data in pairs(hb_table) do 43 | table.insert(result, server) 44 | table.insert(result, data.try) 45 | end 46 | end 47 | return result 48 | end 49 | 50 | -- init shards 51 | fiber.create(function() 52 | shard.init(cfg) 53 | end) 54 | 55 | -------------------------------------------------------------------------------- /test/redundancy1/master1.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '1' }; 10 | { uri = 'localhost:33132', zone = '2' }; 11 | }; 12 | login = 'tester'; 13 | password = 'pass'; 14 | redundancy = 1; 15 | binary = 33131; 16 | } 17 | 18 | box.cfg { 19 | slab_alloc_arena = 0.1; 20 | wal_mode = 'none'; 21 | listen = cfg.binary; 22 | custom_proc_title = "master1"; 23 | } 24 | 25 | require('console').listen(os.getenv('ADMIN')) 26 | 27 | if not box.space.demo then 28 | box.schema.user.create(cfg.login, { password = cfg.password }) 29 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 30 | 31 | local demo = box.schema.create_space('demo') 32 | demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 33 | local demo2 = box.schema.create_space('demo2') 34 | demo2:create_index('pk') 35 | demo2:create_index('sk', {parts = {2, 'unsigned', 3, 'unsigned'}}) 36 | end 37 | 38 | function print_shard_map() 39 | local result = {} 40 | for uri, hb_table in pairs(shard.get_heartbeat()) do 41 | table.insert(result, uri) 42 | for server, data in pairs(hb_table) do 43 | table.insert(result, server) 44 | table.insert(result, data.try) 45 | end 46 | end 47 | return result 48 | end 49 | 50 | -- init shards 51 | fiber.create(function() 52 | shard.init(cfg) 53 | end) 54 | -------------------------------------------------------------------------------- /test/redundancy1/master2.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '1' }; 10 | { uri = 'localhost:33132', zone = '2' }; 11 | }; 12 | login = 'tester'; 13 | password = 'pass'; 14 | redundancy = 1; 15 | binary = 33132; 16 | } 17 | 18 | box.cfg { 19 | slab_alloc_arena = 0.1; 20 | wal_mode = 'none'; 21 | listen = cfg.binary; 22 | custom_proc_title = "master2"; 23 | } 24 | 25 | require('console').listen(os.getenv('ADMIN')) 26 | 27 | if not box.space.demo then 28 | box.schema.user.create(cfg.login, { password = cfg.password }) 29 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 30 | 31 | local demo = box.schema.create_space('demo') 32 | demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 33 | local demo2 = box.schema.create_space('demo2') 34 | demo2:create_index('pk') 35 | demo2:create_index('sk', {parts = {2, 'unsigned', 3, 'unsigned'}}) 36 | end 37 | 38 | function print_shard_map() 39 | local result = {} 40 | for uri, hb_table in 
pairs(shard.get_heartbeat()) do 41 | table.insert(result, uri) 42 | for server, data in pairs(hb_table) do 43 | table.insert(result, server) 44 | table.insert(result, data.try) 45 | end 46 | end 47 | return result 48 | end 49 | 50 | -- init shards 51 | fiber.create(function() 52 | shard.init(cfg) 53 | end) 54 | 55 | -------------------------------------------------------------------------------- /test/redundancy1/monitoring.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy1/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy1/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | -- monitoring test 27 | shard.wait_table_fill() 28 | --- 29 | ... 30 | shard.is_table_filled() 31 | --- 32 | - true 33 | ... 34 | test_run:cmd("switch master1") 35 | --- 36 | - true 37 | ... 38 | shard.wait_table_fill() 39 | --- 40 | ... 41 | shard.is_table_filled() 42 | --- 43 | - true 44 | ... 45 | test_run:cmd("switch master2") 46 | --- 47 | - true 48 | ... 49 | shard.wait_table_fill() 50 | --- 51 | ... 52 | shard.is_table_filled() 53 | --- 54 | - true 55 | ... 56 | test_run:cmd("switch default") 57 | --- 58 | - true 59 | ... 60 | _ = test_run:cmd("stop server master1") 61 | --- 62 | ... 63 | _ = test_run:cmd("stop server master2") 64 | --- 65 | ... 66 | test_run:cmd("cleanup server master1") 67 | --- 68 | - true 69 | ... 70 | test_run:cmd("cleanup server master2") 71 | --- 72 | - true 73 | ... 74 | test_run:cmd("restart server default with cleanup=1") 75 | -------------------------------------------------------------------------------- /test/redundancy1/monitoring.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy1/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy1/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | -- monitoring test 10 | shard.wait_table_fill() 11 | shard.is_table_filled() 12 | 13 | test_run:cmd("switch master1") 14 | shard.wait_table_fill() 15 | shard.is_table_filled() 16 | 17 | test_run:cmd("switch master2") 18 | shard.wait_table_fill() 19 | shard.is_table_filled() 20 | 21 | test_run:cmd("switch default") 22 | 23 | _ = test_run:cmd("stop server master1") 24 | _ = test_run:cmd("stop server master2") 25 | test_run:cmd("cleanup server master1") 26 | test_run:cmd("cleanup server master2") 27 | test_run:cmd("restart server default with cleanup=1") 28 | -------------------------------------------------------------------------------- /test/redundancy1/node_down.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy1/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy1/master2.lua'") 12 | --- 13 | - true 14 | ... 
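-- Context for the node_down checks below: each node runs monitoring fibers
-- that exchange heartbeats, and shard.get_heartbeat() exposes the table they
-- maintain (the same one print_shard_map() above flattens). Once a dead node
-- is evicted, the cluster epoch advances, which is what shard.wait_epoch(2)
-- waits for. Dumping the raw table:
for uri, hb_table in pairs(shard.get_heartbeat()) do
    for server, data in pairs(hb_table) do
        print(uri, server, data.try)  -- data.try looks like a retry counter
    end
end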
15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | shard.wait_table_fill() 27 | --- 28 | ... 29 | shard.is_table_filled() 30 | --- 31 | - true 32 | ... 33 | test_run:cmd("switch master1") 34 | --- 35 | - true 36 | ... 37 | shard.wait_table_fill() 38 | --- 39 | ... 40 | shard.is_table_filled() 41 | --- 42 | - true 43 | ... 44 | test_run:cmd("switch master2") 45 | --- 46 | - true 47 | ... 48 | shard.wait_table_fill() 49 | --- 50 | ... 51 | shard.is_table_filled() 52 | --- 53 | - true 54 | ... 55 | test_run:cmd("switch default") 56 | --- 57 | - true 58 | ... 59 | -- Kill the server and wait for the monitoring fibers to detect it 60 | _ = test_run:cmd("stop server master1") 61 | --- 62 | ... 63 | -- Check that the node is removed from the shard 64 | shard.wait_epoch(2) 65 | --- 66 | ... 67 | shard.is_table_filled() 68 | --- 69 | - true 70 | ... 71 | test_run:cmd("switch master2") 72 | --- 73 | - true 74 | ... 75 | shard.wait_epoch(2) 76 | --- 77 | ... 78 | shard.is_table_filled() 79 | --- 80 | - true 81 | ... 82 | test_run:cmd("switch default") 83 | --- 84 | - true 85 | ... 86 | _ = test_run:cmd("stop server master2") 87 | --- 88 | ... 89 | test_run:cmd("cleanup server master1") 90 | --- 91 | - true 92 | ... 93 | test_run:cmd("cleanup server master2") 94 | --- 95 | - true 96 | ... 97 | test_run:cmd("restart server default with cleanup=1") 98 | -------------------------------------------------------------------------------- /test/redundancy1/node_down.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy1/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy1/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | shard.wait_table_fill() 9 | shard.is_table_filled() 10 | 11 | test_run:cmd("switch master1") 12 | shard.wait_table_fill() 13 | shard.is_table_filled() 14 | 15 | test_run:cmd("switch master2") 16 | shard.wait_table_fill() 17 | shard.is_table_filled() 18 | 19 | test_run:cmd("switch default") 20 | 21 | -- Kill the server and wait for the monitoring fibers to detect it 22 | _ = test_run:cmd("stop server master1") 23 | 24 | -- Check that the node is removed from the shard 25 | shard.wait_epoch(2) 26 | shard.is_table_filled() 27 | 28 | test_run:cmd("switch master2") 29 | shard.wait_epoch(2) 30 | shard.is_table_filled() 31 | test_run:cmd("switch default") 32 | 33 | _ = test_run:cmd("stop server master2") 34 | test_run:cmd("cleanup server master1") 35 | test_run:cmd("cleanup server master2") 36 | test_run:cmd("restart server default with cleanup=1") 37 | -------------------------------------------------------------------------------- /test/redundancy1/q_basic.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy1/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy1/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ...
26 | -- two-phase operations 27 | shard.demo:q_insert(1, {0, 'test'}) 28 | --- 29 | - [0, 'test'] 30 | ... 31 | shard.demo:q_replace(2, {0, 'test2'}) 32 | --- 33 | - [0, 'test2'] 34 | ... 35 | shard.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 36 | --- 37 | ... 38 | shard.demo:q_insert(4, {1, 'test4'}) 39 | --- 40 | - [1, 'test4'] 41 | ... 42 | shard.demo:q_insert(5, {2, 'test_to_delete'}) 43 | --- 44 | - [2, 'test_to_delete'] 45 | ... 46 | shard.demo:q_delete(6, 2) 47 | --- 48 | ... 49 | --# set connection default 50 | shard.wait_operations() 51 | --- 52 | ... 53 | box.space.demo:select() 54 | --- 55 | - [] 56 | ... 57 | test_run:cmd("switch master1") 58 | --- 59 | - true 60 | ... 61 | shard.wait_operations() 62 | --- 63 | ... 64 | box.space.demo:select() 65 | --- 66 | - [] 67 | ... 68 | test_run:cmd("switch master2") 69 | --- 70 | - true 71 | ... 72 | shard.wait_operations() 73 | --- 74 | ... 75 | box.space.demo:select() 76 | --- 77 | - - [0, 'test3'] 78 | - [1, 'test4'] 79 | ... 80 | test_run:cmd("switch default") 81 | --- 82 | - true 83 | ... 84 | -- check that operation q_insert is in the shard 85 | shard.demo:check_operation(1, 0) 86 | --- 87 | - true 88 | ... 89 | -- check for a nonexistent operation 90 | shard.demo:check_operation('12345', 0) 91 | --- 92 | - false 93 | ... 94 | _ = test_run:cmd("stop server master1") 95 | --- 96 | ... 97 | _ = test_run:cmd("stop server master2") 98 | --- 99 | ... 100 | test_run:cmd("cleanup server master1") 101 | --- 102 | - true 103 | ... 104 | test_run:cmd("cleanup server master2") 105 | --- 106 | - true 107 | ... 108 | test_run:cmd("restart server default with cleanup=1") 109 | -------------------------------------------------------------------------------- /test/redundancy1/q_basic.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy1/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy1/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | -- two-phase operations 10 | shard.demo:q_insert(1, {0, 'test'}) 11 | shard.demo:q_replace(2, {0, 'test2'}) 12 | shard.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 13 | shard.demo:q_insert(4, {1, 'test4'}) 14 | shard.demo:q_insert(5, {2, 'test_to_delete'}) 15 | shard.demo:q_delete(6, 2) 16 | 17 | --# set connection default 18 | shard.wait_operations() 19 | box.space.demo:select() 20 | test_run:cmd("switch master1") 21 | shard.wait_operations() 22 | box.space.demo:select() 23 | test_run:cmd("switch master2") 24 | shard.wait_operations() 25 | box.space.demo:select() 26 | test_run:cmd("switch default") 27 | 28 | -- check that operation q_insert is in the shard 29 | shard.demo:check_operation(1, 0) 30 | -- check for a nonexistent operation 31 | shard.demo:check_operation('12345', 0) 32 | 33 | _ = test_run:cmd("stop server master1") 34 | _ = test_run:cmd("stop server master2") 35 | test_run:cmd("cleanup server master1") 36 | test_run:cmd("cleanup server master2") 37 | test_run:cmd("restart server default with cleanup=1") 38 | -------------------------------------------------------------------------------- /test/redundancy1/shard.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ...
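-- What shard.result below verifies: shard.shard(key) resolves a key (numeric
-- or string) to the list of replicas that own it, and the list length tracks
-- the configured redundancy, hence 1 everywhere in this suite. A sketch (the
-- 'uri' field name matches what shard_status() reports, but is an assumption
-- here):
local replicas = shard.shard('abc')
print(#replicas)        -- 1 with redundancy = 1
print(replicas[1].uri)  -- the owning node, e.g. 'localhost:33130'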
7 | test_run:cmd("create server master1 with script='redundancy1/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy1/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | -- num keys 27 | #shard.shard(0) 28 | --- 29 | - 1 30 | ... 31 | -- str keys 32 | #shard.shard('abc') 33 | --- 34 | - 1 35 | ... 36 | _ = test_run:cmd("stop server master1") 37 | --- 38 | ... 39 | _ = test_run:cmd("stop server master2") 40 | --- 41 | ... 42 | test_run:cmd("cleanup server master1") 43 | --- 44 | - true 45 | ... 46 | test_run:cmd("cleanup server master2") 47 | --- 48 | - true 49 | ... 50 | test_run:cmd("restart server default with cleanup=1") 51 | -------------------------------------------------------------------------------- /test/redundancy1/shard.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy1/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy1/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | -- num keys 10 | #shard.shard(0) 11 | 12 | -- str keys 13 | #shard.shard('abc') 14 | 15 | _ = test_run:cmd("stop server master1") 16 | _ = test_run:cmd("stop server master2") 17 | test_run:cmd("cleanup server master1") 18 | test_run:cmd("cleanup server master2") 19 | test_run:cmd("restart server default with cleanup=1") 20 | -------------------------------------------------------------------------------- /test/redundancy1/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = redundancy 1 shard tests 4 | script = master.lua 5 | -------------------------------------------------------------------------------- /test/redundancy2/auto_increment.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy2/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy2/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | shard.demo:auto_increment{'test'} 27 | --- 28 | - - [1, 'test'] 29 | - [1, 'test'] 30 | ... 31 | shard.demo:auto_increment{'test2'} 32 | --- 33 | - - [4, 'test2'] 34 | - [4, 'test2'] 35 | ... 36 | shard.demo:auto_increment{'test3'} 37 | --- 38 | - - [7, 'test3'] 39 | - [7, 'test3'] 40 | ... 41 | shard.demo:q_auto_increment(1, {'test4'}) 42 | --- 43 | - [10, 'test4'] 44 | ... 45 | batch = shard.q_begin() 46 | --- 47 | ... 48 | batch.demo:q_auto_increment(2, {'test5'}) 49 | --- 50 | - [13, 'test5'] 51 | ... 52 | batch.demo:q_auto_increment(3, {'test6'}) 53 | --- 54 | - [16, 'test6'] 55 | ... 56 | batch:q_end() 57 | --- 58 | ... 59 | shard.wait_operations() 60 | --- 61 | ... 62 | box.space.demo:select() 63 | --- 64 | - [] 65 | ... 66 | test_run:cmd("switch master1") 67 | --- 68 | - true 69 | ...
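-- In this redundancy = 2 suite the plain (non-queued) writes return one
-- result per replica, which is why the auto_increment blocks above list the
-- same tuple twice; the queued q_* calls report a single tuple regardless.
-- A quick illustration of counting the acknowledgements ('probe' is an
-- arbitrary payload):
res = shard.demo:auto_increment{'probe'}
#res  -- 2 here; the redundancy1 fixtures show a single entry instead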
70 | shard.wait_operations() 71 | --- 72 | ... 73 | box.space.demo:select() 74 | --- 75 | - - [1, 'test'] 76 | - [4, 'test2'] 77 | - [7, 'test3'] 78 | - [10, 'test4'] 79 | - [13, 'test5'] 80 | - [16, 'test6'] 81 | ... 82 | test_run:cmd("switch master2") 83 | --- 84 | - true 85 | ... 86 | shard.wait_operations() 87 | --- 88 | ... 89 | box.space.demo:select() 90 | --- 91 | - - [1, 'test'] 92 | - [4, 'test2'] 93 | - [7, 'test3'] 94 | - [10, 'test4'] 95 | - [13, 'test5'] 96 | - [16, 'test6'] 97 | ... 98 | box.space._shard_operations:select() 99 | --- 100 | - - ['1', 2, [[512, 'insert', [[10, 'test4']]]]] 101 | - ['3', 2, [[512, 'insert', [[13, 'test5']]], [512, 'insert', [[16, 'test6']]]]] 102 | ... 103 | test_run:cmd("switch default") 104 | --- 105 | - true 106 | ... 107 | _ = test_run:cmd("stop server master1") 108 | --- 109 | ... 110 | _ = test_run:cmd("stop server master2") 111 | --- 112 | ... 113 | test_run:cmd("cleanup server master1") 114 | --- 115 | - true 116 | ... 117 | test_run:cmd("cleanup server master2") 118 | --- 119 | - true 120 | ... 121 | test_run:cmd("restart server default with cleanup=1") 122 | -------------------------------------------------------------------------------- /test/redundancy2/auto_increment.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy2/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy2/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | shard.demo:auto_increment{'test'} 10 | shard.demo:auto_increment{'test2'} 11 | shard.demo:auto_increment{'test3'} 12 | 13 | shard.demo:q_auto_increment(1, {'test4'}) 14 | batch = shard.q_begin() 15 | batch.demo:q_auto_increment(2, {'test5'}) 16 | batch.demo:q_auto_increment(3, {'test6'}) 17 | batch:q_end() 18 | 19 | shard.wait_operations() 20 | box.space.demo:select() 21 | test_run:cmd("switch master1") 22 | shard.wait_operations() 23 | box.space.demo:select() 24 | test_run:cmd("switch master2") 25 | shard.wait_operations() 26 | box.space.demo:select() 27 | 28 | box.space._shard_operations:select() 29 | test_run:cmd("switch default") 30 | 31 | _ = test_run:cmd("stop server master1") 32 | _ = test_run:cmd("stop server master2") 33 | test_run:cmd("cleanup server master1") 34 | test_run:cmd("cleanup server master2") 35 | test_run:cmd("restart server default with cleanup=1") 36 | -------------------------------------------------------------------------------- /test/redundancy2/basic.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy2/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy2/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | shard.demo:insert{1, 'test'} 27 | --- 28 | - - [1, 'test'] 29 | - [1, 'test'] 30 | ... 31 | shard.demo:replace{1, 'test2'} 32 | --- 33 | - - [1, 'test2'] 34 | - [1, 'test2'] 35 | ... 36 | shard.demo:update({1}, {{'=', 2, 'test3'}}) 37 | --- 38 | - - [1, 'test3'] 39 | - [1, 'test3'] 40 | ... 
41 | shard.demo:insert{2, 'test4'} 42 | --- 43 | - - [2, 'test4'] 44 | - [2, 'test4'] 45 | ... 46 | shard.demo:insert{3, 'test5'} 47 | --- 48 | - - [3, 'test5'] 49 | - [3, 'test5'] 50 | ... 51 | shard.demo:delete({3}) 52 | --- 53 | - - [3, 'test5'] 54 | - [3, 'test5'] 55 | ... 56 | box.space.demo:select() 57 | --- 58 | - [] 59 | ... 60 | test_run:cmd("switch master1") 61 | --- 62 | - true 63 | ... 64 | box.space.demo:select() 65 | --- 66 | - - [1, 'test3'] 67 | - [2, 'test4'] 68 | ... 69 | test_run:cmd("switch master2") 70 | --- 71 | - true 72 | ... 73 | box.space.demo:select() 74 | --- 75 | - - [1, 'test3'] 76 | - [2, 'test4'] 77 | ... 78 | test_run:cmd("switch default") 79 | --- 80 | - true 81 | ... 82 | _ = test_run:cmd("stop server master1") 83 | --- 84 | ... 85 | _ = test_run:cmd("stop server master2") 86 | --- 87 | ... 88 | test_run:cmd("cleanup server master1") 89 | --- 90 | - true 91 | ... 92 | test_run:cmd("cleanup server master2") 93 | --- 94 | - true 95 | ... 96 | test_run:cmd("restart server default with cleanup=1") 97 | -------------------------------------------------------------------------------- /test/redundancy2/basic.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy2/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy2/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | shard.demo:insert{1, 'test'} 10 | shard.demo:replace{1, 'test2'} 11 | shard.demo:update({1}, {{'=', 2, 'test3'}}) 12 | shard.demo:insert{2, 'test4'} 13 | shard.demo:insert{3, 'test5'} 14 | shard.demo:delete({3}) 15 | 16 | box.space.demo:select() 17 | test_run:cmd("switch master1") 18 | box.space.demo:select() 19 | test_run:cmd("switch master2") 20 | box.space.demo:select() 21 | test_run:cmd("switch default") 22 | 23 | _ = test_run:cmd("stop server master1") 24 | _ = test_run:cmd("stop server master2") 25 | test_run:cmd("cleanup server master1") 26 | test_run:cmd("cleanup server master2") 27 | test_run:cmd("restart server default with cleanup=1") 28 | -------------------------------------------------------------------------------- /test/redundancy2/batch.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy2/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy2/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | -- biphase operations 27 | batch = shard.q_begin() 28 | --- 29 | ... 30 | batch.demo:q_insert(1, {0, 'test'}) 31 | --- 32 | - [0, 'test'] 33 | ... 34 | batch.demo:q_replace(2, {0, 'test2'}) 35 | --- 36 | - [0, 'test2'] 37 | ... 38 | batch.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 39 | --- 40 | ... 41 | batch.demo:q_insert(4, {1, 'test4'}) 42 | --- 43 | - [1, 'test4'] 44 | ... 45 | batch.demo:q_insert(5, {2, 'test_to_delete'}) 46 | --- 47 | - [2, 'test_to_delete'] 48 | ... 49 | batch.demo:q_delete(6, 2) 50 | --- 51 | ... 52 | batch:q_end() 53 | --- 54 | ... 55 | shard.wait_operations() 56 | --- 57 | ...
58 | box.space.demo:select() 59 | --- 60 | - [] 61 | ... 62 | test_run:cmd("switch master1") 63 | --- 64 | - true 65 | ... 66 | shard.wait_operations() 67 | --- 68 | ... 69 | box.space.demo:select() 70 | --- 71 | - - [0, 'test3'] 72 | - [1, 'test4'] 73 | ... 74 | test_run:cmd("switch master2") 75 | --- 76 | - true 77 | ... 78 | shard.wait_operations() 79 | --- 80 | ... 81 | box.space.demo:select() 82 | --- 83 | - - [0, 'test3'] 84 | - [1, 'test4'] 85 | ... 86 | box.space._shard_operations:select() 87 | --- 88 | - - ['6', 2, [[512, 'insert', [[0, 'test']]], [512, 'replace', [[0, 'test2']]], [ 89 | 512, 'update', [0, [['=', 2, 'test3']]]], [512, 'insert', [[1, 'test4']]], 90 | [512, 'insert', [[2, 'test_to_delete']]], [512, 'delete', [2]]]] 91 | ... 92 | test_run:cmd("switch default") 93 | --- 94 | - true 95 | ... 96 | -- check that operation q_insert is in the shard 97 | shard.demo:check_operation(6, 0) 98 | --- 99 | - true 100 | ... 101 | -- check for nonexistent operations 102 | shard.demo:check_operation('12345', 0) 103 | --- 104 | - false 105 | ... 106 | _ = test_run:cmd("stop server master1") 107 | --- 108 | ... 109 | _ = test_run:cmd("stop server master2") 110 | --- 111 | ... 112 | test_run:cmd("cleanup server master1") 113 | --- 114 | - true 115 | ... 116 | test_run:cmd("cleanup server master2") 117 | --- 118 | - true 119 | ... 120 | test_run:cmd("restart server default with cleanup=1") 121 | -------------------------------------------------------------------------------- /test/redundancy2/batch.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy2/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy2/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | -- biphase operations 10 | batch = shard.q_begin() 11 | batch.demo:q_insert(1, {0, 'test'}) 12 | batch.demo:q_replace(2, {0, 'test2'}) 13 | batch.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 14 | batch.demo:q_insert(4, {1, 'test4'}) 15 | batch.demo:q_insert(5, {2, 'test_to_delete'}) 16 | batch.demo:q_delete(6, 2) 17 | batch:q_end() 18 | 19 | shard.wait_operations() 20 | box.space.demo:select() 21 | test_run:cmd("switch master1") 22 | shard.wait_operations() 23 | box.space.demo:select() 24 | test_run:cmd("switch master2") 25 | shard.wait_operations() 26 | box.space.demo:select() 27 | 28 | box.space._shard_operations:select() 29 | 30 | test_run:cmd("switch default") 31 | 32 | -- check that operation q_insert is in the shard 33 | shard.demo:check_operation(6, 0) 34 | -- check for nonexistent operations 35 | shard.demo:check_operation('12345', 0) 36 | 37 | _ = test_run:cmd("stop server master1") 38 | _ = test_run:cmd("stop server master2") 39 | test_run:cmd("cleanup server master1") 40 | test_run:cmd("cleanup server master2") 41 | test_run:cmd("restart server default with cleanup=1") 42 | -------------------------------------------------------------------------------- /test/redundancy2/master.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '1' }; 10 | { uri = 'localhost:33132', zone = '2' }; 11 | }; 12 | login = 'tester'; 13 |
password = 'pass'; 14 | redundancy = 2; 15 | binary = 33130; 16 | } 17 | 18 | box.cfg { 19 | slab_alloc_arena = 0.1; 20 | wal_mode = 'none'; 21 | listen = cfg.binary; 22 | custom_proc_title = "master" 23 | } 24 | 25 | require('console').listen(os.getenv('ADMIN')) 26 | 27 | if not box.space.demo then 28 | box.schema.user.create(cfg.login, { password = cfg.password }) 29 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 30 | 31 | local demo = box.schema.create_space('demo') 32 | demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 33 | end 34 | 35 | function print_shard_map() 36 | local result = {} 37 | for uri, hb_table in pairs(shard.get_heartbeat()) do 38 | table.insert(result, uri) 39 | for server, data in pairs(hb_table) do 40 | table.insert(result, server) 41 | table.insert(result, data.try) 42 | end 43 | end 44 | return result 45 | end 46 | 47 | -- init shards 48 | fiber.create(function() 49 | shard.init(cfg) 50 | end) 51 | 52 | -------------------------------------------------------------------------------- /test/redundancy2/master1.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '1' }; 10 | { uri = 'localhost:33132', zone = '2' }; 11 | }; 12 | login = 'tester'; 13 | password = 'pass'; 14 | redundancy = 2; 15 | binary = 33131; 16 | } 17 | 18 | box.cfg { 19 | slab_alloc_arena = 0.1; 20 | wal_mode = 'none'; 21 | listen = cfg.binary; 22 | custom_proc_title = "master1"; 23 | } 24 | 25 | require('console').listen(os.getenv('ADMIN')) 26 | 27 | if not box.space.demo then 28 | box.schema.user.create(cfg.login, { password = cfg.password }) 29 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 30 | 31 | local demo = box.schema.create_space('demo') 32 | demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 33 | end 34 | 35 | function print_shard_map() 36 | local result = {} 37 | for uri, hb_table in pairs(shard.get_heartbeat()) do 38 | table.insert(result, uri) 39 | for server, data in pairs(hb_table) do 40 | table.insert(result, server) 41 | table.insert(result, data.try) 42 | end 43 | end 44 | return result 45 | end 46 | 47 | -- init shards 48 | fiber.create(function() 49 | shard.init(cfg) 50 | end) 51 | -------------------------------------------------------------------------------- /test/redundancy2/master2.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '1' }; 10 | { uri = 'localhost:33132', zone = '2' }; 11 | }; 12 | login = 'tester'; 13 | password = 'pass'; 14 | redundancy = 2; 15 | binary = 33132; 16 | } 17 | 18 | box.cfg { 19 | slab_alloc_arena = 0.1; 20 | wal_mode = 'none'; 21 | listen = cfg.binary; 22 | custom_proc_title = "master2"; 23 | } 24 | 25 | require('console').listen(os.getenv('ADMIN')) 26 | 27 | if not box.space.demo then 28 | box.schema.user.create(cfg.login, { password = cfg.password }) 29 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 30 | 31 | local demo = box.schema.create_space('demo') 32 | demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 33 
| end 34 | 35 | function print_shard_map() 36 | local result = {} 37 | for uri, hb_table in pairs(shard.get_heartbeat()) do 38 | table.insert(result, uri) 39 | for server, data in pairs(hb_table) do 40 | table.insert(result, server) 41 | table.insert(result, data.try) 42 | end 43 | end 44 | return result 45 | end 46 | 47 | -- init shards 48 | fiber.create(function() 49 | shard.init(cfg) 50 | end) 51 | 52 | -------------------------------------------------------------------------------- /test/redundancy2/monitoring.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy2/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy2/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | -- monitoring test 27 | shard.wait_table_fill() 28 | --- 29 | ... 30 | shard.is_table_filled() 31 | --- 32 | - true 33 | ... 34 | test_run:cmd("switch master1") 35 | --- 36 | - true 37 | ... 38 | shard.wait_table_fill() 39 | --- 40 | ... 41 | shard.is_table_filled() 42 | --- 43 | - true 44 | ... 45 | test_run:cmd("switch master2") 46 | --- 47 | - true 48 | ... 49 | shard.wait_table_fill() 50 | --- 51 | ... 52 | shard.is_table_filled() 53 | --- 54 | - true 55 | ... 56 | test_run:cmd("switch default") 57 | --- 58 | - true 59 | ... 60 | _ = test_run:cmd("stop server master1") 61 | --- 62 | ... 63 | _ = test_run:cmd("stop server master2") 64 | --- 65 | ... 66 | test_run:cmd("cleanup server master1") 67 | --- 68 | - true 69 | ... 70 | test_run:cmd("cleanup server master2") 71 | --- 72 | - true 73 | ... 74 | test_run:cmd("restart server default with cleanup=1") 75 | -------------------------------------------------------------------------------- /test/redundancy2/monitoring.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy2/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy2/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | -- monitoring test 10 | shard.wait_table_fill() 11 | shard.is_table_filled() 12 | 13 | test_run:cmd("switch master1") 14 | shard.wait_table_fill() 15 | shard.is_table_filled() 16 | 17 | test_run:cmd("switch master2") 18 | shard.wait_table_fill() 19 | shard.is_table_filled() 20 | 21 | test_run:cmd("switch default") 22 | 23 | _ = test_run:cmd("stop server master1") 24 | _ = test_run:cmd("stop server master2") 25 | test_run:cmd("cleanup server master1") 26 | test_run:cmd("cleanup server master2") 27 | test_run:cmd("restart server default with cleanup=1") 28 | -------------------------------------------------------------------------------- /test/redundancy2/node_down.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy2/master1.lua'") 8 | --- 9 | - true 10 | ... 
11 | test_run:cmd("create server master2 with script='redundancy2/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | shard.wait_table_fill() 27 | --- 28 | ... 29 | shard.is_table_filled() 30 | --- 31 | - true 32 | ... 33 | test_run:cmd("switch master1") 34 | --- 35 | - true 36 | ... 37 | shard.wait_table_fill() 38 | --- 39 | ... 40 | shard.is_table_filled() 41 | --- 42 | - true 43 | ... 44 | test_run:cmd("switch master2") 45 | --- 46 | - true 47 | ... 48 | shard.wait_table_fill() 49 | --- 50 | ... 51 | shard.is_table_filled() 52 | --- 53 | - true 54 | ... 55 | test_run:cmd("switch default") 56 | --- 57 | - true 58 | ... 59 | -- Kill server and wait for monitoring fibers kill 60 | _ = test_run:cmd("stop server master1") 61 | --- 62 | ... 63 | -- Check that node is removed from shard 64 | shard.wait_epoch(2) 65 | --- 66 | ... 67 | shard.is_table_filled() 68 | --- 69 | - true 70 | ... 71 | test_run:cmd("switch master2") 72 | --- 73 | - true 74 | ... 75 | shard.wait_epoch(2) 76 | --- 77 | ... 78 | shard.is_table_filled() 79 | --- 80 | - true 81 | ... 82 | test_run:cmd("switch default") 83 | --- 84 | - true 85 | ... 86 | _ = test_run:cmd("stop server master2") 87 | --- 88 | ... 89 | test_run:cmd("cleanup server master1") 90 | --- 91 | - true 92 | ... 93 | test_run:cmd("cleanup server master2") 94 | --- 95 | - true 96 | ... 97 | test_run:cmd("restart server default with cleanup=1") 98 | -------------------------------------------------------------------------------- /test/redundancy2/node_down.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy2/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy2/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | shard.wait_table_fill() 10 | shard.is_table_filled() 11 | 12 | test_run:cmd("switch master1") 13 | shard.wait_table_fill() 14 | shard.is_table_filled() 15 | 16 | test_run:cmd("switch master2") 17 | shard.wait_table_fill() 18 | shard.is_table_filled() 19 | 20 | test_run:cmd("switch default") 21 | 22 | -- Kill server and wait for monitoring fibers kill 23 | _ = test_run:cmd("stop server master1") 24 | 25 | 26 | -- Check that node is removed from shard 27 | shard.wait_epoch(2) 28 | shard.is_table_filled() 29 | 30 | test_run:cmd("switch master2") 31 | shard.wait_epoch(2) 32 | shard.is_table_filled() 33 | 34 | test_run:cmd("switch default") 35 | _ = test_run:cmd("stop server master2") 36 | test_run:cmd("cleanup server master1") 37 | test_run:cmd("cleanup server master2") 38 | test_run:cmd("restart server default with cleanup=1") 39 | -------------------------------------------------------------------------------- /test/redundancy2/q_basic.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy2/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy2/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 
19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | -- bipahse operations 27 | shard.demo:q_insert(1, {0, 'test'}) 28 | --- 29 | - [0, 'test'] 30 | ... 31 | shard.demo:q_replace(2, {0, 'test2'}) 32 | --- 33 | - [0, 'test2'] 34 | ... 35 | shard.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 36 | --- 37 | ... 38 | shard.demo:q_insert(4, {1, 'test4'}) 39 | --- 40 | - [1, 'test4'] 41 | ... 42 | shard.demo:q_insert(5, {2, 'test_to_delete'}) 43 | --- 44 | - [2, 'test_to_delete'] 45 | ... 46 | shard.demo:q_delete(6, 2) 47 | --- 48 | ... 49 | shard.wait_operations() 50 | --- 51 | ... 52 | box.space.demo:select() 53 | --- 54 | - [] 55 | ... 56 | test_run:cmd("switch master1") 57 | --- 58 | - true 59 | ... 60 | shard.wait_operations() 61 | --- 62 | ... 63 | box.space.demo:select() 64 | --- 65 | - - [0, 'test3'] 66 | - [1, 'test4'] 67 | ... 68 | test_run:cmd("switch master2") 69 | --- 70 | - true 71 | ... 72 | shard.wait_operations() 73 | --- 74 | ... 75 | box.space.demo:select() 76 | --- 77 | - - [0, 'test3'] 78 | - [1, 'test4'] 79 | ... 80 | test_run:cmd("switch default") 81 | --- 82 | - true 83 | ... 84 | -- check for operation q_insert is in shard 85 | shard.demo:check_operation(1, 0) 86 | --- 87 | - true 88 | ... 89 | -- check for not exists operations 90 | shard.demo:check_operation('12345', 0) 91 | --- 92 | - false 93 | ... 94 | _ = test_run:cmd("stop server master1") 95 | --- 96 | ... 97 | _ = test_run:cmd("stop server master2") 98 | --- 99 | ... 100 | test_run:cmd("cleanup server master1") 101 | --- 102 | - true 103 | ... 104 | test_run:cmd("cleanup server master2") 105 | --- 106 | - true 107 | ... 108 | test_run:cmd("restart server default with cleanup=1") 109 | -------------------------------------------------------------------------------- /test/redundancy2/q_basic.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy2/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy2/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | -- bipahse operations 10 | shard.demo:q_insert(1, {0, 'test'}) 11 | shard.demo:q_replace(2, {0, 'test2'}) 12 | shard.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 13 | shard.demo:q_insert(4, {1, 'test4'}) 14 | shard.demo:q_insert(5, {2, 'test_to_delete'}) 15 | shard.demo:q_delete(6, 2) 16 | 17 | shard.wait_operations() 18 | box.space.demo:select() 19 | test_run:cmd("switch master1") 20 | shard.wait_operations() 21 | box.space.demo:select() 22 | test_run:cmd("switch master2") 23 | shard.wait_operations() 24 | box.space.demo:select() 25 | test_run:cmd("switch default") 26 | 27 | -- check for operation q_insert is in shard 28 | shard.demo:check_operation(1, 0) 29 | -- check for not exists operations 30 | shard.demo:check_operation('12345', 0) 31 | 32 | _ = test_run:cmd("stop server master1") 33 | _ = test_run:cmd("stop server master2") 34 | test_run:cmd("cleanup server master1") 35 | test_run:cmd("cleanup server master2") 36 | test_run:cmd("restart server default with cleanup=1") 37 | -------------------------------------------------------------------------------- /test/redundancy2/shard.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 
4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy2/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy2/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | -- num keys 27 | #shard.shard(0) 28 | --- 29 | - 2 30 | ... 31 | -- str keys 32 | #shard.shard('abc') 33 | --- 34 | - 2 35 | ... 36 | _ = test_run:cmd("stop server master1") 37 | --- 38 | ... 39 | _ = test_run:cmd("stop server master2") 40 | --- 41 | ... 42 | test_run:cmd("cleanup server master1") 43 | --- 44 | - true 45 | ... 46 | test_run:cmd("cleanup server master2") 47 | --- 48 | - true 49 | ... 50 | test_run:cmd("restart server default with cleanup=1") 51 | -------------------------------------------------------------------------------- /test/redundancy2/shard.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy2/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy2/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | -- num keys 10 | #shard.shard(0) 11 | 12 | -- str keys 13 | #shard.shard('abc') 14 | 15 | _ = test_run:cmd("stop server master1") 16 | _ = test_run:cmd("stop server master2") 17 | test_run:cmd("cleanup server master1") 18 | test_run:cmd("cleanup server master2") 19 | test_run:cmd("restart server default with cleanup=1") 20 | -------------------------------------------------------------------------------- /test/redundancy2/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = redundancy 2 shard tests 4 | script = master.lua 5 | -------------------------------------------------------------------------------- /test/redundancy3/auto_increment.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy3/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy3/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | shard.demo:auto_increment{'test'} 27 | --- 28 | - - [1, 'test'] 29 | - [1, 'test'] 30 | - [1, 'test'] 31 | ... 32 | shard.demo:auto_increment{'test2'} 33 | --- 34 | - - [4, 'test2'] 35 | - [4, 'test2'] 36 | - [4, 'test2'] 37 | ... 38 | shard.demo:auto_increment{'test3'} 39 | --- 40 | - - [7, 'test3'] 41 | - [7, 'test3'] 42 | - [7, 'test3'] 43 | ... 44 | shard.demo:q_auto_increment(1, {'test4'}) 45 | --- 46 | - [10, 'test4'] 47 | ... 48 | batch = shard.q_begin() 49 | --- 50 | ... 51 | batch.demo:q_auto_increment(2, {'test5'}) 52 | --- 53 | - [13, 'test5'] 54 | ... 55 | batch.demo:q_auto_increment(3, {'test6'}) 56 | --- 57 | - [16, 'test6'] 58 | ... 59 | batch:q_end() 60 | --- 61 | ... 62 | shard.wait_operations() 63 | --- 64 | ...
65 | box.space.demo:select() 66 | --- 67 | - - [1, 'test'] 68 | - [4, 'test2'] 69 | - [7, 'test3'] 70 | - [10, 'test4'] 71 | - [13, 'test5'] 72 | - [16, 'test6'] 73 | ... 74 | test_run:cmd("switch master1") 75 | --- 76 | - true 77 | ... 78 | shard.wait_operations() 79 | --- 80 | ... 81 | box.space.demo:select() 82 | --- 83 | - - [1, 'test'] 84 | - [4, 'test2'] 85 | - [7, 'test3'] 86 | - [10, 'test4'] 87 | - [13, 'test5'] 88 | - [16, 'test6'] 89 | ... 90 | test_run:cmd("switch master2") 91 | --- 92 | - true 93 | ... 94 | shard.wait_operations() 95 | --- 96 | ... 97 | box.space.demo:select() 98 | --- 99 | - - [1, 'test'] 100 | - [4, 'test2'] 101 | - [7, 'test3'] 102 | - [10, 'test4'] 103 | - [13, 'test5'] 104 | - [16, 'test6'] 105 | ... 106 | test_run:cmd("switch default") 107 | --- 108 | - true 109 | ... 110 | box.space._shard_operations:select() 111 | --- 112 | - - ['1', 2, [[512, 'insert', [[10, 'test4']]]]] 113 | - ['3', 2, [[512, 'insert', [[13, 'test5']]], [512, 'insert', [[16, 'test6']]]]] 114 | ... 115 | _ = test_run:cmd("stop server master1") 116 | --- 117 | ... 118 | _ = test_run:cmd("stop server master2") 119 | --- 120 | ... 121 | test_run:cmd("cleanup server master1") 122 | --- 123 | - true 124 | ... 125 | test_run:cmd("cleanup server master2") 126 | --- 127 | - true 128 | ... 129 | test_run:cmd("restart server default with cleanup=1") 130 | -------------------------------------------------------------------------------- /test/redundancy3/auto_increment.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy3/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy3/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | shard.demo:auto_increment{'test'} 10 | shard.demo:auto_increment{'test2'} 11 | shard.demo:auto_increment{'test3'} 12 | 13 | shard.demo:q_auto_increment(1, {'test4'}) 14 | batch = shard.q_begin() 15 | batch.demo:q_auto_increment(2, {'test5'}) 16 | batch.demo:q_auto_increment(3, {'test6'}) 17 | batch:q_end() 18 | 19 | shard.wait_operations() 20 | box.space.demo:select() 21 | test_run:cmd("switch master1") 22 | shard.wait_operations() 23 | box.space.demo:select() 24 | test_run:cmd("switch master2") 25 | shard.wait_operations() 26 | box.space.demo:select() 27 | test_run:cmd("switch default") 28 | 29 | box.space._shard_operations:select() 30 | 31 | _ = test_run:cmd("stop server master1") 32 | _ = test_run:cmd("stop server master2") 33 | test_run:cmd("cleanup server master1") 34 | test_run:cmd("cleanup server master2") 35 | test_run:cmd("restart server default with cleanup=1") 36 | -------------------------------------------------------------------------------- /test/redundancy3/basic.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy3/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy3/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 
26 | shard.demo:insert{1, 'test'} 27 | --- 28 | - - [1, 'test'] 29 | - [1, 'test'] 30 | - [1, 'test'] 31 | ... 32 | shard.demo:replace{1, 'test2'} 33 | --- 34 | - - [1, 'test2'] 35 | - [1, 'test2'] 36 | - [1, 'test2'] 37 | ... 38 | shard.demo:update({1}, {{'=', 2, 'test3'}}) 39 | --- 40 | - - [1, 'test3'] 41 | - [1, 'test3'] 42 | - [1, 'test3'] 43 | ... 44 | shard.demo:insert{2, 'test4'} 45 | --- 46 | - - [2, 'test4'] 47 | - [2, 'test4'] 48 | - [2, 'test4'] 49 | ... 50 | shard.demo:insert{3, 'test5'} 51 | --- 52 | - - [3, 'test5'] 53 | - [3, 'test5'] 54 | - [3, 'test5'] 55 | ... 56 | shard.demo:delete({3}) 57 | --- 58 | - - [3, 'test5'] 59 | - [3, 'test5'] 60 | - [3, 'test5'] 61 | ... 62 | box.space.demo:select() 63 | --- 64 | - - [1, 'test3'] 65 | - [2, 'test4'] 66 | ... 67 | test_run:cmd("switch master1") 68 | --- 69 | - true 70 | ... 71 | box.space.demo:select() 72 | --- 73 | - - [1, 'test3'] 74 | - [2, 'test4'] 75 | ... 76 | test_run:cmd("switch master2") 77 | --- 78 | - true 79 | ... 80 | box.space.demo:select() 81 | --- 82 | - - [1, 'test3'] 83 | - [2, 'test4'] 84 | ... 85 | test_run:cmd("switch default") 86 | --- 87 | - true 88 | ... 89 | _ = test_run:cmd("stop server master1") 90 | --- 91 | ... 92 | _ = test_run:cmd("stop server master2") 93 | --- 94 | ... 95 | test_run:cmd("cleanup server master1") 96 | --- 97 | - true 98 | ... 99 | test_run:cmd("cleanup server master2") 100 | --- 101 | - true 102 | ... 103 | test_run:cmd("restart server default with cleanup=1") 104 | -------------------------------------------------------------------------------- /test/redundancy3/basic.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy3/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy3/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | shard.demo:insert{1, 'test'} 10 | shard.demo:replace{1, 'test2'} 11 | shard.demo:update({1}, {{'=', 2, 'test3'}}) 12 | shard.demo:insert{2, 'test4'} 13 | shard.demo:insert{3, 'test5'} 14 | shard.demo:delete({3}) 15 | 16 | box.space.demo:select() 17 | test_run:cmd("switch master1") 18 | box.space.demo:select() 19 | test_run:cmd("switch master2") 20 | box.space.demo:select() 21 | test_run:cmd("switch default") 22 | 23 | _ = test_run:cmd("stop server master1") 24 | _ = test_run:cmd("stop server master2") 25 | test_run:cmd("cleanup server master1") 26 | test_run:cmd("cleanup server master2") 27 | test_run:cmd("restart server default with cleanup=1") 28 | -------------------------------------------------------------------------------- /test/redundancy3/batch.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy3/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy3/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | -- biphase operations 27 | batch_obj = shard.q_begin() 28 | --- 29 | ... 30 | batch_obj.demo:q_insert(1, {0, 'test'}) 31 | --- 32 | - [0, 'test'] 33 | ...
34 | batch_obj.demo:q_replace(2, {0, 'test2'}) 35 | --- 36 | - [0, 'test2'] 37 | ... 38 | batch_obj.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 39 | --- 40 | ... 41 | batch_obj.demo:q_insert(4, {1, 'test4'}) 42 | --- 43 | - [1, 'test4'] 44 | ... 45 | batch_obj.demo:q_insert(5, {2, 'test_to_delete'}) 46 | --- 47 | - [2, 'test_to_delete'] 48 | ... 49 | batch_obj.demo:q_delete(6, 2) 50 | --- 51 | ... 52 | batch_obj:q_end() 53 | --- 54 | ... 55 | shard.wait_operations() 56 | --- 57 | ... 58 | box.space.demo:select() 59 | --- 60 | - - [0, 'test3'] 61 | - [1, 'test4'] 62 | ... 63 | test_run:cmd("switch master1") 64 | --- 65 | - true 66 | ... 67 | shard.wait_operations() 68 | --- 69 | ... 70 | box.space.demo:select() 71 | --- 72 | - - [0, 'test3'] 73 | - [1, 'test4'] 74 | ... 75 | test_run:cmd("switch master2") 76 | --- 77 | - true 78 | ... 79 | shard.wait_operations() 80 | --- 81 | ... 82 | box.space.demo:select() 83 | --- 84 | - - [0, 'test3'] 85 | - [1, 'test4'] 86 | ... 87 | test_run:cmd("switch default") 88 | --- 89 | - true 90 | ... 91 | -- check that operation q_insert is in the shard 92 | shard.demo:check_operation(6, 0) 93 | --- 94 | - true 95 | ... 96 | -- check for nonexistent operations 97 | shard.demo:check_operation('12345', 0) 98 | --- 99 | - false 100 | ... 101 | box.space._shard_operations:select() 102 | --- 103 | - - ['6', 2, [[512, 'insert', [[0, 'test']]], [512, 'replace', [[0, 'test2']]], [ 104 | 512, 'update', [0, [['=', 2, 'test3']]]], [512, 'insert', [[1, 'test4']]], 105 | [512, 'insert', [[2, 'test_to_delete']]], [512, 'delete', [2]]]] 106 | ... 107 | _ = test_run:cmd("stop server master1") 108 | --- 109 | ... 110 | _ = test_run:cmd("stop server master2") 111 | --- 112 | ... 113 | test_run:cmd("cleanup server master1") 114 | --- 115 | - true 116 | ... 117 | test_run:cmd("cleanup server master2") 118 | --- 119 | - true 120 | ...
121 | test_run:cmd("restart server default with cleanup=1") 122 | -------------------------------------------------------------------------------- /test/redundancy3/batch.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy3/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy3/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | -- bipahse operations 10 | batch_obj = shard.q_begin() 11 | batch_obj.demo:q_insert(1, {0, 'test'}) 12 | batch_obj.demo:q_replace(2, {0, 'test2'}) 13 | batch_obj.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 14 | batch_obj.demo:q_insert(4, {1, 'test4'}) 15 | batch_obj.demo:q_insert(5, {2, 'test_to_delete'}) 16 | batch_obj.demo:q_delete(6, 2) 17 | batch_obj:q_end() 18 | 19 | shard.wait_operations() 20 | box.space.demo:select() 21 | test_run:cmd("switch master1") 22 | shard.wait_operations() 23 | box.space.demo:select() 24 | test_run:cmd("switch master2") 25 | shard.wait_operations() 26 | box.space.demo:select() 27 | test_run:cmd("switch default") 28 | 29 | -- check for operation q_insert is in shard 30 | shard.demo:check_operation(6, 0) 31 | -- check for not exists operations 32 | shard.demo:check_operation('12345', 0) 33 | 34 | box.space._shard_operations:select() 35 | 36 | _ = test_run:cmd("stop server master1") 37 | _ = test_run:cmd("stop server master2") 38 | test_run:cmd("cleanup server master1") 39 | test_run:cmd("cleanup server master2") 40 | test_run:cmd("restart server default with cleanup=1") 41 | -------------------------------------------------------------------------------- /test/redundancy3/master.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '1' }; 10 | { uri = 'localhost:33132', zone = '2' }; 11 | }; 12 | login = 'tester'; 13 | password = 'pass'; 14 | redundancy = 3; 15 | binary = 33130; 16 | } 17 | 18 | box.cfg { 19 | slab_alloc_arena = 0.1; 20 | wal_mode = 'none'; 21 | listen = cfg.binary; 22 | custom_proc_title = "master" 23 | } 24 | 25 | require('console').listen(os.getenv('ADMIN')) 26 | 27 | if not box.space.demo then 28 | box.schema.user.create(cfg.login, { password = cfg.password }) 29 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 30 | 31 | local demo = box.schema.create_space('demo') 32 | demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 33 | end 34 | 35 | function print_shard_map() 36 | local result = {} 37 | for uri, hb_table in pairs(shard.get_heartbeat()) do 38 | table.insert(result, uri) 39 | for server, data in pairs(hb_table) do 40 | table.insert(result, server) 41 | table.insert(result, data.try) 42 | end 43 | end 44 | return result 45 | end 46 | 47 | -- init shards 48 | fiber.create(function() 49 | shard.init(cfg) 50 | end) 51 | 52 | -------------------------------------------------------------------------------- /test/redundancy3/master1.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { 
uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '1' }; 10 | { uri = 'localhost:33132', zone = '2' }; 11 | }; 12 | login = 'tester'; 13 | password = 'pass'; 14 | redundancy = 3; 15 | binary = 33131; 16 | } 17 | 18 | box.cfg { 19 | slab_alloc_arena = 0.1; 20 | wal_mode = 'none'; 21 | listen = cfg.binary; 22 | custom_proc_title = "master1"; 23 | } 24 | 25 | require('console').listen(os.getenv('ADMIN')) 26 | 27 | if not box.space.demo then 28 | box.schema.user.create(cfg.login, { password = cfg.password }) 29 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 30 | 31 | local demo = box.schema.create_space('demo') 32 | demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 33 | end 34 | 35 | function print_shard_map() 36 | local result = {} 37 | for uri, hb_table in pairs(shard.get_heartbeat()) do 38 | table.insert(result, uri) 39 | for server, data in pairs(hb_table) do 40 | table.insert(result, server) 41 | table.insert(result, data.try) 42 | end 43 | end 44 | return result 45 | end 46 | 47 | -- init shards 48 | fiber.create(function() 49 | shard.init(cfg) 50 | end) 51 | -------------------------------------------------------------------------------- /test/redundancy3/master2.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | shard = require('shard') 3 | os = require('os') 4 | fiber = require('fiber') 5 | 6 | local cfg = { 7 | servers = { 8 | { uri = 'localhost:33130', zone = '0' }; 9 | { uri = 'localhost:33131', zone = '1' }; 10 | { uri = 'localhost:33132', zone = '2' }; 11 | }; 12 | login = 'tester'; 13 | password = 'pass'; 14 | redundancy = 3; 15 | binary = 33132; 16 | } 17 | 18 | box.cfg { 19 | slab_alloc_arena = 0.1; 20 | wal_mode = 'none'; 21 | listen = cfg.binary; 22 | custom_proc_title = "master2"; 23 | } 24 | 25 | require('console').listen(os.getenv('ADMIN')) 26 | 27 | if not box.space.demo then 28 | box.schema.user.create(cfg.login, { password = cfg.password }) 29 | box.schema.user.grant(cfg.login, 'read,write,execute', 'universe') 30 | 31 | local demo = box.schema.create_space('demo') 32 | demo:create_index('primary', {type = 'tree', parts = {1, 'num'}}) 33 | end 34 | 35 | function print_shard_map() 36 | local result = {} 37 | for uri, hb_table in pairs(shard.get_heartbeat()) do 38 | table.insert(result, uri) 39 | for server, data in pairs(hb_table) do 40 | table.insert(result, server) 41 | table.insert(result, data.try) 42 | end 43 | end 44 | return result 45 | end 46 | 47 | -- init shards 48 | fiber.create(function() 49 | shard.init(cfg) 50 | end) 51 | 52 | -------------------------------------------------------------------------------- /test/redundancy3/monitoring.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy3/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy3/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | -- monitoring test 27 | --shard.wait_epoch(3) 28 | shard.wait_table_fill() 29 | --- 30 | ... 31 | shard.is_table_filled() 32 | --- 33 | - true 34 | ... 
35 | test_run:cmd("switch master1") 36 | --- 37 | - true 38 | ... 39 | --shard.wait_epoch(3) 40 | shard.wait_table_fill() 41 | --- 42 | ... 43 | shard.is_table_filled() 44 | --- 45 | - true 46 | ... 47 | test_run:cmd("switch master2") 48 | --- 49 | - true 50 | ... 51 | --shard.wait_epoch(3) 52 | shard.wait_table_fill() 53 | --- 54 | ... 55 | shard.is_table_filled() 56 | --- 57 | - true 58 | ... 59 | test_run:cmd("switch default") 60 | --- 61 | - true 62 | ... 63 | _ = test_run:cmd("stop server master1") 64 | --- 65 | ... 66 | _ = test_run:cmd("stop server master2") 67 | --- 68 | ... 69 | test_run:cmd("cleanup server master1") 70 | --- 71 | - true 72 | ... 73 | test_run:cmd("cleanup server master2") 74 | --- 75 | - true 76 | ... 77 | test_run:cmd("restart server default with cleanup=1") 78 | -------------------------------------------------------------------------------- /test/redundancy3/monitoring.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy3/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy3/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | -- monitoring test 10 | --shard.wait_epoch(3) 11 | shard.wait_table_fill() 12 | shard.is_table_filled() 13 | 14 | test_run:cmd("switch master1") 15 | --shard.wait_epoch(3) 16 | shard.wait_table_fill() 17 | shard.is_table_filled() 18 | 19 | test_run:cmd("switch master2") 20 | --shard.wait_epoch(3) 21 | shard.wait_table_fill() 22 | shard.is_table_filled() 23 | 24 | test_run:cmd("switch default") 25 | 26 | _ = test_run:cmd("stop server master1") 27 | _ = test_run:cmd("stop server master2") 28 | test_run:cmd("cleanup server master1") 29 | test_run:cmd("cleanup server master2") 30 | test_run:cmd("restart server default with cleanup=1") 31 | -------------------------------------------------------------------------------- /test/redundancy3/node_down.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy3/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy3/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | --shard.wait_epoch(3) 27 | shard.wait_table_fill() 28 | --- 29 | ... 30 | shard.is_table_filled() 31 | --- 32 | - true 33 | ... 34 | test_run:cmd("switch master1") 35 | --- 36 | - true 37 | ... 38 | shard.wait_table_fill() 39 | --- 40 | ... 41 | shard.is_table_filled() 42 | --- 43 | - true 44 | ... 45 | test_run:cmd("switch master2") 46 | --- 47 | - true 48 | ... 49 | shard.wait_table_fill() 50 | --- 51 | ... 52 | shard.is_table_filled() 53 | --- 54 | - true 55 | ... 56 | test_run:cmd("switch default") 57 | --- 58 | - true 59 | ... 60 | -- Kill server and wait for monitoring fibers kill 61 | --# stop server master1 62 | _ = test_run:cmd("stop server master1") 63 | --- 64 | ... 65 | -- Check that node is removed from shard 66 | shard.wait_epoch(2) 67 | --- 68 | ... 69 | shard.is_table_filled() 70 | --- 71 | - true 72 | ... 
73 | test_run:cmd("switch master2") 74 | --- 75 | - true 76 | ... 77 | shard.wait_epoch(2) 78 | --- 79 | ... 80 | shard.is_table_filled() 81 | --- 82 | - true 83 | ... 84 | test_run:cmd("switch default") 85 | --- 86 | - true 87 | ... 88 | _ = test_run:cmd("stop server master2") 89 | --- 90 | ... 91 | test_run:cmd("cleanup server master1") 92 | --- 93 | - true 94 | ... 95 | test_run:cmd("cleanup server master2") 96 | --- 97 | - true 98 | ... 99 | test_run:cmd("restart server default with cleanup=1") 100 | -------------------------------------------------------------------------------- /test/redundancy3/node_down.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy3/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy3/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | --shard.wait_epoch(3) 9 | shard.wait_table_fill() 10 | shard.is_table_filled() 11 | 12 | test_run:cmd("switch master1") 13 | shard.wait_table_fill() 14 | shard.is_table_filled() 15 | 16 | test_run:cmd("switch master2") 17 | shard.wait_table_fill() 18 | shard.is_table_filled() 19 | 20 | test_run:cmd("switch default") 21 | 22 | -- Kill server and wait for monitoring fibers kill 23 | --# stop server master1 24 | _ = test_run:cmd("stop server master1") 25 | 26 | -- Check that node is removed from shard 27 | shard.wait_epoch(2) 28 | shard.is_table_filled() 29 | 30 | test_run:cmd("switch master2") 31 | shard.wait_epoch(2) 32 | shard.is_table_filled() 33 | 34 | test_run:cmd("switch default") 35 | _ = test_run:cmd("stop server master2") 36 | test_run:cmd("cleanup server master1") 37 | test_run:cmd("cleanup server master2") 38 | test_run:cmd("restart server default with cleanup=1") 39 | -------------------------------------------------------------------------------- /test/redundancy3/q_basic.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy3/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy3/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | -- bipahse operations 27 | shard.demo:q_insert(1, {0, 'test'}) 28 | --- 29 | - [0, 'test'] 30 | ... 31 | shard.demo:q_replace(2, {0, 'test2'}) 32 | --- 33 | - [0, 'test2'] 34 | ... 35 | shard.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 36 | --- 37 | ... 38 | shard.demo:q_insert(4, {1, 'test4'}) 39 | --- 40 | - [1, 'test4'] 41 | ... 42 | shard.demo:q_insert(5, {2, 'test_to_delete'}) 43 | --- 44 | - [2, 'test_to_delete'] 45 | ... 46 | shard.demo:q_delete(6, 2) 47 | --- 48 | ... 49 | shard.wait_operations() 50 | --- 51 | ... 52 | box.space.demo:select() 53 | --- 54 | - - [0, 'test3'] 55 | - [1, 'test4'] 56 | ... 57 | test_run:cmd("switch master1") 58 | --- 59 | - true 60 | ... 61 | shard.wait_operations() 62 | --- 63 | ... 64 | box.space.demo:select() 65 | --- 66 | - - [0, 'test3'] 67 | - [1, 'test4'] 68 | ... 69 | test_run:cmd("switch master2") 70 | --- 71 | - true 72 | ... 
73 | shard.wait_operations() 74 | --- 75 | ... 76 | box.space.demo:select() 77 | --- 78 | - - [0, 'test3'] 79 | - [1, 'test4'] 80 | ... 81 | test_run:cmd("switch default") 82 | --- 83 | - true 84 | ... 85 | -- check that operation q_insert is in the shard 86 | shard.demo:check_operation(1, 0) 87 | --- 88 | - true 89 | ... 90 | -- check for nonexistent operations 91 | shard.demo:check_operation('12345', 0) 92 | --- 93 | - false 94 | ... 95 | _ = test_run:cmd("stop server master1") 96 | --- 97 | ... 98 | _ = test_run:cmd("stop server master2") 99 | --- 100 | ... 101 | test_run:cmd("cleanup server master1") 102 | --- 103 | - true 104 | ... 105 | test_run:cmd("cleanup server master2") 106 | --- 107 | - true 108 | ... 109 | test_run:cmd("restart server default with cleanup=1") 110 | -------------------------------------------------------------------------------- /test/redundancy3/q_basic.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy3/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy3/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | -- biphase operations 10 | shard.demo:q_insert(1, {0, 'test'}) 11 | shard.demo:q_replace(2, {0, 'test2'}) 12 | shard.demo:q_update(3, 0, {{'=', 2, 'test3'}}) 13 | shard.demo:q_insert(4, {1, 'test4'}) 14 | shard.demo:q_insert(5, {2, 'test_to_delete'}) 15 | shard.demo:q_delete(6, 2) 16 | 17 | shard.wait_operations() 18 | box.space.demo:select() 19 | test_run:cmd("switch master1") 20 | shard.wait_operations() 21 | box.space.demo:select() 22 | test_run:cmd("switch master2") 23 | shard.wait_operations() 24 | box.space.demo:select() 25 | test_run:cmd("switch default") 26 | 27 | -- check that operation q_insert is in the shard 28 | shard.demo:check_operation(1, 0) 29 | -- check for nonexistent operations 30 | shard.demo:check_operation('12345', 0) 31 | 32 | _ = test_run:cmd("stop server master1") 33 | _ = test_run:cmd("stop server master2") 34 | test_run:cmd("cleanup server master1") 35 | test_run:cmd("cleanup server master2") 36 | test_run:cmd("restart server default with cleanup=1") 37 | -------------------------------------------------------------------------------- /test/redundancy3/shard.result: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | --- 3 | ... 4 | test_run = env.new() 5 | --- 6 | ... 7 | test_run:cmd("create server master1 with script='redundancy3/master1.lua'") 8 | --- 9 | - true 10 | ... 11 | test_run:cmd("create server master2 with script='redundancy3/master2.lua'") 12 | --- 13 | - true 14 | ... 15 | test_run:cmd("start server master1") 16 | --- 17 | - true 18 | ... 19 | test_run:cmd("start server master2") 20 | --- 21 | - true 22 | ... 23 | shard.wait_connection() 24 | --- 25 | ... 26 | -- num keys 27 | #shard.shard(0) 28 | --- 29 | - 3 30 | ... 31 | -- str keys 32 | #shard.shard('abc') 33 | --- 34 | - 3 35 | ... 36 | _ = test_run:cmd("stop server master1") 37 | --- 38 | ... 39 | _ = test_run:cmd("stop server master2") 40 | --- 41 | ... 42 | test_run:cmd("cleanup server master1") 43 | --- 44 | - true 45 | ... 46 | test_run:cmd("cleanup server master2") 47 | --- 48 | - true 49 | ...
50 | test_run:cmd("restart server default with cleanup=1") 51 | -------------------------------------------------------------------------------- /test/redundancy3/shard.test.lua: -------------------------------------------------------------------------------- 1 | env = require('test_run') 2 | test_run = env.new() 3 | test_run:cmd("create server master1 with script='redundancy3/master1.lua'") 4 | test_run:cmd("create server master2 with script='redundancy3/master2.lua'") 5 | test_run:cmd("start server master1") 6 | test_run:cmd("start server master2") 7 | shard.wait_connection() 8 | 9 | -- num keys 10 | #shard.shard(0) 11 | 12 | -- str keys 13 | #shard.shard('abc') 14 | 15 | _ = test_run:cmd("stop server master1") 16 | _ = test_run:cmd("stop server master2") 17 | test_run:cmd("cleanup server master1") 18 | test_run:cmd("cleanup server master2") 19 | test_run:cmd("restart server default with cleanup=1") 20 | -------------------------------------------------------------------------------- /test/redundancy3/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = single node shard tests 4 | script = master.lua 5 | -------------------------------------------------------------------------------- /test/test-run.py: -------------------------------------------------------------------------------- 1 | ../test-run/test-run.py --------------------------------------------------------------------------------