├── tmp └── .keep ├── examples └── getting-started-app │ ├── tmp │ └── .keep │ ├── .cartridge.yml │ ├── .luacov │ ├── images │ ├── api-role.png │ ├── failover.png │ ├── storage-role.png │ ├── two-replicasets.png │ └── unconfigured-cluster.png │ ├── .luacheckrc │ ├── test │ ├── helper │ │ ├── unit.lua │ │ └── integration.lua │ ├── helper.lua │ └── unit │ │ └── sample_test.lua │ ├── deps.sh │ ├── cartridge.pre-build │ ├── cartridge.post-build │ ├── .gitignore │ ├── Dockerfile.build.cartridge │ ├── .editorconfig │ ├── getting-started-app-scm-1.rockspec │ ├── Dockerfile.cartridge │ ├── instances.yml │ ├── stateboard.init.lua │ └── init.lua ├── cli ├── create │ ├── templates │ │ └── cartridge │ │ │ ├── tmp │ │ │ └── .keep │ │ │ ├── .luacov │ │ │ ├── .luacheckrc │ │ │ ├── systemd-unit-params.yml │ │ │ ├── failover.yml │ │ │ ├── deps.sh │ │ │ ├── cartridge.pre-build │ │ │ ├── package-deps.txt │ │ │ ├── .cartridge.yml │ │ │ ├── cartridge.post-build │ │ │ ├── .gitignore │ │ │ ├── pack-cache-config.yml │ │ │ ├── test │ │ │ ├── unit │ │ │ │ └── sample_test.lua │ │ │ ├── integration │ │ │ │ └── api_test.lua │ │ │ └── helper.lua │ │ │ ├── Dockerfile.build.cartridge │ │ │ ├── .editorconfig │ │ │ ├── {{ .NameToLower }}-scm-1.rockspec │ │ │ ├── replicasets.yml │ │ │ ├── Dockerfile.cartridge │ │ │ ├── instances.yml │ │ │ ├── app │ │ │ ├── roles │ │ │ │ └── custom.lua │ │ │ └── admin.lua │ │ │ ├── stateboard.init.lua │ │ │ ├── init.lua │ │ │ └── README.md │ └── git.go ├── connect │ ├── lua │ │ ├── eval_func_body.lua │ │ ├── get_suggestions_func_body.lua │ │ └── get_title_func_body.lua │ ├── common.go │ └── connect.go ├── connector │ ├── lua │ │ ├── call_func_template.lua │ │ └── eval_func_template.lua │ ├── plain_text.go │ ├── conn_opts.go │ ├── request.go │ └── binary.go ├── replicasets │ ├── lua │ │ ├── get_cluster_is_healthy_body.lua │ │ ├── edit_instance_body.lua │ │ ├── get_known_vshard_groups_body.lua │ │ ├── bootstrap_vshard_body.lua │ │ ├── get_known_roles_body.lua │ │ ├── 
get_topology_replicasets_body_template.lua │ │ ├── edit_replicasets_body_template.lua │ │ └── format_topology_replicaset_func_template.lua │ ├── vshard_group.go │ ├── weight_test.go │ ├── bootstrap_vshard.go │ ├── expel_test.go │ ├── roles_test.go │ ├── failover_priority_test.go │ ├── weight.go │ ├── completion.go │ └── save.go ├── admin │ ├── lua │ │ ├── eval_func_get_res_body_template.lua │ │ └── admin_list_func_body_template.lua │ ├── list.go │ ├── admin.go │ └── help.go ├── failover │ ├── lua │ │ ├── get_failover_params_body.lua │ │ └── manage_failover_body.lua │ ├── disable.go │ ├── manage.go │ ├── set_test.go │ ├── validate.go │ ├── setup.go │ └── set.go ├── cluster │ ├── lua │ │ ├── probe_instances_body.lua │ │ └── get_membership_instances_body.lua │ └── cluster.go ├── commands │ ├── const.go │ ├── common_test.go │ ├── version.go │ ├── build.go │ ├── status.go │ ├── stop.go │ ├── clean.go │ ├── connect.go │ ├── log.go │ ├── bench.go │ └── cartridge.go ├── main.go ├── project │ └── common.go ├── rpm │ ├── lead_test.go │ ├── cpio.go │ ├── lead.go │ ├── common.go │ └── signature.go ├── codegen │ └── static │ │ └── utils.go ├── pack │ ├── tgz.go │ ├── tmpfiles_dir.go │ ├── rpm.go │ └── validate.go ├── repair │ ├── advertise_uri.go │ ├── lua │ │ └── reload_clusterwide_config_func_body.lua │ ├── set_leader.go │ └── remove.go ├── build │ ├── post_build.go │ └── local.go ├── docker │ ├── build_test.go │ └── common.go ├── bench │ ├── config.go │ └── types.go ├── common │ ├── crypto.go │ └── lua.go └── running │ └── writer_test.go ├── .flake8 ├── test ├── files │ ├── rundir.cartridge.yml │ ├── init_print_environment.lua │ ├── router_with_eval.lua │ ├── init_no_cartridge.lua │ ├── init_ignore_sigterm.lua │ ├── init_roles_reload_allowed.lua │ └── init_check_passed_params.lua ├── requirements.txt ├── integration │ ├── failover │ │ └── conftest.py │ ├── cli │ │ └── test_completion.py │ ├── replicasets │ │ ├── test_vshard_groups.py │ │ ├── utils.py │ │ ├── 
test_set_weight.py │ │ └── test_bootstrap_vshard.py │ ├── admin │ │ ├── test_list.py │ │ └── test_help.py │ ├── bench │ │ └── test_bench.py │ └── connect │ │ └── test_enter.py └── make_preparations │ └── test_build_projects.py ├── doc ├── requirements.txt ├── locale │ └── ru │ │ └── LC_MESSAGES │ │ └── doc │ │ ├── index.po │ │ ├── commands │ │ ├── connect.po │ │ ├── pack │ │ │ └── tgz.po │ │ └── enter.po │ │ ├── connect.po │ │ ├── global-flags.po │ │ └── lifecycle.po ├── commands │ ├── pack │ │ └── tgz.rst │ ├── connect.rst │ ├── enter.rst │ ├── status.rst │ ├── stop.rst │ ├── log.rst │ └── build.rst ├── conf.py ├── crowdin.yaml ├── README.md ├── index.rst ├── global-flags.rst ├── lifecycle.rst ├── cleanup.py ├── pre-post-build.rst ├── commands.rst ├── migration-to-tt.rst └── installation.rst ├── pytest.ini ├── .github ├── pull_request_template.md └── workflows │ ├── bump-homebrew-formula.yml │ ├── upload-translations.yml │ ├── pull-translation.yml │ ├── push-translation.yml │ └── release.yml ├── .lichen.yaml ├── AUTHORS ├── .gitignore ├── magefile.common.go ├── Dockerfile ├── Dockerfile.releaser ├── LICENSE ├── .goreleaser.yml └── README.dev.md /tmp/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/getting-started-app/tmp/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/tmp/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length=120 3 | -------------------------------------------------------------------------------- 
/test/files/rundir.cartridge.yml: -------------------------------------------------------------------------------- 1 | run-dir: rundir 2 | -------------------------------------------------------------------------------- /examples/getting-started-app/.cartridge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | run_dir: 'tmp' 3 | -------------------------------------------------------------------------------- /cli/connect/lua/eval_func_body.lua: -------------------------------------------------------------------------------- 1 | return require('console').eval(...) 2 | -------------------------------------------------------------------------------- /cli/connector/lua/call_func_template.lua: -------------------------------------------------------------------------------- 1 | return {{ .FunctionName }}(...) 2 | -------------------------------------------------------------------------------- /doc/requirements.txt: -------------------------------------------------------------------------------- 1 | Sphinx==4.0.2 2 | sphinx-intl==2.0.1 3 | polib==1.1.1 4 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | testpaths = test/python 3 | addopts = -vvl --durations=10 4 | -------------------------------------------------------------------------------- /cli/replicasets/lua/get_cluster_is_healthy_body.lua: -------------------------------------------------------------------------------- 1 | local cartridge = require('cartridge') 2 | return cartridge.is_healthy() 3 | -------------------------------------------------------------------------------- /cli/admin/lua/eval_func_get_res_body_template.lua: -------------------------------------------------------------------------------- 1 | local res, err = {{ .FuncName }}(...) 
2 | assert(err == nil, err) 3 | return res 4 | -------------------------------------------------------------------------------- /cli/failover/lua/get_failover_params_body.lua: -------------------------------------------------------------------------------- 1 | local cartridge = require('cartridge') 2 | return cartridge.failover_get_params() 3 | -------------------------------------------------------------------------------- /cli/admin/lua/admin_list_func_body_template.lua: -------------------------------------------------------------------------------- 1 | local func_list, err = {{ .AdminListFuncName }}(...) 2 | assert(err == nil, err) 3 | return func_list 4 | -------------------------------------------------------------------------------- /examples/getting-started-app/.luacov: -------------------------------------------------------------------------------- 1 | statsfile = 'tmp/luacov.stats.out' 2 | reportfile = 'tmp/luacov.report.out' 3 | exclude = { 4 | '/test/', 5 | } 6 | -------------------------------------------------------------------------------- /examples/getting-started-app/images/api-role.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tarantool/cartridge-cli/HEAD/examples/getting-started-app/images/api-role.png -------------------------------------------------------------------------------- /examples/getting-started-app/images/failover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tarantool/cartridge-cli/HEAD/examples/getting-started-app/images/failover.png -------------------------------------------------------------------------------- /cli/create/templates/cartridge/.luacov: -------------------------------------------------------------------------------- 1 | 2 | statsfile = 'tmp/luacov.stats.out' 3 | reportfile = 'tmp/luacov.report.out' 4 | exclude = { 5 | '/test/', 6 | } 7 | 
-------------------------------------------------------------------------------- /examples/getting-started-app/images/storage-role.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tarantool/cartridge-cli/HEAD/examples/getting-started-app/images/storage-role.png -------------------------------------------------------------------------------- /cli/connect/lua/get_suggestions_func_body.lua: -------------------------------------------------------------------------------- 1 | local last_word, last_word_len = ... 2 | return unpack(require('console').completion_handler(last_word, 0, last_word_len)) 3 | -------------------------------------------------------------------------------- /examples/getting-started-app/.luacheckrc: -------------------------------------------------------------------------------- 1 | include_files = {'**/*.lua', '*.luacheckrc', '*.rockspec'} 2 | exclude_files = {'.rocks/', 'tmp/'} 3 | max_line_length = 120 4 | -------------------------------------------------------------------------------- /examples/getting-started-app/images/two-replicasets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tarantool/cartridge-cli/HEAD/examples/getting-started-app/images/two-replicasets.png -------------------------------------------------------------------------------- /examples/getting-started-app/images/unconfigured-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tarantool/cartridge-cli/HEAD/examples/getting-started-app/images/unconfigured-cluster.png -------------------------------------------------------------------------------- /cli/create/templates/cartridge/.luacheckrc: -------------------------------------------------------------------------------- 1 | include_files = {'**/*.lua', '*.luacheckrc', '*.rockspec'} 2 | exclude_files = 
{'.rocks/', 'tmp/'} 3 | max_line_length = 120 4 | redefined = false 5 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | What has been done? Why? What problem is being solved? 2 | 3 | I didn't forget about 4 | 5 | - [ ] Tests 6 | - [ ] Changelog 7 | - [ ] Documentation 8 | 9 | Closes #??? 10 | -------------------------------------------------------------------------------- /cli/cluster/lua/probe_instances_body.lua: -------------------------------------------------------------------------------- 1 | local cartridge = require('cartridge') 2 | 3 | local uris = ... 4 | 5 | for _, uri in ipairs(uris) do 6 | local ok, err = cartridge.admin_probe_server(uri) 7 | assert(ok, err) 8 | end 9 | -------------------------------------------------------------------------------- /.lichen.yaml: -------------------------------------------------------------------------------- 1 | allow: 2 | - "MIT" 3 | - "Apache-2.0" 4 | - "BSD-3-Clause" 5 | - "BSD-2-Clause" 6 | - "MPL-2.0" 7 | - "CC-BY-SA-4.0" 8 | override: 9 | - path: "github.com/robfig/config" 10 | licenses: ["MPL-2.0"] 11 | -------------------------------------------------------------------------------- /cli/failover/lua/manage_failover_body.lua: -------------------------------------------------------------------------------- 1 | local cartridge = require('cartridge') 2 | local res, err = require('cartridge').failover_set_params(...) 
3 | 4 | if err ~= nil then 5 | return nil, err.err 6 | end 7 | 8 | return res, nil 9 | -------------------------------------------------------------------------------- /test/files/init_print_environment.lua: -------------------------------------------------------------------------------- 1 | require('strict').on() 2 | 3 | local log = require('log') 4 | 5 | log.info(os.getenv('TARANTOOL_CONSOLE_SOCK')) 6 | log.info(os.getenv('TARANTOOL_WORKDIR')) 7 | log.info(os.getenv('TARANTOOL_PID_FILE')) 8 | -------------------------------------------------------------------------------- /test/requirements.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | tarantool 3 | requests 4 | rpmfile==0.1.4 5 | docker 6 | flake8==6.1.0 7 | flake8-unused-arguments==0.0.6 8 | flake8-isort==6.1.0 9 | psutil==5.7.0 10 | pyyaml==6.0.1 11 | tenacity==6.1.0 12 | GitPython==3.1.37 13 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/systemd-unit-params.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Here you can specify parameters for unit files, such as: 3 | 4 | # fd-limit: 65535 # LimitNOFILE for application instance 5 | # stateboard-fd-limit: 65535 # LimitNOFILE for stateboard instance 6 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | # 2 | # Below is complete list of people, who contributed their 3 | # code. 4 | # 5 | # NOTE: If you can commit a change this list, please do not hesitate 6 | # to add your name to it. 
7 | # 8 | 9 | Konstantin Nazarov, Elizaveta Dokshina, Oleg Babin 10 | -------------------------------------------------------------------------------- /cli/commands/const.go: -------------------------------------------------------------------------------- 1 | package commands 2 | 3 | import "time" 4 | 5 | // DEFAULT VALUES 6 | const ( 7 | defaultStartTimeout = 1 * time.Minute 8 | defaultLogLines = 15 9 | ) 10 | 11 | // ENV 12 | const ( 13 | cartridgeTmpDirEnv = "CARTRIDGE_TEMPDIR" 14 | ) 15 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/failover.yml: -------------------------------------------------------------------------------- 1 | # Specify here the failover parameters that will 2 | # be used by the `cartridge failover setup` command 3 | 4 | mode: stateful 5 | state_provider: stateboard 6 | stateboard_params: 7 | uri: localhost:4401 8 | password: passwd 9 | -------------------------------------------------------------------------------- /examples/getting-started-app/test/helper/unit.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | 3 | local shared = require('test.helper') 4 | 5 | local helper = {shared = shared} 6 | 7 | t.before_suite(function() box.cfg({work_dir = shared.datadir}) end) 8 | 9 | return helper 10 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/deps.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Call this script to install test dependencies 3 | 4 | set -e 5 | 6 | # Test dependencies: 7 | tarantoolctl rocks install luatest 0.5.6 8 | tarantoolctl rocks install luacov 0.13.0 9 | tarantoolctl rocks install luacheck 0.26.0 10 | -------------------------------------------------------------------------------- /examples/getting-started-app/deps.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Call this script to install test dependencies 3 | 4 | set -e 5 | 6 | # Test dependencies: 7 | tarantoolctl rocks install luatest 0.5.6 8 | tarantoolctl rocks install luacov 0.13.0 9 | tarantoolctl rocks install luacheck 0.26.0 10 | -------------------------------------------------------------------------------- /doc/locale/ru/LC_MESSAGES/doc/index.po: -------------------------------------------------------------------------------- 1 | 2 | msgid "Cartridge Command Line Interface" 3 | msgstr "Cartridge CLI" 4 | 5 | msgid "Control your Tarantool application instances via the command line." 6 | msgstr "" 7 | "Cartridge CLI позволяет управлять экземплярами приложений Tarantool через " 8 | "командную строку." 9 | -------------------------------------------------------------------------------- /cli/replicasets/lua/edit_instance_body.lua: -------------------------------------------------------------------------------- 1 | local cartridge = require('cartridge') 2 | 3 | local servers = ... 
4 | 5 | local res, err = cartridge.admin_edit_topology({ 6 | servers = servers, 7 | }) 8 | 9 | if err ~= nil then 10 | err = err.err 11 | end 12 | 13 | assert(err == nil, tostring(err)) 14 | -------------------------------------------------------------------------------- /examples/getting-started-app/cartridge.pre-build: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Simple pre-build script 4 | # Will be run before `tarantoolctl rocks make` on application build 5 | # Could be useful to install non-standard rocks modules 6 | 7 | # For example: 8 | # tarantoolctl rocks make --chdir ./third_party/my-custom-rock-module 9 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/cartridge.pre-build: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Simple pre-build script 4 | # Will be run before "tarantoolctl rocks make" on application build 5 | # Could be useful to install non-standard rocks modules 6 | 7 | # For example: 8 | # tarantoolctl rocks make --chdir ./third_party/my-custom-rock-module 9 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/package-deps.txt: -------------------------------------------------------------------------------- 1 | // Empty package dependencies file. 2 | // Used by "pack" command when the DEB or RPM format is specified. 3 | 4 | // You can specify here dependencies of your package. 
5 | // See format of this file in doc: 6 | // https://github.com/tarantool/cartridge-cli#packing-an-application 7 | -------------------------------------------------------------------------------- /examples/getting-started-app/cartridge.post-build: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Simple post-build script 4 | # Will be ran after `tarantoolctl rocks make` on application packing 5 | # Could be useful to remove some build artifacts from result package 6 | 7 | # For example: 8 | # rm -rf third_party 9 | # rm -rf node_modules 10 | # rm -rf doc 11 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/.cartridge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Here you can specify default parameters for local running, such as: 3 | 4 | # cfg: path-to-cfg-file 5 | # log-dir: path-to-log-dir 6 | # run-dir: path-to-run-dir 7 | # data-dir: path-to-data-dir 8 | 9 | stateboard: true # If no arguments are supplied, --stateboard flag is true 10 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/cartridge.post-build: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Simple post-build script 4 | # Will be ran after "tarantoolctl rocks make" on application packing 5 | # Could be useful to remove some build artifacts from result package 6 | 7 | # For example: 8 | # rm -rf third_party 9 | # rm -rf node_modules 10 | # rm -rf doc 11 | -------------------------------------------------------------------------------- /examples/getting-started-app/.gitignore: -------------------------------------------------------------------------------- 1 | .rocks 2 | .swo 3 | .swp 4 | CMakeCache.txt 5 | CMakeFiles 6 | cmake_install.cmake 7 | *.dylib 8 | *.idea 9 | __pycache__ 10 | *pyc 11 | .cache 12 | 
.pytest_cache 13 | .vagrant 14 | .DS_Store 15 | *.xlog 16 | *.snap 17 | *.rpm 18 | *.deb 19 | *.tar.gz 20 | node_modules 21 | /tmp/* 22 | !/tmp/.keep 23 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/.gitignore: -------------------------------------------------------------------------------- 1 | .rocks 2 | .swo 3 | .swp 4 | CMakeCache.txt 5 | CMakeFiles 6 | cmake_install.cmake 7 | *.dylib 8 | *.idea 9 | __pycache__ 10 | *pyc 11 | .cache 12 | .pytest_cache 13 | .vagrant 14 | .DS_Store 15 | *.xlog 16 | *.snap 17 | *.rpm 18 | *.deb 19 | *.tar.gz 20 | node_modules 21 | /tmp/* 22 | !/tmp/.keep 23 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/pack-cache-config.yml: -------------------------------------------------------------------------------- 1 | # Specify here paths you want to cache when packaging the application with 2 | # `cartridge pack` command. By default, we propose to cache project rocks. 3 | # See https://github.com/tarantool/cartridge-cli#packing-an-application for details. 
4 | 5 | - path: '.rocks' 6 | key-path: {{ .Name }}-scm-1.rockspec 7 | -------------------------------------------------------------------------------- /cli/replicasets/lua/get_known_vshard_groups_body.lua: -------------------------------------------------------------------------------- 1 | local vshard_utils = require('cartridge.vshard-utils') 2 | 3 | local known_groups = vshard_utils.get_known_groups() 4 | 5 | local known_groups_names = {} 6 | for group_name in pairs(known_groups) do 7 | table.insert(known_groups_names, group_name) 8 | end 9 | 10 | return unpack(known_groups_names) 11 | -------------------------------------------------------------------------------- /cli/connect/lua/get_title_func_body.lua: -------------------------------------------------------------------------------- 1 | local ok, api_topology = pcall(require, 'cartridge.lua-api.topology') 2 | if not ok then 3 | return '' 4 | end 5 | 6 | local self = api_topology.get_self() 7 | if self.app_name == nil or self.instance_name == nil then 8 | return '' 9 | end 10 | 11 | return string.format('%s.%s', self.app_name, self.instance_name) 12 | -------------------------------------------------------------------------------- /cli/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/apex/log" 5 | 6 | "github.com/tarantool/cartridge-cli/cli/commands" 7 | "github.com/tarantool/cartridge-cli/cli/project" 8 | ) 9 | 10 | func main() { 11 | defer func() { 12 | if r := recover(); r != nil { 13 | log.Fatalf("%s", project.InternalError("Unhandled internal error: %s", r)) 14 | } 15 | }() 16 | 17 | commands.Execute() 18 | } 19 | -------------------------------------------------------------------------------- /cli/replicasets/lua/bootstrap_vshard_body.lua: -------------------------------------------------------------------------------- 1 | local cartridge = require('cartridge') 2 | 3 | local bootstrap_function = 
cartridge.admin_bootstrap_vshard 4 | if bootstrap_function == nil then 5 | bootstrap_function = require('cartridge.admin').bootstrap_vshard 6 | end 7 | 8 | local ok, err = bootstrap_function() 9 | 10 | if err ~= nil then 11 | err = err.err 12 | end 13 | 14 | assert(ok, tostring(err)) 15 | -------------------------------------------------------------------------------- /doc/commands/pack/tgz.rst: -------------------------------------------------------------------------------- 1 | Packaging an application into a TGZ archive 2 | =========================================== 3 | 4 | ``cartridge pack tgz`` creates a ``.tgz`` archive. 5 | It contains the directory ```` 6 | with the application source code and the ``.rocks`` modules 7 | described in the application's ``.rockspec`` file. 8 | 9 | The resulting artifact name is ``-[.]..tar.gz``. 10 | 11 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/test/unit/sample_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local g = t.group('unit_sample') 3 | 4 | -- create your space here 5 | g.before_all(function(cg) end) -- luacheck: no unused args 6 | 7 | -- drop your space here 8 | g.after_all(function(cg) end) -- luacheck: no unused args 9 | 10 | g.test_sample = function(cg) -- luacheck: no unused args 11 | t.assert_equals(type(box.cfg), 'table') 12 | end 13 | -------------------------------------------------------------------------------- /cli/replicasets/lua/get_known_roles_body.lua: -------------------------------------------------------------------------------- 1 | local cartridge_roles = require('cartridge.roles') 2 | local known_roles = cartridge_roles.get_known_roles() 3 | 4 | local ret = {} 5 | for _, role_name in ipairs(known_roles) do 6 | local role = { 7 | name = role_name, 8 | dependencies = cartridge_roles.get_role_dependencies(role_name), 9 | } 10 | 11 | table.insert(ret, 
role) 12 | end 13 | 14 | return unpack(ret) 15 | -------------------------------------------------------------------------------- /doc/commands/connect.rst: -------------------------------------------------------------------------------- 1 | Connect to an instance at a specific address 2 | ============================================ 3 | 4 | .. code-block:: bash 5 | 6 | cartridge connect [URI] [flags] 7 | 8 | Specify the instance's address or path to its UNIX socket. 9 | Username and password can be passed as part of the URI 10 | or via the following flags (has greater priority): 11 | 12 | * ``-u, --username`` 13 | * ``-p, --password`` 14 | 15 | -------------------------------------------------------------------------------- /examples/getting-started-app/Dockerfile.build.cartridge: -------------------------------------------------------------------------------- 1 | # Simple Dockerfile 2 | # Used by `pack` command as a base for build image 3 | # when --use-docker option is specified 4 | 5 | # The base image must be centos:8 6 | FROM centos:8 7 | 8 | # Here you can install some packages required 9 | # for your application build 10 | 11 | # RUN set -x \ 12 | # && curl -sL https://rpm.nodesource.com/setup_10.x | bash - \ 13 | # && yum -y install nodejs 14 | -------------------------------------------------------------------------------- /cli/connector/lua/eval_func_template.lua: -------------------------------------------------------------------------------- 1 | local function func(...) 2 | {{ .FunctionBody }} 3 | end 4 | local args = require('msgpack').decode(string.fromhex('{{ .ArgsEncoded }}')) 5 | 6 | local ret = { 7 | load( 8 | 'local func, args = ... 
return func(unpack(args))', 9 | '@eval' 10 | )(func, args) 11 | } 12 | return { 13 | data_enc = require('digest').base64_encode( 14 | require('msgpack').encode(ret) 15 | ) 16 | } 17 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/Dockerfile.build.cartridge: -------------------------------------------------------------------------------- 1 | # Simple Dockerfile 2 | # Used by "pack" command as a base for build image 3 | # when --use-docker option is specified 4 | # 5 | # Image based on centos:7 is expected to be used 6 | FROM centos:7 7 | 8 | # Here you can install some packages required 9 | # for your application build 10 | # 11 | # RUN set -x \ 12 | # && curl -sL https://rpm.nodesource.com/setup_10.x | bash - \ 13 | # && yum -y install nodejs 14 | -------------------------------------------------------------------------------- /cli/project/common.go: -------------------------------------------------------------------------------- 1 | package project 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/apex/log" 7 | ) 8 | 9 | // RemoveTmpPath removes specified path if debug flag isn't set 10 | // If path deletion fails, it warns 11 | func RemoveTmpPath(path string, debug bool) { 12 | if debug { 13 | log.Warnf("%s is not removed due to debug mode", path) 14 | return 15 | } 16 | if err := os.RemoveAll(path); err != nil { 17 | log.Warnf("Failed to remove: %s", err) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /.github/workflows/bump-homebrew-formula.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bump Homebrew formula 3 | 4 | on: 5 | push: 6 | tags: 7 | - '*' 8 | 9 | jobs: 10 | bump-homebrew-formula: 11 | name: Bump Homebrew formula 12 | runs-on: macos-latest 13 | steps: 14 | - name: Homebrew bump formula 15 | uses: dawidd6/action-homebrew-bump-formula@v3.7.1 16 | with: 17 | formula: cartridge-cli 18 
| token: ${{ secrets.BUMP_HOMEBREW_FORMULA_TOKEN }} 19 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/.editorconfig: -------------------------------------------------------------------------------- 1 | # top-most EditorConfig file 2 | root = true 3 | 4 | # Unix-style newlines with a newline ending every file 5 | [*] 6 | end_of_line = lf 7 | insert_final_newline = true 8 | 9 | [CMakeLists.txt] 10 | indent_style = space 11 | indent_size = 4 12 | 13 | [*.cmake] 14 | indent_style = space 15 | indent_size = 4 16 | 17 | [*.lua] 18 | indent_style = space 19 | indent_size = 4 20 | 21 | [*.{h,c,cc}] 22 | indent_style = tab 23 | tab_width = 8 24 | -------------------------------------------------------------------------------- /examples/getting-started-app/.editorconfig: -------------------------------------------------------------------------------- 1 | # top-most EditorConfig file 2 | root = true 3 | 4 | # Unix-style newlines with a newline ending every file 5 | [*] 6 | end_of_line = lf 7 | insert_final_newline = true 8 | 9 | [CMakeLists.txt] 10 | indent_style = space 11 | indent_size = 4 12 | 13 | [*.cmake] 14 | indent_style = space 15 | indent_size = 4 16 | 17 | [*.lua] 18 | indent_style = space 19 | indent_size = 4 20 | 21 | [*.{h,c,cc}] 22 | indent_style = tab 23 | tab_width = 8 24 | -------------------------------------------------------------------------------- /examples/getting-started-app/getting-started-app-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = 'getting-started-app' 2 | version = 'scm-1' 3 | source = { 4 | url = '/dev/null', 5 | } 6 | -- Put any modules your app depends on here 7 | dependencies = { 8 | 'tarantool', 9 | 'lua >= 5.1', 10 | 'checks == 3.3.0-1', 11 | 'cartridge == 2.8.4-1', 12 | 'ldecnumber == 1.1.3-1', 13 | 'metrics == 1.0.0-1', 14 | 'cartridge-metrics-role == 0.1.1-1', 15 | } 16 | build = { 17 | type = 'none'; 
18 | } 19 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/{{ .NameToLower }}-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = '{{ .Name }}' 2 | version = 'scm-1' 3 | source = { 4 | url = '/dev/null', 5 | } 6 | -- Put any modules your app depends on here 7 | dependencies = { 8 | 'tarantool', 9 | 'lua >= 5.1', 10 | 'checks == 3.3.0-1', 11 | 'cartridge == 2.8.4-1', 12 | 'metrics == 1.0.0-1', 13 | 'cartridge-metrics-role == 0.1.1-1', 14 | 'cartridge-cli-extensions == 1.1.1-1', 15 | } 16 | build = { 17 | type = 'none'; 18 | } 19 | -------------------------------------------------------------------------------- /test/integration/failover/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from utils import run_command_and_get_output 3 | 4 | 5 | # Fixtures 6 | @pytest.fixture(scope="function") 7 | def project_with_topology_and_vshard(cartridge_cmd, default_project_with_instances): 8 | project = default_project_with_instances.project 9 | 10 | cmd = [cartridge_cmd, "replicasets", "setup", "--bootstrap-vshard"] 11 | rc, _ = run_command_and_get_output(cmd, cwd=project.path) 12 | assert rc == 0 13 | 14 | return project 15 | -------------------------------------------------------------------------------- /examples/getting-started-app/Dockerfile.cartridge: -------------------------------------------------------------------------------- 1 | # Simple Dockerfile 2 | # Used by `pack docker` command as a base for runtime image 3 | 4 | # The base image must be centos:8 5 | FROM centos:8 6 | 7 | # Here you can install some packages required 8 | # for your application in runtime 9 | # 10 | # For example, if you need to install some python packages, 11 | # you can do it this way: 12 | # 13 | # COPY requirements.txt /tmp 14 | # RUN yum install -y python3-pip 15 | # RUN pip3 install -r 
/tmp/requirements.txt 16 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/replicasets.yml: -------------------------------------------------------------------------------- 1 | router: 2 | instances: 3 | - router 4 | roles: 5 | - failover-coordinator 6 | - vshard-router 7 | - app.roles.custom 8 | all_rw: false 9 | s-1: 10 | instances: 11 | - s1-master 12 | - s1-replica 13 | roles: 14 | - vshard-storage 15 | weight: 1 16 | all_rw: false 17 | vshard_group: default 18 | s-2: 19 | instances: 20 | - s2-master 21 | - s2-replica 22 | roles: 23 | - vshard-storage 24 | weight: 1 25 | all_rw: false 26 | vshard_group: default 27 | -------------------------------------------------------------------------------- /cli/rpm/lead_test.go: -------------------------------------------------------------------------------- 1 | package rpm 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestLead(t *testing.T) { 10 | t.Parallel() 11 | 12 | assert := assert.New(t) 13 | 14 | lead := genRpmLead("myapp") 15 | assert.Equal( 16 | "edabeedb0300000000016d796170700000000000000000000000000"+ 17 | "000000000000000000000000000000000000000000000000000"+ 18 | "000000000000000000000000000000000000000000000000010"+ 19 | "00500000000000000000000000000000000", 20 | hex(lead), 21 | ) 22 | } 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | CMakeFiles/ 2 | CMakeCache.txt 3 | Makefile 4 | cmake_*.cmake 5 | install_manifest.txt 6 | *.a 7 | *.cbp 8 | *.d 9 | *.dylib 10 | *.gcno 11 | *.gcda 12 | *.user 13 | *.o 14 | *.reject 15 | *.so 16 | *~ 17 | .gdb_history 18 | Testing 19 | CTestTestfile.cmake 20 | *.snap 21 | *.xlog 22 | .rocks 23 | .cache 24 | .vagrant 25 | __pycache__ 26 | /tmp/* 27 | !/tmp/.keep 28 | .DS_Store 29 | ./packpack 30 | ./build 31 | /build.luarocks 32 
| /venv 33 | /cartridge 34 | dist 35 | completion/ 36 | *_gen.go 37 | doc/locale/en/ 38 | doc/output/ 39 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/Dockerfile.cartridge: -------------------------------------------------------------------------------- 1 | # Simple Dockerfile 2 | # Used by "pack docker" command as a base for runtime image 3 | # 4 | # Image based on centos:7 is expected to be used 5 | FROM centos:7 6 | 7 | # Here you can install some packages required 8 | # for your application in runtime 9 | # 10 | # 11 | # For example, if you need to install some python packages, 12 | # you can do it this way: 13 | # 14 | # COPY requirements.txt /tmp 15 | # RUN yum install -y python3-pip 16 | # RUN pip3 install -r /tmp/requirements.txt 17 | -------------------------------------------------------------------------------- /doc/conf.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | sys.path.insert(0, os.path.abspath('')) 5 | 6 | master_doc = 'README' 7 | 8 | source_suffix = '.rst' 9 | 10 | project = u'Cartridge-cli' 11 | 12 | exclude_patterns = [ 13 | 'doc/locale', 14 | 'doc/output', 15 | 'doc/README.md', 16 | 'doc/cleanup.py', 17 | 'doc/requirements.txt', 18 | 'venv', 19 | ] 20 | 21 | language = 'en' 22 | locale_dirs = ['./doc/locale'] 23 | gettext_compact = False 24 | gettext_location = True 25 | gettext_additional_targets = ['literal-block'] 26 | -------------------------------------------------------------------------------- /examples/getting-started-app/test/helper.lua: -------------------------------------------------------------------------------- 1 | -- This file is required automatically by luatest. 2 | -- Add common configuration here. 
3 | 4 | local fio = require('fio') 5 | local t = require('luatest') 6 | 7 | local helper = {} 8 | 9 | helper.root = fio.dirname(fio.abspath(package.search('init'))) 10 | helper.datadir = fio.pathjoin(helper.root, 'tmp', 'db_test') 11 | helper.server_command = fio.pathjoin(helper.root, 'init.lua') 12 | 13 | t.before_suite(function() 14 | fio.rmtree(helper.datadir) 15 | fio.mktree(helper.datadir) 16 | end) 17 | 18 | return helper 19 | -------------------------------------------------------------------------------- /test/files/router_with_eval.lua: -------------------------------------------------------------------------------- 1 | local cartridge = require('cartridge') 2 | 3 | local function eval_handler(req) 4 | local resp = req:render({json = { data = loadstring(req:json().eval_string)() }}) 5 | return resp 6 | end 7 | 8 | local function init(opts) -- luacheck: no unused args 9 | local httpd = assert(cartridge.service_get('httpd'), "Failed to get httpd serivce") 10 | httpd:route({method = 'PUT', path = '/eval'}, eval_handler) 11 | 12 | return true 13 | end 14 | 15 | return { 16 | role_name = 'app.roles.custom', 17 | init = init, 18 | } 19 | -------------------------------------------------------------------------------- /cli/cluster/cluster.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/tarantool/cartridge-cli/cli/common" 8 | "github.com/tarantool/cartridge-cli/cli/connector" 9 | ) 10 | 11 | const ( 12 | SimpleOperationTimeout = 10 * time.Second 13 | ) 14 | 15 | func HealthCheckIsNeeded(conn *connector.Conn) (bool, error) { 16 | majorCartridgeVersion, err := common.GetMajorCartridgeVersion(conn) 17 | if err != nil { 18 | return false, fmt.Errorf("Failed to get Cartridge major version: %s", err) 19 | } 20 | 21 | return majorCartridgeVersion < 2, nil 22 | } 23 | -------------------------------------------------------------------------------- 
/cli/create/templates/cartridge/instances.yml: -------------------------------------------------------------------------------- 1 | --- 2 | {{ .Name }}.router: 3 | advertise_uri: localhost:3301 4 | http_port: 8081 5 | 6 | {{ .Name }}.s1-master: 7 | advertise_uri: localhost:3302 8 | http_port: 8082 9 | 10 | {{ .Name }}.s1-replica: 11 | advertise_uri: localhost:3303 12 | http_port: 8083 13 | 14 | {{ .Name }}.s2-master: 15 | advertise_uri: localhost:3304 16 | http_port: 8084 17 | 18 | {{ .Name }}.s2-replica: 19 | advertise_uri: localhost:3305 20 | http_port: 8085 21 | 22 | {{ .StateboardName }}: 23 | listen: localhost:4401 24 | password: passwd 25 | -------------------------------------------------------------------------------- /cli/failover/disable.go: -------------------------------------------------------------------------------- 1 | package failover 2 | 3 | import ( 4 | "github.com/apex/log" 5 | "github.com/tarantool/cartridge-cli/cli/context" 6 | "github.com/tarantool/cartridge-cli/cli/project" 7 | ) 8 | 9 | func Disable(ctx *context.Ctx) error { 10 | if err := project.FillCtx(ctx); err != nil { 11 | return err 12 | } 13 | 14 | failoverOpts, err := getFailoverOpts(ctx) 15 | if err != nil { 16 | return err 17 | } 18 | 19 | if err := failoverOpts.Manage(ctx); err != nil { 20 | return err 21 | } 22 | 23 | log.Infof("Failover disabled successfully") 24 | 25 | return nil 26 | } 27 | -------------------------------------------------------------------------------- /cli/replicasets/lua/get_topology_replicasets_body_template.lua: -------------------------------------------------------------------------------- 1 | local cartridge = require('cartridge') 2 | 3 | {{ .FormatTopologyReplicasetFunc }} 4 | 5 | local topology_replicasets = {} 6 | 7 | local replicasets, err = cartridge.admin_get_replicasets() 8 | 9 | if err ~= nil then 10 | err = err.err 11 | end 12 | 13 | assert(err == nil, tostring(err)) 14 | 15 | for _, replicaset in pairs(replicasets) do 16 | local 
topology_replicaset = {{ .FormatTopologyReplicasetFuncName }}(replicaset) 17 | table.insert(topology_replicasets, topology_replicaset) 18 | end 19 | 20 | return unpack(topology_replicasets) 21 | -------------------------------------------------------------------------------- /doc/crowdin.yaml: -------------------------------------------------------------------------------- 1 | # https://support.crowdin.com/configuration-file/ 2 | # https://support.crowdin.com/cli-tool-v3/#configuration 3 | 4 | "project_id" : "457588" 5 | "base_path" : "doc/locale" 6 | "base_url": "https://crowdin.com" 7 | "api_token_env": "CROWDIN_PERSONAL_TOKEN" 8 | 9 | 10 | "preserve_hierarchy": true 11 | 12 | files: [ 13 | { 14 | "source" : "/en/**/*.pot", 15 | "translation" : "/%locale_with_underscore%/LC_MESSAGES/**/%file_name%.po", 16 | "update_option" : "update_as_unapproved", 17 | 18 | "languages_mapping" : { 19 | "locale_with_underscore" : { 20 | "ru" : "ru", 21 | } 22 | }, 23 | } 24 | ] 25 | -------------------------------------------------------------------------------- /test/integration/cli/test_completion.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | 4 | 5 | def test_completion(cartridge_cmd, tmpdir): 6 | cmd = [ 7 | cartridge_cmd, "gen", "completion", 8 | ] 9 | 10 | process = subprocess.run(cmd, cwd=tmpdir) 11 | assert process.returncode == 0 12 | 13 | comp_names = [ 14 | "completion/bash/cartridge", 15 | "completion/zsh/_cartridge", 16 | ] 17 | 18 | for comp_name in comp_names: 19 | comp_path = os.path.join(tmpdir, comp_name) 20 | assert os.path.exists(comp_path) 21 | 22 | filemode = os.stat(comp_path).st_mode & 0o777 23 | assert filemode == 0o644 24 | -------------------------------------------------------------------------------- /doc/README.md: -------------------------------------------------------------------------------- 1 | 
[![Crowdin](https://badges.crowdin.net/tarantool-cartridge-cli/localized.svg)](https://crowdin.com/project/tarantool-cartridge-cli) 2 | 3 | # Tarantool Cartridge-cli documentation 4 | Part of Tarantool documentation, published to 5 | https://www.tarantool.io/en/doc/latest/book/cartridge/cartridge_cli/ 6 | 7 | ### Create pot files from rst 8 | ```bash 9 | python -m sphinx . doc/locale/en -c doc -b gettext 10 | ``` 11 | 12 | ### Create/update po from pot files 13 | ```bash 14 | sphinx-intl update -p doc/locale/en -d doc/locale -l ru 15 | ``` 16 | 17 | ### Build documentation to doc/output 18 | ```bash 19 | python -m sphinx . doc/output -c doc 20 | ``` 21 | -------------------------------------------------------------------------------- /cli/codegen/static/utils.go: -------------------------------------------------------------------------------- 1 | package static 2 | 3 | import ( 4 | "io/ioutil" 5 | "net/http" 6 | 7 | "github.com/apex/log" 8 | ) 9 | 10 | // GetStaticFileContent open file in generated static filesystem 11 | func GetStaticFileContent(fs http.FileSystem, filename string) (string, error) { 12 | file, err := fs.Open(filename) 13 | if err != nil { 14 | log.Errorf("Failed to open static file: %s", err) 15 | return "", err 16 | } 17 | 18 | content, err := ioutil.ReadAll(file) 19 | if err != nil { 20 | log.Errorf("Failed to get static file content: %s", err) 21 | return "", err 22 | } 23 | 24 | defer file.Close() 25 | 26 | return string(content), nil 27 | } 28 | -------------------------------------------------------------------------------- /examples/getting-started-app/test/unit/sample_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local g = t.group('unit_sample') 3 | 4 | local storage_utils = require('app.roles.storage').utils 5 | 6 | require('test.helper.unit') 7 | 8 | g.test_update_balance = function() 9 | t.assert_equals(storage_utils.update_balance("88.95", 0.455), 
"89.40") 10 | t.assert_equals(storage_utils.update_balance("88.95", 0.455001), "89.41") 11 | 12 | t.assert_equals(storage_utils.update_balance("-18.99", 1.79), "-17.20") 13 | t.assert_equals(storage_utils.update_balance("88.95", 1.79), "90.74") 14 | t.assert_equals(storage_utils.update_balance("0.1", -0.2), "-0.10") 15 | end 16 | -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | Cartridge Command Line Interface 2 | ================================ 3 | 4 | .. important:: 5 | 6 | Cartridge CLI is deprecated in favor of `tt CLI `_. 7 | Find the instructions on switching from Cartridge CLI to ``tt`` in 8 | :doc:`Migration from Cartridge CLI to tt `. 9 | 10 | Control your Tarantool application instances via the command line. 11 | 12 | .. toctree:: 13 | :maxdepth: 2 14 | :includehidden: 15 | 16 | lifecycle 17 | migration-to-tt 18 | installation 19 | commands 20 | global-flags 21 | instance-paths 22 | pre-post-build 23 | 24 | -------------------------------------------------------------------------------- /examples/getting-started-app/instances.yml: -------------------------------------------------------------------------------- 1 | getting-started-app.router: 2 | workdir: ./tmp/db_dev/3301 3 | advertise_uri: localhost:3301 4 | http_port: 8081 5 | 6 | getting-started-app.s1-master: 7 | workdir: ./tmp/db_dev/3302 8 | advertise_uri: localhost:3302 9 | http_port: 8082 10 | 11 | getting-started-app.s1-replica: 12 | workdir: ./tmp/db_dev/3303 13 | advertise_uri: localhost:3303 14 | http_port: 8083 15 | 16 | getting-started-app.s2-master: 17 | workdir: ./tmp/db_dev/3304 18 | advertise_uri: localhost:3304 19 | http_port: 8084 20 | 21 | getting-started-app.s2-replica: 22 | workdir: ./tmp/db_dev/3305 23 | advertise_uri: localhost:3305 24 | http_port: 8085 25 | -------------------------------------------------------------------------------- 
/cli/replicasets/lua/edit_replicasets_body_template.lua: -------------------------------------------------------------------------------- 1 | local cartridge = require('cartridge') 2 | 3 | {{ .FormatTopologyReplicasetFunc }} 4 | 5 | local replicasets = ... 6 | 7 | local res, err = cartridge.admin_edit_topology({ 8 | replicasets = replicasets, 9 | }) 10 | 11 | if err ~= nil then 12 | err = err.err 13 | end 14 | 15 | assert(err == nil, tostring(err)) 16 | 17 | local replicasets = res.replicasets 18 | 19 | local topology_replicasets = {} 20 | for _, replicaset in pairs(replicasets) do 21 | local topology_replicaset = {{ .FormatTopologyReplicasetFuncName }}(replicaset) 22 | table.insert(topology_replicasets, topology_replicaset) 23 | end 24 | 25 | return unpack(topology_replicasets) 26 | -------------------------------------------------------------------------------- /cli/cluster/lua/get_membership_instances_body.lua: -------------------------------------------------------------------------------- 1 | local membership = require('membership') 2 | 3 | local instances = {} 4 | 5 | local members = membership.members() 6 | 7 | for uri, member in pairs(members) do 8 | local uuid 9 | if member.payload ~= nil and member.payload.uuid ~= nil then 10 | uuid = member.payload.uuid 11 | end 12 | 13 | local alias 14 | if member.payload ~= nil and member.payload.alias ~= nil then 15 | alias = member.payload.alias 16 | end 17 | 18 | local instance = { 19 | uri = uri, 20 | alias = alias, 21 | uuid = uuid, 22 | status = member.status, 23 | } 24 | 25 | table.insert(instances, instance) 26 | end 27 | 28 | return unpack(instances) 29 | -------------------------------------------------------------------------------- /magefile.common.go: -------------------------------------------------------------------------------- 1 | // +build mage 2 | 3 | package main 4 | 5 | import ( 6 | "fmt" 7 | "io" 8 | "net/http" 9 | "os" 10 | ) 11 | 12 | func downloadFile(url string, dest string) error { 13 | 
destFile, err := os.Create(dest) 14 | if err != nil { 15 | return fmt.Errorf("Failed to create dest file: %s", err) 16 | } 17 | defer destFile.Close() 18 | 19 | resp, err := http.Get(url) 20 | if err != nil { 21 | return fmt.Errorf("Failed to get: %s", err) 22 | } 23 | defer resp.Body.Close() 24 | 25 | if resp.StatusCode != http.StatusOK { 26 | return fmt.Errorf("Response status isn't OK: %s", resp.Status) 27 | } 28 | 29 | if _, err := io.Copy(destFile, resp.Body); err != nil { 30 | return fmt.Errorf("Failed to write dest file: %s", err) 31 | } 32 | 33 | return nil 34 | } 35 | -------------------------------------------------------------------------------- /doc/locale/ru/LC_MESSAGES/doc/commands/connect.po: -------------------------------------------------------------------------------- 1 | 2 | msgid "Connect to an instance at a specific address" 3 | msgstr "Подключение к экземпляру по определенному адресу" 4 | 5 | msgid "cartridge connect [URI] [flags]" 6 | msgstr "cartridge connect [URI] [параметры]" 7 | 8 | msgid "" 9 | "Specify the instance's address or path to its UNIX socket. Username and " 10 | "password can be passed as part of the URI or via the following flags (has " 11 | "greater priority):" 12 | msgstr "" 13 | "Укажите адрес экземпляра или путь к UNIX-сокету. 
Вы можете передать имя " 14 | "пользователя и пароль как часть URI или с помощью следующих параметров, " 15 | "приоритет которых выше:" 16 | 17 | msgid "``-u, --username``" 18 | msgstr "``-u, --username``" 19 | 20 | msgid "``-p, --password``" 21 | msgstr "``-p, --password``" 22 | -------------------------------------------------------------------------------- /cli/pack/tgz.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | 7 | "github.com/apex/log" 8 | 9 | "github.com/tarantool/cartridge-cli/cli/common" 10 | "github.com/tarantool/cartridge-cli/cli/context" 11 | ) 12 | 13 | func packTgz(ctx *context.Ctx) error { 14 | var err error 15 | 16 | appDirPath := filepath.Join(ctx.Pack.PackageFilesDir, ctx.Project.Name) 17 | if err := initAppDir(appDirPath, ctx); err != nil { 18 | return err 19 | } 20 | 21 | err = common.RunFunctionWithSpinner(func() error { 22 | return common.WriteTgzArchive(ctx.Pack.PackageFilesDir, ctx.Pack.ResPackagePath) 23 | }, "Creating result TGZ archive...") 24 | if err != nil { 25 | return fmt.Errorf("Failed to create TGZ archive: %s", err) 26 | } 27 | 28 | log.Infof("Created result TGZ archive: %s", ctx.Pack.ResPackagePath) 29 | 30 | return nil 31 | } 32 | -------------------------------------------------------------------------------- /doc/locale/ru/LC_MESSAGES/doc/commands/pack/tgz.po: -------------------------------------------------------------------------------- 1 | 2 | msgid "Packaging an application into a TGZ archive" 3 | msgstr "Упаковка приложения в TGZ-архив" 4 | 5 | msgid "" 6 | "``cartridge pack tgz`` creates a ``.tgz`` archive. It contains the directory" 7 | " ```` with the application source code and the ``.rocks`` modules " 8 | "described in the application's ``.rockspec`` file." 9 | msgstr "" 10 | "Команда ``cartridge pack tgz`` создает архив формата ``.tgz``. 
В нем будет " 11 | "содержаться директория ```` с исходным кодом приложения и модули " 12 | "``.rocks``, описанные в файле ``.rockspec`` вашего приложения." 13 | 14 | msgid "" 15 | "The resulting artifact name is ``-[.]..tar.gz``." 16 | msgstr "" 17 | "Этот архив получит следующее название: " 18 | "``<имя_приложения>-<версия>[.<суффикс>].<архитектура>.tar.gz``." 19 | -------------------------------------------------------------------------------- /doc/commands/enter.rst: -------------------------------------------------------------------------------- 1 | Enter an instance 2 | ================= 3 | 4 | ``cartridge enter`` allows connecting to an instance started with ``cartridge start``. 5 | The connection uses the instance's console socket placed in ``run-dir``. 6 | 7 | .. code-block:: bash 8 | 9 | cartridge enter [INSTANCE_NAME] [flags] 10 | 11 | Flags 12 | ----- 13 | 14 | .. container:: table 15 | 16 | .. list-table:: 17 | :widths: 20 80 18 | :header-rows: 0 19 | 20 | * - ``--name`` 21 | - Application name. 22 | * - ``--run-dir`` 23 | - The directory where PID and socket files are stored. 24 | Defaults to ``./tmp/run``. 25 | ``run-dir`` is also a section of ``.cartridge.yml``. 26 | Learn more about 27 | :doc:`instance paths `. 28 | 29 | -------------------------------------------------------------------------------- /doc/global-flags.rst: -------------------------------------------------------------------------------- 1 | Global flags 2 | ============ 3 | 4 | All Cartridge CLI commands support these flags: 5 | 6 | .. container:: table 7 | 8 | .. list-table:: 9 | :widths: 20 80 10 | :header-rows: 0 11 | 12 | * - ``--verbose`` 13 | - Run commands with verbose output, 14 | including the output of nested commands like 15 | ``tarantoolctl rocks make`` or ``docker build``. 16 | * - ``--debug`` 17 | - Run command in debug mode -- that is, 18 | with verbose output and without removing temporary files. 19 | Useful for debugging ``cartridge pack``. 
20 | * - ``--quiet`` 21 | - Hide command output, only display error messages. 22 | Useful for suppressing the huge output 23 | of ``cartridge pack`` and ``cartridge build``. 24 | 25 | -------------------------------------------------------------------------------- /doc/locale/ru/LC_MESSAGES/doc/connect.po: -------------------------------------------------------------------------------- 1 | 2 | msgid "Connect to running instances" 3 | msgstr "" 4 | 5 | msgid "Enter instance started via ``cartridge start``" 6 | msgstr "" 7 | 8 | msgid "Flags:" 9 | msgstr "" 10 | 11 | msgid "``--name`` - application name" 12 | msgstr "" 13 | 14 | msgid "" 15 | "``--run-dir`` - directory where PID and socket files are stored (defaults to" 16 | " ./tmp/run or \"run-dir\" in .cartridge.yml)" 17 | msgstr "" 18 | 19 | msgid "Connects to instance via it's console socket placed in ``run-dir``." 20 | msgstr "" 21 | 22 | msgid "Connect to instance by specified address" 23 | msgstr "" 24 | 25 | msgid "" 26 | "Instance address or path to UNIX socket can be specified. Username and " 27 | "password can be passed as a part of URI or by flags (has greater priority):" 28 | msgstr "" 29 | 30 | msgid "``-u, --username``" 31 | msgstr "" 32 | 33 | msgid "``-p, --password``" 34 | msgstr "" 35 | -------------------------------------------------------------------------------- /doc/lifecycle.rst: -------------------------------------------------------------------------------- 1 | Cartridge application lifecycle 2 | =============================== 3 | 4 | In a nutshell: 5 | 6 | 1. :doc:`Create an application` 7 | (for example, ``myapp``) from a template: 8 | 9 | .. code-block:: bash 10 | 11 | cartridge create --name myapp 12 | cd ./myapp 13 | 14 | 2. :doc:`Build the application ` 15 | for local development and testing: 16 | 17 | .. code-block:: bash 18 | 19 | cartridge build 20 | 21 | 3. :doc:`Run instances locally `: 22 | 23 | .. code-block:: bash 24 | 25 | cartridge start 26 | cartridge stop 27 | 28 | 4. 
:doc:`Pack the application ` 29 | into a distributable (like an RPM package): 30 | 31 | .. code-block:: bash 32 | 33 | cartridge pack rpm 34 | 35 | -------------------------------------------------------------------------------- /cli/rpm/cpio.go: -------------------------------------------------------------------------------- 1 | package rpm 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "fmt" 7 | "os" 8 | "os/exec" 9 | "strings" 10 | 11 | "github.com/tarantool/cartridge-cli/cli/context" 12 | ) 13 | 14 | func packCpio(relPaths []string, resFileName string, ctx *context.Ctx) error { 15 | filesBuffer := bytes.Buffer{} 16 | filesBuffer.WriteString(strings.Join(relPaths, "\n")) 17 | 18 | cpioFile, err := os.Create(resFileName) 19 | if err != nil { 20 | return err 21 | } 22 | defer cpioFile.Close() 23 | 24 | cpioFileWriter := bufio.NewWriter(cpioFile) 25 | defer cpioFileWriter.Flush() 26 | 27 | var stderrBuf bytes.Buffer 28 | 29 | cmd := exec.Command("cpio", "-o", "-H", "newc") 30 | cmd.Stdin = &filesBuffer 31 | cmd.Stdout = cpioFileWriter 32 | cmd.Stderr = &stderrBuf 33 | cmd.Dir = ctx.Pack.PackageFilesDir 34 | 35 | if err := cmd.Run(); err != nil { 36 | return fmt.Errorf("Failed to run \n%s\n\nStderr: %s", cmd.String(), stderrBuf.String()) 37 | } 38 | 39 | return nil 40 | } 41 | -------------------------------------------------------------------------------- /cli/repair/advertise_uri.go: -------------------------------------------------------------------------------- 1 | package repair 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/tarantool/cartridge-cli/cli/common" 7 | "github.com/tarantool/cartridge-cli/cli/context" 8 | ) 9 | 10 | func patchConfAdvertiseURI(topologyConf *TopologyConfType, ctx *context.Ctx) ([]common.ResultMessage, error) { 11 | return patchConf(patchInstanceURI, topologyConf, ctx) 12 | } 13 | 14 | func patchInstanceURI(topologyConf *TopologyConfType, ctx *context.Ctx) error { 15 | instanceUUID := ctx.Repair.SetURIInstanceUUID 16 | 17 | instanceConf, 
ok := topologyConf.Instances[instanceUUID] 18 | if !ok { 19 | return fmt.Errorf("Instance %s isn't found in cluster", instanceUUID) 20 | } 21 | 22 | if instanceConf.IsExpelled { 23 | return fmt.Errorf("Instance %s is expelled", instanceUUID) 24 | } 25 | 26 | if err := topologyConf.SetInstanceURI(instanceUUID, ctx.Repair.NewURI); err != nil { 27 | return fmt.Errorf("Failed to change instance advertise URI: %s", err) 28 | } 29 | 30 | return nil 31 | } 32 | -------------------------------------------------------------------------------- /test/files/init_no_cartridge.lua: -------------------------------------------------------------------------------- 1 | require('strict').on() 2 | 3 | local fiber = require('fiber') 4 | 5 | fiber.create(function() 6 | fiber.sleep(1) 7 | end) 8 | 9 | require('log').info('I am starting...') 10 | 11 | -- Copied from cartridge.cfg to provide support for NOTIFY_SOCKET in old tarantool 12 | local tnt_version = string.split(_TARANTOOL, '.') 13 | local tnt_major = tonumber(tnt_version[1]) 14 | local tnt_minor = tonumber(tnt_version[2]) 15 | if tnt_major < 2 or (tnt_major == 2 and tnt_minor < 2) then 16 | local notify_socket = os.getenv('NOTIFY_SOCKET') 17 | if notify_socket then 18 | local socket = require('socket') 19 | local sock = assert(socket('AF_UNIX', 'SOCK_DGRAM', 0), 'Can not create socket') 20 | sock:sendto('unix/', notify_socket, 'READY=1') 21 | end 22 | end 23 | 24 | local sock_path = os.getenv('TARANTOOL_CONSOLE_SOCK') 25 | if sock_path ~= nil then 26 | local console = require('console') 27 | assert(pcall(console.listen, sock_path)) 28 | end 29 | -------------------------------------------------------------------------------- /cli/pack/tmpfiles_dir.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/apex/log" 7 | "github.com/tarantool/cartridge-cli/cli/context" 8 | "github.com/tarantool/cartridge-cli/cli/templates" 9 | ) 10 | 11 | var 
( 12 | tmpFilesTemplate = templates.FileTreeTemplate{ 13 | Dirs: []templates.DirTemplate{ 14 | { 15 | Path: "/usr/lib/tmpfiles.d", 16 | Mode: 0755, 17 | }, 18 | }, 19 | Files: []templates.FileTemplate{ 20 | { 21 | Path: "/usr/lib/tmpfiles.d/{{ .Name }}.conf", 22 | Mode: 0644, 23 | Content: tmpFilesConfContent, 24 | }, 25 | }, 26 | } 27 | ) 28 | 29 | func initTmpfilesDir(baseDirPath string, ctx *context.Ctx) error { 30 | log.Infof("Initialize tmpfiles dir") 31 | 32 | if err := tmpFilesTemplate.Instantiate(baseDirPath, ctx.Project); err != nil { 33 | return fmt.Errorf("Failed to instantiate tmpfiles dir: %s", err) 34 | } 35 | 36 | return nil 37 | } 38 | 39 | const ( 40 | tmpFilesConfContent = `d /var/run/tarantool 0755 tarantool tarantool` 41 | ) 42 | -------------------------------------------------------------------------------- /cli/commands/common_test.go: -------------------------------------------------------------------------------- 1 | package commands 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestGetDuration(t *testing.T) { 12 | t.Parallel() 13 | assert := assert.New(t) 14 | 15 | var duration time.Duration 16 | var err error 17 | 18 | duration, err = getDuration("72h1m0.5s") 19 | assert.Nil(err) 20 | assert.Equal("72h1m0.5s", duration.String()) 21 | 22 | duration, err = getDuration("100") 23 | assert.Nil(err) 24 | assert.Equal("1m40s", duration.String()) 25 | 26 | _, err = getDuration("forever") 27 | assert.NotNil(err) 28 | assert.True(strings.Contains(err.Error(), `invalid duration "forever"`), err.Error()) 29 | 30 | _, err = getDuration("-1") 31 | assert.NotNil(err) 32 | assert.True(strings.Contains(err.Error(), `Negative duration is specified`), err.Error()) 33 | 34 | _, err = getDuration("-10m") 35 | assert.NotNil(err) 36 | assert.True(strings.Contains(err.Error(), `Negative duration is specified`), err.Error()) 37 | } 38 | 
-------------------------------------------------------------------------------- /cli/commands/version.go: -------------------------------------------------------------------------------- 1 | package commands 2 | 3 | import ( 4 | "github.com/apex/log" 5 | "github.com/spf13/cobra" 6 | "github.com/spf13/pflag" 7 | "github.com/tarantool/cartridge-cli/cli/version" 8 | ) 9 | 10 | var ( 11 | projectPath string 12 | showRocksVersions bool 13 | ) 14 | 15 | func init() { 16 | var versionCmd = &cobra.Command{ 17 | Use: "version", 18 | Short: "Show version information", 19 | Args: cobra.MaximumNArgs(0), 20 | Run: func(cmd *cobra.Command, args []string) { 21 | projectPathIsSet := cmd.Flags().Changed("project-path") 22 | if err := version.PrintVersionString(projectPath, projectPathIsSet, showRocksVersions); err != nil { 23 | log.Fatalf(err.Error()) 24 | } 25 | }, 26 | } 27 | 28 | rootCmd.AddCommand(versionCmd) 29 | addVersionFlags(versionCmd.Flags()) 30 | } 31 | 32 | func addVersionFlags(flagSet *pflag.FlagSet) { 33 | flagSet.BoolVar(&showRocksVersions, "rocks", false, needRocksUsage) 34 | flagSet.StringVar(&projectPath, "project-path", ".", projectPathUsage) 35 | } 36 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/app/roles/custom.lua: -------------------------------------------------------------------------------- 1 | local cartridge = require('cartridge') 2 | 3 | local function init(opts) -- luacheck: no unused args 4 | -- if opts.is_master then 5 | -- end 6 | 7 | local httpd = assert(cartridge.service_get('httpd'), "Failed to get httpd service") 8 | httpd:route({method = 'GET', path = '/hello'}, function() 9 | return {body = 'Hello world!'} 10 | end) 11 | 12 | return true 13 | end 14 | 15 | local function stop() 16 | return true 17 | end 18 | 19 | local function validate_config(conf_new, conf_old) -- luacheck: no unused args 20 | return true 21 | end 22 | 23 | local function apply_config(conf, opts) -- 
-- Integration test helper: builds a two-replicaset test cluster
-- (an "api" replicaset and a "storage" replicaset) with luatest and
-- the cartridge test helpers, and manages its lifecycle per suite.
local t = require('luatest')

local cartridge_helpers = require('cartridge.test-helpers')
local shared = require('test.helper')

local helper = {shared = shared}

-- Cluster fixture shared by all integration tests.
-- Replicaset and instance UUIDs are fixed via cartridge_helpers.uuid()
-- so runs are reproducible.
helper.cluster = cartridge_helpers.Cluster:new({
    server_command = shared.server_command,
    datadir = shared.datadir,
    use_vshard = true,
    replicasets = {
        {
            alias = 'api',
            uuid = cartridge_helpers.uuid('b'),
            roles = {'api'},
            servers = {{ instance_uuid = cartridge_helpers.uuid('b', 1) }},
        },
        {
            alias = 'storage',
            uuid = cartridge_helpers.uuid('a'),
            roles = {'storage'},
            servers = {{ instance_uuid = cartridge_helpers.uuid('a', 1) }},
        },
    },
})

-- Start the cluster once before the whole suite and stop it afterwards.
t.before_suite(function() helper.cluster:start() end)
t.after_suite(function() helper.cluster:stop() end)

return helper
instances['router'] 11 | admin_api_url = router.get_admin_api_url() 12 | vshard_group_names = get_vshard_group_names(admin_api_url) 13 | 14 | # bootstrap vshard 15 | cmd = [ 16 | cartridge_cmd, 'replicasets', 'list-vshard-groups', 17 | ] 18 | 19 | rc, output = run_command_and_get_output(cmd, cwd=project.path) 20 | assert rc == 0 21 | 22 | log_lines = get_log_lines(output) 23 | 24 | assert log_lines[:1] == [ 25 | '• Available vshard groups:', 26 | ] 27 | 28 | groups_list = get_list_from_log_lines(log_lines[1:]) 29 | assert set(groups_list) == set(vshard_group_names) 30 | -------------------------------------------------------------------------------- /cli/connector/plain_text.go: -------------------------------------------------------------------------------- 1 | package connector 2 | 3 | import ( 4 | "net" 5 | ) 6 | 7 | func initPlainTextConn(conn *Conn, plainTextConn net.Conn) error { 8 | conn.plainText = plainTextConn 9 | 10 | conn.evalFunc = evalPlainText 11 | conn.callFunc = callPlainText 12 | 13 | return nil 14 | } 15 | 16 | func evalPlainText(conn *Conn, funcBody string, args []interface{}, execOpts ExecOpts) ([]interface{}, error) { 17 | evalPlainTextOpts := getEvalPlainTextOpts(execOpts) 18 | return evalPlainTextConn(conn.plainText, funcBody, args, evalPlainTextOpts) 19 | } 20 | 21 | func callPlainText(conn *Conn, funcName string, args []interface{}, execOpts ExecOpts) ([]interface{}, error) { 22 | evalPlainTextOpts := getEvalPlainTextOpts(execOpts) 23 | return callPlainTextConn(conn.plainText, funcName, args, evalPlainTextOpts) 24 | } 25 | 26 | func getEvalPlainTextOpts(execOpts ExecOpts) EvalPlainTextOpts { 27 | return EvalPlainTextOpts{ 28 | PushCallback: execOpts.PushCallback, 29 | ReadTimeout: execOpts.ReadTimeout, 30 | ResData: execOpts.ResData, 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /cli/replicasets/lua/format_topology_replicaset_func_template.lua: 
-- Template body rendered by Go's text/template before evaluation on the
-- instance: {{ .FormatTopologyReplicasetFuncName }} is substituted with a
-- generated function name. Converts a cartridge replicaset object into a
-- plain table consumed by the CLI.
local function {{ .FormatTopologyReplicasetFuncName }}(replicaset)
    -- Flatten every server into a minimal instance description.
    local instances = {}
    for _, server in pairs(replicaset.servers) do
        local instance = {
            alias = server.alias,
            uuid = server.uuid,
            uri = server.uri,
            zone = server.zone,
        }
        table.insert(instances, instance)
    end

    -- leader_uuid stays nil when no active master is known.
    local leader_uuid
    if replicaset.active_master ~= nil then
        leader_uuid = replicaset.active_master.uuid
    end

    local topology_replicaset = {
        uuid = replicaset.uuid,
        alias = replicaset.alias,
        status = replicaset.status,
        roles = replicaset.roles,
        all_rw = replicaset.all_rw,
        weight = replicaset.weight,
        vshard_group = replicaset.vshard_group,
        instances = instances,
        leader_uuid = leader_uuid,
    }

    return topology_replicaset
end
package admin

import (
	"fmt"
	"time"

	"github.com/apex/log"
	"github.com/tarantool/cartridge-cli/cli/connector"
)

// adminFuncList prints the usage text of every admin function registered
// on the connected instance.
func adminFuncList(conn *connector.Conn) error {
	funcInfos, err := getListFuncInfos(conn)
	if err != nil {
		return fmt.Errorf("Failed to get functions list: %s", err)
	}

	log.Infof("Available admin functions:\n\n%s", funcInfos.FormatUsages())

	return nil
}

// getListFuncInfos evaluates the admin "list" function body on the
// instance and decodes the reply into FuncInfos.
func getListFuncInfos(conn *connector.Conn) (*FuncInfos, error) {
	funcBody, err := getAdminFuncEvalTypedBody(adminListFuncName)
	if err != nil {
		return nil, err
	}

	// Listing is cheap; cap the wait with a short read timeout.
	req := connector.EvalReq(funcBody).SetReadTimeout(3 * time.Second)

	// Eval results arrive as a slice; exactly one element is expected here.
	funcInfosSlice := []FuncInfos{}
	if err := conn.ExecTyped(req, &funcInfosSlice); err != nil {
		return nil, err
	}

	if len(funcInfosSlice) != 1 {
		return nil, fmt.Errorf("Function signature received in a bad format")
	}

	funcInfos := funcInfosSlice[0]

	return &funcInfos, nil
}
*cobra.Command, args []string) { 17 | err := runBuildCommand(cmd, args) 18 | if err != nil { 19 | log.Fatalf(err.Error()) 20 | } 21 | }, 22 | } 23 | 24 | rootCmd.AddCommand(buildCmd) 25 | 26 | // FLAGS 27 | configureFlags(buildCmd) 28 | 29 | // path to rockspec to use for build 30 | addSpecFlag(buildCmd) 31 | } 32 | 33 | func runBuildCommand(cmd *cobra.Command, args []string) error { 34 | var err error 35 | 36 | ctx.Project.Path = cmd.Flags().Arg(0) 37 | 38 | err = build.FillCtx(&ctx) 39 | if err != nil { 40 | return err 41 | } 42 | 43 | // build project 44 | err = build.Run(&ctx) 45 | if err != nil { 46 | return err 47 | } 48 | 49 | return nil 50 | } 51 | -------------------------------------------------------------------------------- /cli/pack/rpm.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | 7 | "github.com/tarantool/cartridge-cli/cli/common" 8 | "github.com/tarantool/cartridge-cli/cli/context" 9 | "github.com/tarantool/cartridge-cli/cli/rpm" 10 | 11 | "github.com/apex/log" 12 | ) 13 | 14 | func packRpm(ctx *context.Ctx) error { 15 | var err error 16 | 17 | if err := common.CheckRequiredBinaries("cpio"); err != nil { 18 | return err 19 | } 20 | 21 | appDirPath := filepath.Join(ctx.Pack.PackageFilesDir, ctx.Running.AppDir) 22 | if err := initAppDir(appDirPath, ctx); err != nil { 23 | return err 24 | } 25 | 26 | if err := initSystemdDir(ctx.Pack.PackageFilesDir, ctx); err != nil { 27 | return err 28 | } 29 | 30 | if err := initTmpfilesDir(ctx.Pack.PackageFilesDir, ctx); err != nil { 31 | return err 32 | } 33 | 34 | err = common.RunFunctionWithSpinner(func() error { 35 | return rpm.Pack(ctx) 36 | }, "Creating result RPM package...") 37 | if err != nil { 38 | return fmt.Errorf("Failed to create RPM package: %s", err) 39 | } 40 | 41 | log.Infof("Created result RPM package: %s", ctx.Pack.ResPackagePath) 42 | 43 | return nil 44 | } 45 | 
-------------------------------------------------------------------------------- /cli/rpm/lead.go: -------------------------------------------------------------------------------- 1 | package rpm 2 | 3 | import ( 4 | "bytes" 5 | ) 6 | 7 | func genRpmLead(name string) *bytes.Buffer { 8 | // The Lead is a legacy structure that used to describe RPM files 9 | // before header sections were introduced. 10 | // 11 | // struct rpmlead { 12 | // unsigned char magic[4]; 13 | // unsigned char major, minor; 14 | // short type; 15 | // short archnum; 16 | // char name[66]; 17 | // short osnum; 18 | // short signature_type; 19 | // char reserved[16]; 20 | // } ; 21 | 22 | var rpmLeadName [66]byte 23 | for i, nameByte := range []uint8(name) { 24 | rpmLeadName[i] = nameByte 25 | } 26 | 27 | rpmLead := packValues( 28 | [4]byte{0xed, 0xab, 0xee, 0xdb}, // magic 29 | uint8(3), // major 30 | uint8(0), // minor 31 | int16(0), // type 32 | int16(1), // archnum 33 | rpmLeadName, // name 34 | int16(1), // osnum 35 | int16(5), // signature_type 36 | [16]int8{}, // reserved 37 | ) 38 | 39 | return rpmLead 40 | } 41 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/test/integration/api_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local g = t.group('integration_api') 3 | 4 | local helper = require('test.helper') 5 | 6 | g.before_all(function(cg) 7 | cg.cluster = helper.cluster 8 | cg.cluster:start() 9 | end) 10 | 11 | g.after_all(function(cg) 12 | helper.stop_cluster(cg.cluster) 13 | end) 14 | 15 | g.before_each(function(cg) -- luacheck: no unused args 16 | -- helper.truncate_space_on_cluster(g.cluster, 'Set your space name here') 17 | end) 18 | 19 | g.test_sample = function(cg) 20 | local server = cg.cluster.main_server 21 | local response = server:http_request('post', '/admin/api', {json = {query = '{ cluster { self { alias } } }'}}) 22 | 
t.assert_equals(response.json, {data = { cluster = { self = { alias = 'api' } } }}) 23 | t.assert_equals(server.net_box:eval('return box.cfg.memtx_dir'), server.workdir) 24 | end 25 | 26 | g.test_metrics = function(cg) 27 | local server = cg.cluster.main_server 28 | local response = server:http_request('get', '/metrics') 29 | t.assert_equals(response.status, 200) 30 | t.assert_equals(response.reason, "Ok") 31 | end 32 | -------------------------------------------------------------------------------- /cli/replicasets/vshard_group.go: -------------------------------------------------------------------------------- 1 | package replicasets 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/tarantool/cartridge-cli/cli/cluster" 7 | "github.com/tarantool/cartridge-cli/cli/connector" 8 | 9 | "github.com/apex/log" 10 | "github.com/tarantool/cartridge-cli/cli/context" 11 | ) 12 | 13 | func ListVshardGroups(ctx *context.Ctx, args []string) error { 14 | conn, err := cluster.ConnectToSomeRunningInstance(ctx) 15 | if err != nil { 16 | return fmt.Errorf("Failed to connect to Tarantool instance: %s", err) 17 | } 18 | 19 | req := connector.EvalReq(getKnownVshardGroupsBody) 20 | var knownVshardGroups []string 21 | 22 | if err := conn.ExecTyped(req, &knownVshardGroups); err != nil { 23 | return fmt.Errorf("Failed to get known vshard groups: %s", err) 24 | } 25 | 26 | if len(knownVshardGroups) == 0 { 27 | log.Infof( 28 | "No vshard groups available. 
" + 29 | "It's possible that your application hasn't vshard-router role registered", 30 | ) 31 | } else { 32 | log.Infof("Available vshard groups:") 33 | for _, vshardGroup := range knownVshardGroups { 34 | log.Infof(" %s", vshardGroup) 35 | } 36 | } 37 | 38 | return nil 39 | } 40 | -------------------------------------------------------------------------------- /cli/build/post_build.go: -------------------------------------------------------------------------------- 1 | package build 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | 8 | "github.com/apex/log" 9 | "github.com/tarantool/cartridge-cli/cli/common" 10 | "github.com/tarantool/cartridge-cli/cli/context" 11 | ) 12 | 13 | func PostRun(ctx *context.Ctx) error { 14 | // post-build 15 | postBuildHookPath := filepath.Join(ctx.Build.Dir, postBuildHookName) 16 | 17 | if _, err := os.Stat(postBuildHookPath); err == nil { 18 | log.Infof("Running `%s`", postBuildHookName) 19 | err = common.RunHook(postBuildHookPath, ctx.Cli.Verbose) 20 | if err != nil { 21 | return fmt.Errorf("Failed to run post-build hook: %s", err) 22 | } 23 | } else if !os.IsNotExist(err) { 24 | return fmt.Errorf("Unable to use post-build hook: %s", err) 25 | } 26 | 27 | var buildHooks = []string{ 28 | preBuildHookName, 29 | postBuildHookName, 30 | } 31 | 32 | for _, hook := range buildHooks { 33 | log.Debugf("Remove `%s`", hook) 34 | 35 | hookPath := filepath.Join(ctx.Build.Dir, hook) 36 | if err := os.RemoveAll(hookPath); err != nil { 37 | return fmt.Errorf("Failed to remove %s: %s", hookPath, err) 38 | } 39 | } 40 | 41 | return nil 42 | } 43 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/app/admin.lua: -------------------------------------------------------------------------------- 1 | local cli_admin = require('cartridge-cli-extensions.admin') 2 | 3 | -- register admin function probe to use it with "cartridge admin" 4 | local function init() 5 | cli_admin.init() 6 | 
7 | local probe = { 8 | usage = 'Probe instance', 9 | args = { 10 | uri = { 11 | type = 'string', 12 | usage = 'Instance URI', 13 | }, 14 | }, 15 | call = function(opts) 16 | opts = opts or {} 17 | 18 | if opts.uri == nil then 19 | return nil, "Please, pass instance URI via --uri flag" 20 | end 21 | 22 | local cartridge_admin = require('cartridge.admin') 23 | local ok, err = cartridge_admin.probe_server(opts.uri) 24 | 25 | if not ok then 26 | return nil, err.err 27 | end 28 | 29 | return { 30 | string.format('Probe %q: OK', opts.uri), 31 | } 32 | end, 33 | } 34 | 35 | local ok, err = cli_admin.register('probe', probe.usage, probe.args, probe.call) 36 | assert(ok, err) 37 | end 38 | 39 | return {init = init} 40 | -------------------------------------------------------------------------------- /.github/workflows/upload-translations.yml: -------------------------------------------------------------------------------- 1 | name: Update translation sources 2 | 3 | on: 4 | push: 5 | paths: 6 | - 'doc/**/*.rst' 7 | - 'doc/**/*.po' 8 | - '.github/workflows/upload-translations.yml' 9 | branches: 10 | - master 11 | jobs: 12 | autocommit-pot-files: 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | 17 | - name: Checkout 18 | uses: actions/checkout@v2 19 | 20 | - name: Setup Python environment 21 | uses: actions/setup-python@v4 22 | with: 23 | python-version: '3.9' 24 | 25 | - name: Setup Python requirements 26 | run: | 27 | python -m pip install --upgrade pip 28 | pip install -r doc/requirements.txt 29 | 30 | - name: Build pot files 31 | run: python -m sphinx . 
# CI image: CentOS 7 with docker-ce, the Go toolchain, mage and the
# Python test requirements preinstalled.
FROM centos:7

# Go toolchain version; override with --build-arg GOVERSION=...
ARG GOVERSION=1.17

RUN yum -y update
RUN yum install -y git gcc make cmake unzip python3-pip python3-devel

# Install a pinned docker-ce from the official Docker repository.
RUN yum install -y yum-utils device-mapper-persistent-data lvm2
RUN yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
RUN yum install -y docker-ce-18.09.1

# Git identity for operations performed inside the container.
RUN git config --global user.email "test@tarantool.io" \
    && git config --global user.name "Test Tarantool"

VOLUME /var/lib/docker

ENV PORT=2375

# wrapdocker is the entrypoint script added from the build context;
# presumably it starts the inner Docker daemon — see the script itself.
ADD wrapdocker /usr/local/bin/wrapdocker
RUN chmod +x /usr/local/bin/wrapdocker

# Install the Go toolchain under /usr/local/go.
RUN curl -O -L https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz \
    && tar -C /usr/local -xzf /go${GOVERSION}.linux-amd64.tar.gz

ENV PATH=${PATH}:/usr/local/go/bin
ENV GOPATH=/home/go
ENV PATH=$PATH:${GOPATH}/bin

# Build and install mage from source.
RUN go get -u -d github.com/magefile/mage \
    && cd $GOPATH/src/github.com/magefile/mage \
    && go run bootstrap.go

# Preinstall the Python test dependencies.
COPY test/requirements.txt /tmp/test/
RUN pip3 install --user -r /tmp/test/requirements.txt

EXPOSE 2375

ENTRYPOINT [ "/usr/local/bin/wrapdocker" ]

CMD ["/bin/bash" , "-l"]
| import ( 4 | "fmt" 5 | 6 | "github.com/apex/log" 7 | "github.com/spf13/cobra" 8 | 9 | "github.com/tarantool/cartridge-cli/cli/running" 10 | ) 11 | 12 | func init() { 13 | var statusCmd = &cobra.Command{ 14 | Use: "status [INSTANCE_NAME...]", 15 | Short: "Get instance(s) status", 16 | Long: fmt.Sprintf("Get instance(s) status\n\n%s", runningCommonUsage), 17 | Run: func(cmd *cobra.Command, args []string) { 18 | err := runStatusCmd(cmd, args) 19 | if err != nil { 20 | log.Fatalf(err.Error()) 21 | } 22 | }, 23 | ValidArgsFunction: ShellCompRunningInstances, 24 | } 25 | 26 | rootCmd.AddCommand(statusCmd) 27 | 28 | // FLAGS 29 | configureFlags(statusCmd) 30 | 31 | // application name flag 32 | addNameFlag(statusCmd) 33 | 34 | // stateboard flags 35 | addStateboardRunningFlags(statusCmd) 36 | 37 | // common running paths 38 | addCommonRunningPathsFlags(statusCmd) 39 | } 40 | 41 | func runStatusCmd(cmd *cobra.Command, args []string) error { 42 | setStateboardFlagIsSet(cmd) 43 | 44 | if err := running.FillCtx(&ctx, args); err != nil { 45 | return err 46 | } 47 | 48 | if err := running.Status(&ctx); err != nil { 49 | return err 50 | } 51 | 52 | return nil 53 | } 54 | -------------------------------------------------------------------------------- /cli/docker/build_test.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | type testReadCloser struct { 13 | data bytes.Buffer 14 | } 15 | 16 | func (r *testReadCloser) Put(data string) { 17 | r.data.WriteString(data) 18 | } 19 | 20 | func (r *testReadCloser) Clear() { 21 | r.data.Reset() 22 | } 23 | 24 | func (r *testReadCloser) Read(p []byte) (int, error) { 25 | return r.data.Read(p) 26 | } 27 | 28 | func (r *testReadCloser) Close() error { 29 | return nil 30 | } 31 | 32 | func TestPrintBuildOutput(t *testing.T) { 33 | t.Parallel() 34 | 35 | 
// TestGetSetWeightEditReplicasetOpts checks that getSetWeightEditReplicasetOpts
// builds edit options that target the replicaset by UUID and carry the new
// weight, for both integral and fractional weight values.
func TestGetSetWeightEditReplicasetOpts(t *testing.T) {
	assert := assert.New(t)

	var err error
	var oldWeight float64
	var newWeight float64
	var opts *EditReplicasetOpts

	oldWeight = 1

	// Minimal replicaset fixture with a current weight set.
	topologyReplicaset := &TopologyReplicaset{
		UUID:   "replicaset-uuid",
		Alias:  "replicaset-alias",
		Status: "healthy",
		Roles:  []string{"some-role", "other-role"},
		Weight: &oldWeight,
		Instances: TopologyInstances{
			&TopologyInstance{
				Alias: "instance-1",
				UUID:  "uuid-1",
			},
		},
	}

	// int
	newWeight = 111

	opts, err = getSetWeightEditReplicasetOpts(newWeight, topologyReplicaset)
	assert.Nil(err)
	assert.Equal(topologyReplicaset.UUID, opts.ReplicasetUUID)
	assert.Equal(newWeight, *opts.Weight)

	// float
	newWeight = 111.123

	opts, err = getSetWeightEditReplicasetOpts(newWeight, topologyReplicaset)
	assert.Nil(err)
	assert.Equal(topologyReplicaset.UUID, opts.ReplicasetUUID)
	assert.Equal(newWeight, *opts.Weight)
}
-------------------------------------------------------------------------------- /cli/replicasets/bootstrap_vshard.go: -------------------------------------------------------------------------------- 1 | package replicasets 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/apex/log" 8 | "github.com/tarantool/cartridge-cli/cli/cluster" 9 | "github.com/tarantool/cartridge-cli/cli/connector" 10 | "github.com/tarantool/cartridge-cli/cli/context" 11 | ) 12 | 13 | func BootstrapVshard(ctx *context.Ctx, args []string) error { 14 | conn, err := cluster.ConnectToSomeJoinedInstance(ctx) 15 | if err != nil { 16 | return err 17 | } 18 | 19 | if err := bootstrapVshard(conn); err != nil { 20 | return fmt.Errorf("failed to bootstrap vshard: %s", err) 21 | } 22 | 23 | log.Infof("Bootstrap vshard task completed successfully, check the cluster status") 24 | 25 | return nil 26 | } 27 | 28 | func bootstrapVshard(conn *connector.Conn) error { 29 | req := connector.EvalReq(bootstrapVshardBody) 30 | 31 | if _, err := conn.Exec(req); err != nil { 32 | if strings.Contains(err.Error(), `Sharding config is empty`) { 33 | // XXX: see https://github.com/tarantool/cartridge/issues/1148 34 | log.Warnf( 35 | `It's possible that there is no running instances of some configured vshard groups. 
import pytest
from utils import (get_admin_connection_params, get_log_lines,
                   run_command_and_get_output)


# Check that `cartridge admin --list` exits successfully and prints every
# registered admin function with its usage line, for each supported way of
# connecting to the instance.
@pytest.mark.parametrize('connection_type', ['find-socket', 'connect', 'instance'])
def test_list(cartridge_cmd, custom_admin_running_instances, connection_type, tmpdir):
    project = custom_admin_running_instances['project']

    cmd = [
        cartridge_cmd, 'admin',
        '--list'
    ]
    # Connection flags vary with the parametrized connection type.
    cmd.extend(get_admin_connection_params(connection_type, project))

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 0

    # The expected functions are those registered by the custom admin fixture.
    assert get_log_lines(output) == [
        '• Available admin functions:',
        'echo_user echo_user usage',
        'func.long.name func_long_name usage',
        'func_conflicting func_conflicting usage',
        'func_long_arg func_long_arg usage',
        'func_no_args func_no_args usage',
        'func_print func_print usage',
        'func_raises_err func_raises_err usage',
        'func_rets_err func_rets_err usage',
        'func_rets_non_str func_rets_non_str usage',
        'func_rets_str func_rets_str usage',
    ]
(sends SIGTERM)\n%s", runningCommonUsage), 17 | Run: func(cmd *cobra.Command, args []string) { 18 | err := runStopCmd(cmd, args) 19 | if err != nil { 20 | log.Fatalf(err.Error()) 21 | } 22 | }, 23 | ValidArgsFunction: ShellCompRunningInstances, 24 | } 25 | 26 | rootCmd.AddCommand(stopCmd) 27 | 28 | // FLAGS 29 | configureFlags(stopCmd) 30 | 31 | // application name flag 32 | addNameFlag(stopCmd) 33 | 34 | // stateboard flags 35 | addStateboardRunningFlags(stopCmd) 36 | 37 | // common running paths 38 | addCommonRunningPathsFlags(stopCmd) 39 | 40 | // add --force flag 41 | stopCmd.Flags().BoolVarP(&ctx.Running.StopForced, "force", "f", false, stopForceUsage) 42 | } 43 | 44 | func runStopCmd(cmd *cobra.Command, args []string) error { 45 | setStateboardFlagIsSet(cmd) 46 | 47 | if err := running.FillCtx(&ctx, args); err != nil { 48 | return err 49 | } 50 | 51 | if err := running.Stop(&ctx); err != nil { 52 | return err 53 | } 54 | 55 | return nil 56 | } 57 | -------------------------------------------------------------------------------- /doc/cleanup.py: -------------------------------------------------------------------------------- 1 | #! 
#!/usr/bin/env python3
"""Strip volatile metadata from PO/POT translation files in place.

Clears headers, metadata and per-entry source occurrences so regenerated
translation files do not produce noisy diffs.
"""
import argparse
from glob import glob
from polib import pofile, POFile, _BaseFile

parser = argparse.ArgumentParser(description='Cleanup PO and POT files')
parser.add_argument('extension', type=str, choices=['po', 'pot', 'both'],
                    help='cleanup files with extension: po, pot or both')


class PoFile(POFile):
    # POFile subclass that serializes without emitting the metadata block.

    def __unicode__(self):
        # Bypass POFile.__unicode__ and use the base serialization,
        # which skips POFile's own header handling.
        return _BaseFile.__unicode__(self)

    def metadata_as_entry(self):
        # Return a stub whose string form is empty so no metadata entry
        # is rendered.
        # NOTE(review): this __unicode__ takes an extra positional arg;
        # presumably polib invokes it with one — confirm against polib's
        # _BaseFile.__unicode__ call site.
        class M:
            def __unicode__(self, _):
                return ''
        return M()


def cleanup_files(extension):
    """Clean every ``*.{extension}`` file under the current tree in place."""
    mask = f'**/*.{extension}'
    for file_path in glob(mask, recursive=True):
        print(f'cleanup {file_path}')
        po_file: POFile = pofile(file_path, klass=PoFile)
        # Blank out file-level header and metadata.
        po_file.header = ''
        po_file.metadata = {}
        po_file.metadata_is_fuzzy = False

        # Drop per-entry source occurrence references.
        for item in po_file:
            item.occurrences = None

        po_file.save()


if __name__ == "__main__":

    args = parser.parse_args()

    if args.extension in ['po', 'both']:
        cleanup_files('po')

    if args.extension in ['pot', 'both']:
        cleanup_files('pot')
package rpm

import (
	"bytes"
	"encoding/binary"
	"os"
	"path/filepath"
	"sort"
)

// packValues serializes the given values into a fresh buffer using
// big-endian byte order.
// NOTE(review): binary.Write errors are ignored here; per the
// encoding/binary docs it only fails for values without a fixed size,
// so callers must pass fixed-size types only.
func packValues(values ...interface{}) *bytes.Buffer {
	buf := bytes.NewBuffer(nil)

	for _, v := range values {
		binary.Write(buf, binary.BigEndian, v)
	}

	return buf
}

// alignData zero-pads data so its length becomes a multiple of boundaries.
// No-op when the length is already aligned.
func alignData(data *bytes.Buffer, boundaries int) {
	dataLen := data.Len()

	if dataLen%boundaries != 0 {
		alignedDataLen := (dataLen/boundaries + 1) * boundaries

		missedBytesNum := alignedDataLen - dataLen

		paddingBytes := make([]byte, missedBytesNum)
		data.Write(paddingBytes)
	}
}

// getSortedRelPaths walks srcDir and returns the sorted list of paths
// relative to srcDir, skipping entries present in the package-level
// systemDirs set.
func getSortedRelPaths(srcDir string) ([]string, error) {
	var files []string

	err := filepath.Walk(srcDir, func(filePath string, fileInfo os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		filePath, err = filepath.Rel(srcDir, filePath)
		if err != nil {
			return err
		}

		// system dirs shouldn't be added to the paths list
		if _, isSystem := systemDirs[filePath]; !isSystem {
			files = append(files, filePath)
		}

		return nil
	})

	if err != nil {
		return nil, err
	}

	sort.Strings(files)
	return files, nil
}
Stuck in %s', 30 | roles_configured_state, state 31 | )) 32 | end 33 | 34 | cfg:lock() 35 | local ok, err = confapplier.apply_config(cfg) 36 | assert(ok, string.format('Failed to apply new config: %s', err)) 37 | 38 | return true 39 | -------------------------------------------------------------------------------- /cli/rpm/signature.go: -------------------------------------------------------------------------------- 1 | package rpm 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/tarantool/cartridge-cli/cli/common" 8 | ) 9 | 10 | func genSignature(rpmBodyFilePath, rpmHeaderFilePath, cpioPath string) (*rpmTagSetType, error) { 11 | // SHA1 12 | sha1, err := common.FileSHA1Hex(rpmHeaderFilePath) 13 | if err != nil { 14 | return nil, fmt.Errorf("Failed to get header sha1: %s", err) 15 | } 16 | 17 | // SIG_SIZE 18 | rpmBodyFileInfo, err := os.Stat(rpmBodyFilePath) 19 | if err != nil { 20 | return nil, fmt.Errorf("Failed to get RPM body size: %s", err) 21 | } 22 | rpmBodyFileSize := rpmBodyFileInfo.Size() 23 | 24 | // PAYLOADSIZE 25 | cpioFileInfo, err := os.Stat(cpioPath) 26 | if err != nil { 27 | return nil, fmt.Errorf("Failed to get CPIO payload size: %s", err) 28 | } 29 | cpioSize := cpioFileInfo.Size() 30 | 31 | // MD5 32 | md5, err := common.FileMD5(rpmBodyFilePath) 33 | if err != nil { 34 | return nil, fmt.Errorf("Failed to get RPM body MD5: %s", err) 35 | } 36 | 37 | signature := rpmTagSetType{ 38 | {ID: signatureTagSHA1, Type: rpmTypeString, Value: sha1}, 39 | {ID: signatureTagSize, Type: rpmTypeInt32, Value: []int32{int32(rpmBodyFileSize)}}, 40 | {ID: signatureTagPayloadSize, Type: rpmTypeInt32, Value: []int32{int32(cpioSize)}}, 41 | {ID: signatureTagMD5, Type: rpmTypeBin, Value: md5}, 42 | } 43 | 44 | return &signature, nil 45 | } 46 | -------------------------------------------------------------------------------- /cli/docker/common.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 
4 | "context" 5 | "fmt" 6 | 7 | "github.com/docker/docker/client" 8 | goVersion "github.com/hashicorp/go-version" 9 | ) 10 | 11 | var ( 12 | dockerServerMinVersion *goVersion.Version 13 | ) 14 | 15 | func init() { 16 | dockerServerMinVersion = goVersion.Must(goVersion.NewSemver("17.03.2")) 17 | } 18 | 19 | func getServerVersion() (string, error) { 20 | cli, err := client.NewClientWithOpts(client.WithAPIVersionNegotiation()) 21 | if err != nil { 22 | return "", err 23 | } 24 | 25 | ctx := context.Background() 26 | version, err := cli.ServerVersion(ctx) 27 | if err != nil { 28 | return "", fmt.Errorf("Failed to get docker server version: %s", err) 29 | } 30 | 31 | return version.Version, nil 32 | } 33 | 34 | func CheckMinServerVersion() error { 35 | serverVersionStr, err := getServerVersion() 36 | if err != nil { 37 | return fmt.Errorf("Failed to check docker server version: %s", err) 38 | } 39 | 40 | serverVersion, err := goVersion.NewSemver(serverVersionStr) 41 | if err != nil { 42 | return fmt.Errorf("Failed to parse docker server version: %s", err) 43 | } 44 | 45 | if serverVersion.LessThan(dockerServerMinVersion) { 46 | return fmt.Errorf( 47 | "Docker version %s is not supported. 
Minimal required docker version is %s", 48 | serverVersion, dockerServerMinVersion, 49 | ) 50 | 51 | } 52 | 53 | return nil 54 | } 55 | -------------------------------------------------------------------------------- /cli/replicasets/expel_test.go: -------------------------------------------------------------------------------- 1 | package replicasets 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestExpelEditInstancesOpts(t *testing.T) { 10 | assert := assert.New(t) 11 | 12 | var instancesToExpelUUIDs []string 13 | var err error 14 | var opts *EditInstancesListOpts 15 | var optsMapsList []map[string]interface{} 16 | 17 | // no uuids are specified 18 | instancesToExpelUUIDs = []string{} 19 | 20 | opts, err = getExpelInstancesEditInstancesOpts(instancesToExpelUUIDs) 21 | assert.Nil(err) 22 | assert.Len(*opts, 0) 23 | 24 | optsMapsList = opts.ToMapsList() 25 | assert.Equal(0, len(optsMapsList)) 26 | 27 | // uuids are specified 28 | instancesToExpelUUIDs = []string{ 29 | "uuid-1", "uuid-2", "uuid-3", 30 | } 31 | 32 | opts, err = getExpelInstancesEditInstancesOpts(instancesToExpelUUIDs) 33 | assert.Nil(err) 34 | assert.Len(*opts, len(instancesToExpelUUIDs)) 35 | 36 | for i, uuid := range instancesToExpelUUIDs { 37 | opt := (*opts)[i] 38 | expOpt := EditInstanceOpts{ 39 | InstanceUUID: uuid, 40 | Expelled: true, 41 | } 42 | assert.Equal(expOpt, *opt) 43 | } 44 | 45 | optsMapsList = opts.ToMapsList() 46 | assert.Equal( 47 | []map[string]interface{}{ 48 | {"uuid": "uuid-1", "expelled": true}, 49 | {"uuid": "uuid-2", "expelled": true}, 50 | {"uuid": "uuid-3", "expelled": true}, 51 | }, 52 | optsMapsList, 53 | ) 54 | } 55 | -------------------------------------------------------------------------------- /cli/commands/clean.go: -------------------------------------------------------------------------------- 1 | package commands 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/apex/log" 7 | "github.com/spf13/cobra" 8 | 9 | 
// init registers the `cartridge clean` subcommand on the package-level
// rootCmd and wires its flags into the shared ctx.
func init() {
	var cleanCmd = &cobra.Command{
		Use:   "clean [INSTANCE_NAME...]",
		Short: "Clean instance(s) files",
		Long:  fmt.Sprintf("Clean instance(s) files\n\n%s", runningCommonUsage),
		Run: func(cmd *cobra.Command, args []string) {
			err := runCleanCmd(cmd, args)
			if err != nil {
				// exits the process with a non-zero status
				log.Fatalf(err.Error())
			}
		},
		// shell completion suggests names of currently running instances
		ValidArgsFunction: ShellCompRunningInstances,
	}

	rootCmd.AddCommand(cleanCmd)

	// FLAGS
	configureFlags(cleanCmd)

	// application name flag
	addNameFlag(cleanCmd)

	// stateboard flags
	addStateboardRunningFlags(cleanCmd)

	// clean-specific paths: these flags write straight into the shared ctx
	cleanCmd.Flags().StringVar(&ctx.Running.LogDir, "log-dir", "", logDirUsage)
	cleanCmd.Flags().StringVar(&ctx.Running.DataDir, "data-dir", "", dataDirUsage)
	// common running paths
	addCommonRunningPathsFlags(cleanCmd)
}

// runCleanCmd resolves the instance list from args into the shared ctx
// and removes the corresponding instance files via running.Clean.
// Returns the first error encountered, or nil on success.
func runCleanCmd(cmd *cobra.Command, args []string) error {
	// record whether the --stateboard flag was set explicitly
	setStateboardFlagIsSet(cmd)

	if err := running.FillCtx(&ctx, args); err != nil {
		return err
	}

	if err := running.Clean(&ctx); err != nil {
		return err
	}

	return nil
}
// FailoverOpts is a free-form set of failover parameters that is passed
// to the cluster as-is (after the stateboard -> tarantool renames below).
type FailoverOpts map[string]interface{}

// ProviderParams is a free-form set of state-provider parameters.
type ProviderParams map[string]interface{}

// Manage pushes the failover options to the cluster by evaluating
// manageFailoverBody on some running instance.
//
// NOTE(review): the receiver map is mutated in place ("stateboard" keys are
// renamed to Cartridge's "tarantool" terminology); callers should not reuse
// the map afterwards expecting the original keys.
func (failoverOpts FailoverOpts) Manage(ctx *context.Ctx) error {
	conn, err := cluster.ConnectToSomeRunningInstance(ctx)
	if err != nil {
		return fmt.Errorf("Failed to connect to some instance: %s", err)
	}

	// the CLI exposes "stateboard" terms, while Cartridge expects "tarantool"
	if provider, found := failoverOpts["state_provider"]; found {
		if provider == "stateboard" {
			failoverOpts["state_provider"] = "tarantool"
		}

		// NOTE(review): if "stateboard_params" is absent this stores a nil
		// entry under "tarantool_params" — confirm the Lua side tolerates it
		failoverOpts["tarantool_params"] = failoverOpts["stateboard_params"]
		delete(failoverOpts, "stateboard_params")
	}

	result, err := conn.Exec(connector.EvalReq(manageFailoverBody, failoverOpts))
	if err != nil {
		return fmt.Errorf("Failed to configure failover: %s", err)
	}

	// the eval body returns a (res, err) pair; the second slot carries
	// an application-level error message
	if len(result) == 2 {
		if funcErr := result[1]; funcErr != nil {
			return fmt.Errorf(
				"Failed to configure failover: %s",
				// Cartridge may use 'tarantool_params' in error messages. It can confuse the user
				strings.Replace(funcErr.(string), "tarantool_params", "stateboard_params", -1),
			)
		}
	}

	return nil
}
"password": "pass"}` 38 | _, err = getFailoverOpts(&ctx) 39 | assert.Equal("Please, specify --state-provider flag when using stateful mode", err.Error()) 40 | } 41 | -------------------------------------------------------------------------------- /doc/locale/ru/LC_MESSAGES/doc/global-flags.po: -------------------------------------------------------------------------------- 1 | 2 | msgid "Global flags" 3 | msgstr "Глобальные параметры" 4 | 5 | msgid "All Cartridge CLI commands support these flags:" 6 | msgstr "" 7 | "Все команды интерфейса командной строки Cartridge поддерживают следующие " 8 | "параметры:" 9 | 10 | msgid "``--verbose``" 11 | msgstr "``--verbose``" 12 | 13 | msgid "" 14 | "Run commands with verbose output, including the output of nested commands " 15 | "like ``tarantoolctl rocks make`` or ``docker build``." 16 | msgstr "" 17 | "Вызов команд с детализацией вывода, включая результаты вложенных команд, " 18 | "таких как ``tarantoolctl rocks make`` или ``docker build``." 19 | 20 | msgid "``--debug``" 21 | msgstr "``--debug``" 22 | 23 | msgid "" 24 | "Run command in debug mode -- that is, with verbose output and without " 25 | "removing temporary files. Useful for debugging ``cartridge pack``." 26 | msgstr "" 27 | "Вызов команд в режиме отладки: с детализацией вывода и сохранением временных" 28 | " файлов. Используется для отладки ``cartridge pack``." 29 | 30 | msgid "``--quiet``" 31 | msgstr "``--quiet``" 32 | 33 | msgid "" 34 | "Hide command output, only display error messages. Useful for suppressing the" 35 | " huge output of ``cartridge pack`` and ``cartridge build``." 36 | msgstr "" 37 | "Сокрытие вывода команды, отображение только сообщений об ошибках. Помогает " 38 | "скрыть огромное количество информации, выводимое командами ``cartridge " 39 | "pack`` и ``cartridge build``." 
#!/usr/bin/env tarantool

require('strict').on()

if package.setsearchroot ~= nil then
    package.setsearchroot()
else
    -- Workaround for rocks loading in tarantool 1.10
    -- It can be removed in tarantool > 2.2
    -- Without package.setsearchroot, tarantool resolves require() against
    -- the current working directory and package.(c)path only. Running
    -- "tarantool myapp/stateboard.init.lua" from elsewhere would then fail
    -- to find the app's own modules and the ones under myapp/.rocks,
    -- so the app directory and its .rocks tree are prepended here.
    local fio = require('fio')
    local app_dir = fio.abspath(fio.dirname(arg[0]))

    -- each entry is prepended in turn, so the last one ends up first
    -- in the search path — same final order as explicit assignments
    local lua_templates = {
        '/?.lua',
        '/?/init.lua',
        '/.rocks/share/tarantool/?.lua',
        '/.rocks/share/tarantool/?/init.lua',
    }
    for _, template in ipairs(lua_templates) do
        package.path = app_dir .. template .. ';' .. package.path
    end

    local c_templates = {
        '/?.so',
        '/?.dylib',
        '/.rocks/lib/tarantool/?.so',
        '/.rocks/lib/tarantool/?.dylib',
    }
    for _, template in ipairs(c_templates) do
        package.cpath = app_dir .. template .. ';' .. package.cpath
    end
end

require('cartridge.stateboard').cfg()
3 | 4 | /* 5 | * Redistribution and use in source and binary forms, with or 6 | * without modification, are permitted provided that the following 7 | * conditions are met: 8 | * 9 | * 1. Redistributions of source code must retain the above 10 | * copyright notice, this list of conditions and the 11 | * following disclaimer. 12 | * 13 | * 2. Redistributions in binary form must reproduce the above 14 | * copyright notice, this list of conditions and the following 15 | * disclaimer in the documentation and/or other materials 16 | * provided with the distribution. 17 | * 18 | * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND 19 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 22 | * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 23 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 26 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 27 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF 29 | * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 | * SUCH DAMAGE. 
31 | */ 32 | -------------------------------------------------------------------------------- /cli/replicasets/roles_test.go: -------------------------------------------------------------------------------- 1 | package replicasets 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestGetUpdateRolesEditReplicasetsOpts(t *testing.T) { 10 | assert := assert.New(t) 11 | 12 | var err error 13 | var specifiedRoles []string 14 | var vshardGroup string 15 | var opts *EditReplicasetOpts 16 | 17 | topologyReplicaset := &TopologyReplicaset{ 18 | UUID: "replicaset-uuid", 19 | Alias: "replicaset-alias", 20 | Status: "healthy", 21 | Roles: []string{"some-role", "other-role"}, 22 | Instances: TopologyInstances{ 23 | &TopologyInstance{ 24 | Alias: "instance-1", 25 | UUID: "uuid-1", 26 | }, 27 | }, 28 | } 29 | 30 | // add roles, vshard group is specified 31 | 32 | specifiedRoles = []string{"some-new-role", "some-role"} 33 | vshardGroup = "some-group" 34 | 35 | opts, err = getUpdateRolesEditReplicasetsOpts(addRolesToList, specifiedRoles, vshardGroup, topologyReplicaset) 36 | assert.Nil(err) 37 | assert.Equal("replicaset-uuid", opts.ReplicasetUUID) 38 | assert.Equal([]string{"other-role", "some-new-role", "some-role"}, opts.Roles) 39 | assert.Equal(vshardGroup, *opts.VshardGroup) 40 | 41 | specifiedRoles = []string{"some-not-added-role", "some-role"} 42 | 43 | opts, err = getUpdateRolesEditReplicasetsOpts(removeRolesFromList, specifiedRoles, "", topologyReplicaset) 44 | assert.Nil(err) 45 | assert.Equal("replicaset-uuid", opts.ReplicasetUUID) 46 | assert.Equal([]string{"other-role"}, opts.Roles) 47 | } 48 | -------------------------------------------------------------------------------- /examples/getting-started-app/stateboard.init.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | if package.setsearchroot ~= nil then 6 | 
package.setsearchroot() 7 | else 8 | -- Workaround for rocks loading in tarantool 1.10 9 | -- It can be removed in tarantool > 2.2 10 | -- By default, when you do require('mymodule'), tarantool looks into 11 | -- the current working directory and whatever is specified in 12 | -- package.path and package.cpath. If you run your app while in the 13 | -- root directory of that app, everything goes fine, but if you try to 14 | -- start stateboard with "tarantool myapp/stateboard.init.lua", it will fail to load 15 | -- its modules, and modules from myapp/.rocks. 16 | local fio = require('fio') 17 | local app_dir = fio.abspath(fio.dirname(arg[0])) 18 | print('App dir set to ' .. app_dir) 19 | package.path = app_dir .. '/?.lua;' .. package.path 20 | package.path = app_dir .. '/?/init.lua;' .. package.path 21 | package.path = app_dir .. '/.rocks/share/tarantool/?.lua;' .. package.path 22 | package.path = app_dir .. '/.rocks/share/tarantool/?/init.lua;' .. package.path 23 | package.cpath = app_dir .. '/?.so;' .. package.cpath 24 | package.cpath = app_dir .. '/?.dylib;' .. package.cpath 25 | package.cpath = app_dir .. '/.rocks/lib/tarantool/?.so;' .. package.cpath 26 | package.cpath = app_dir .. '/.rocks/lib/tarantool/?.dylib;' .. 
// init registers the `cartridge enter` and `cartridge connect` subcommands
// on the package-level rootCmd and wires their flags into the shared ctx.
func init() {
	// `enter` connects to a locally started instance via its console socket
	var enterCmd = &cobra.Command{
		Use:   "enter INSTANCE_NAME",
		Short: "Enter to application instance console",
		Run: func(cmd *cobra.Command, args []string) {
			if err := connect.Enter(&ctx, args); err != nil {
				// exits the process with a non-zero status
				log.Fatalf(err.Error())
			}
		},
		// shell completion suggests names of currently running instances
		ValidArgsFunction: ShellCompRunningInstances,
		Args:              cobra.MaximumNArgs(1),
	}

	rootCmd.AddCommand(enterCmd)

	// FLAGS
	configureFlags(enterCmd)

	// application name flag
	addNameFlag(enterCmd)
	// run-dir flag: where console sockets are looked up
	enterCmd.Flags().StringVar(&ctx.Running.RunDir, "run-dir", "", runDirUsage)

	// `connect` dials an arbitrary URI instead of a local instance
	var connectCmd = &cobra.Command{
		Use:   "connect URI",
		Short: "Connect to specified URI",
		Run: func(cmd *cobra.Command, args []string) {
			if err := connect.Connect(&ctx, args); err != nil {
				log.Fatalf(err.Error())
			}
		},
		Args: cobra.MaximumNArgs(1),
	}

	rootCmd.AddCommand(connectCmd)

	// FLAGS
	configureFlags(connectCmd)

	// username flag
	connectCmd.Flags().StringVarP(&ctx.Connect.Username, "username", "u", "", connectUsernameUsage)
	// password flag
	connectCmd.Flags().StringVarP(&ctx.Connect.Password, "password", "p", "", connectPasswordUsage)
}
"github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestGetFailoverPriorityEditReplicasetOpts(t *testing.T) { 11 | assert := assert.New(t) 12 | 13 | var err error 14 | var instanceNames []string 15 | var opts *EditReplicasetOpts 16 | 17 | topologyReplicaset := &TopologyReplicaset{ 18 | UUID: "replicaset-uuid", 19 | Alias: "replicaset-alias", 20 | Status: "healthy", 21 | Roles: []string{"some-role", "other-role"}, 22 | Instances: TopologyInstances{ 23 | &TopologyInstance{ 24 | Alias: "instance-1", 25 | UUID: "uuid-1", 26 | }, 27 | &TopologyInstance{ 28 | Alias: "instance-2", 29 | UUID: "uuid-2", 30 | }, 31 | &TopologyInstance{ 32 | Alias: "instance-3", 33 | UUID: "uuid-3", 34 | }, 35 | }, 36 | } 37 | 38 | // everything is OK 39 | instanceNames = []string{"instance-3", "instance-2"} 40 | 41 | opts, err = getSetFailoverPriorityEditReplicasetOpts(instanceNames, topologyReplicaset) 42 | assert.Nil(err) 43 | assert.Equal(topologyReplicaset.UUID, opts.ReplicasetUUID) 44 | assert.Equal([]string{"uuid-3", "uuid-2"}, opts.FailoverPriorityUUIDs) 45 | 46 | // specified unknown instance alias 47 | instanceNames = []string{"unknown-instance", "instance-3", "instance-2"} 48 | 49 | opts, err = getSetFailoverPriorityEditReplicasetOpts(instanceNames, topologyReplicaset) 50 | assert.True(strings.Contains(err.Error(), `Instance unknown-instance not found in replica set`), err.Error()) 51 | } 52 | -------------------------------------------------------------------------------- /cli/build/local.go: -------------------------------------------------------------------------------- 1 | package build 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "os" 7 | "os/exec" 8 | "path/filepath" 9 | 10 | "github.com/apex/log" 11 | 12 | "github.com/tarantool/cartridge-cli/cli/common" 13 | "github.com/tarantool/cartridge-cli/cli/context" 14 | ) 15 | 16 | func buildProjectLocally(ctx *context.Ctx) error { 17 | if err := common.CheckTarantoolBinaries(); err != nil { 18 | return fmt.Errorf("Tarantool 
binaries are required for local build: %s", err) 19 | } 20 | common.CheckRecommendedBinaries("cmake", "make", "git", "unzip", "gcc") 21 | 22 | // pre-build 23 | preBuildHookPath := filepath.Join(ctx.Build.Dir, preBuildHookName) 24 | 25 | if _, err := os.Stat(preBuildHookPath); err == nil { 26 | log.Infof("Running `%s`", preBuildHookName) 27 | err = common.RunHook(preBuildHookPath, ctx.Cli.Verbose) 28 | if err != nil { 29 | return fmt.Errorf("Failed to run pre-build hook: %s", err) 30 | } 31 | } else if !os.IsNotExist(err) { 32 | return fmt.Errorf("Unable to use pre-build hook: %s", err) 33 | } 34 | 35 | // tarantoolctl rocks make 36 | rocksMakeCmdParts := []string{"tarantoolctl", "rocks", "make"} 37 | if ctx.Build.Spec != "" { 38 | rocksMakeCmdParts = append(rocksMakeCmdParts, ctx.Build.Spec) 39 | } 40 | 41 | log.Infof("Running `%s`", strings.Join(rocksMakeCmdParts, " ")) 42 | rocksMakeCmd := exec.Command(rocksMakeCmdParts[0], rocksMakeCmdParts[1:]...) 43 | err := common.RunCommand(rocksMakeCmd, ctx.Build.Dir, ctx.Cli.Verbose) 44 | if err != nil { 45 | return fmt.Errorf("Failed to install rocks: %s", err) 46 | } 47 | 48 | return nil 49 | } 50 | -------------------------------------------------------------------------------- /cli/create/git.go: -------------------------------------------------------------------------------- 1 | package create 2 | 3 | import ( 4 | "fmt" 5 | "os/exec" 6 | 7 | "github.com/apex/log" 8 | 9 | "github.com/tarantool/cartridge-cli/cli/common" 10 | "github.com/tarantool/cartridge-cli/cli/context" 11 | ) 12 | 13 | const ( 14 | initialTagName = "0.1.0" 15 | initialCommitMsg = "Initial commit" 16 | ) 17 | 18 | func initGitRepo(ctx *context.Ctx) error { 19 | // check that git is installed 20 | if !common.GitIsInstalled() { 21 | return fmt.Errorf("git not found. 
def get_replicaset_by_alias(replicasets, alias):
    """Return the first replicaset dict whose 'alias' equals alias, or None."""
    return next(
        (replicaset for replicaset in replicasets if replicaset['alias'] == alias),
        None,
    )
35 | assert 'data' in resp 36 | instances = resp['data']['servers'] 37 | 38 | uuid = None 39 | for instance in instances: 40 | if instance['alias'] == instance_name: 41 | uuid = instance['uuid'] 42 | break 43 | 44 | assert uuid is not None 45 | 46 | query = ''' 47 | mutation { 48 | set_zone: cluster{ 49 | edit_topology( 50 | servers: [{ 51 | uuid: "%s", 52 | zone: "%s", 53 | }] 54 | ) { servers { uuid } } 55 | }} 56 | ''' % (uuid, zone) 57 | 58 | r = requests.post(admin_api_url, json={'query': query}) 59 | assert r.status_code == 200 60 | resp = r.json() 61 | assert 'data' in resp 62 | -------------------------------------------------------------------------------- /cli/bench/config.go: -------------------------------------------------------------------------------- 1 | package bench 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "text/tabwriter" 7 | 8 | "github.com/FZambia/tarantool" 9 | "github.com/tarantool/cartridge-cli/cli/context" 10 | ) 11 | 12 | var ( 13 | benchSpaceName = "__benchmark_space__" 14 | benchSpacePrimaryIndexName = "__bench_primary_key__" 15 | PreFillingCount = 1000000 16 | getRandomTupleCommand = fmt.Sprintf( 17 | "box.space.%s.index.%s:random", 18 | benchSpaceName, 19 | benchSpacePrimaryIndexName, 20 | ) 21 | ) 22 | 23 | // printConfig output formatted config parameters. 
// printConfig output formatted config parameters.
// It writes the server greeting, the benchmark parameters from ctx,
// and a small tab-aligned table describing the generated data schema
// to stdout.
func printConfig(ctx context.BenchCtx, tarantoolConnection *tarantool.Connection) {
	fmt.Printf("%s\n", tarantoolConnection.Greeting().Version)
	fmt.Printf("Parameters:\n")
	fmt.Printf("\tURL: %s\n", ctx.URL)
	fmt.Printf("\tuser: %s\n", ctx.User)
	fmt.Printf("\tconnections: %d\n", ctx.Connections)
	fmt.Printf("\tsimultaneous requests: %d\n", ctx.SimultaneousRequests)
	fmt.Printf("\tduration: %d seconds\n", ctx.Duration)
	fmt.Printf("\tkey size: %d bytes\n", ctx.KeySize)
	fmt.Printf("\tdata size: %d bytes\n", ctx.DataSize)
	// NOTE(review): "percentages" likely should read "percent" — left as-is
	// since it is user-facing output
	fmt.Printf("\tinsert: %d percentages\n", ctx.InsertCount)
	fmt.Printf("\tselect: %d percentages\n", ctx.SelectCount)
	fmt.Printf("\tupdate: %d percentages\n\n", ctx.UpdateCount)

	fmt.Printf("Data schema\n")
	// tabwriter aligns the table columns; Flush emits the buffered rows
	w := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', 0)
	fmt.Fprintf(w, "|\tkey\t|\tvalue\n")
	fmt.Fprintf(w, "|\t------------------------------\t|\t------------------------------\n")
	fmt.Fprintf(w, "|\trandom(%d)\t|\trandom(%d)\n", ctx.KeySize, ctx.DataSize)
	w.Flush()
}
// Run connects to an available application instance and invokes
// processAdminFunc (one of List, Help, Call below) on the connection.
// The connection is always closed before returning.
func Run(processAdminFunc ProcessAdminFuncType, ctx *context.Ctx, funcName string, flagSet *pflag.FlagSet, args []string) error {
	if err := checkCtx(ctx); err != nil {
		return err
	}

	// NOTE(review): helper name is misspelled ("Avaliable") — defined
	// elsewhere in this package, so it is not renamed here
	conn, err := getAvaliableConn(ctx)
	if err != nil {
		return fmt.Errorf("Failed to connect to application instance: %s", err)
	}
	defer conn.Close()

	return processAdminFunc(conn, funcName, flagSet, args)
}

// List prints the admin functions exposed by the application.
// funcName, flagSet and args are unused; they exist to satisfy
// ProcessAdminFuncType.
func List(conn *connector.Conn, funcName string, flagSet *pflag.FlagSet, args []string) error {
	return adminFuncList(conn)
}

// Help prints usage information for the given admin function.
// args is unused; it exists to satisfy ProcessAdminFuncType.
func Help(conn *connector.Conn, funcName string, flagSet *pflag.FlagSet, args []string) error {
	return adminFuncHelp(conn, flagSet, funcName)
}

// Call invokes the given admin function with the provided flags and args.
func Call(conn *connector.Conn, funcName string, flagSet *pflag.FlagSet, args []string) error {
	return adminFuncCall(conn, funcName, flagSet, args)
}
Thread(target=consume_lines, args=["3301", process.stdout]) 27 | thread.start() 28 | 29 | def kill(): 30 | process.send_signal(signal.SIGKILL) 31 | if thread is not None: 32 | thread.join(5) 33 | request.addfinalizer(kill) 34 | 35 | wait_for_connect() 36 | 37 | rc, output = run_command_and_get_output(base_cmd, cwd=tmpdir) 38 | assert rc == 0 39 | 40 | base_cmd = [cartridge_cmd, 'bench', '--duration=1', '--fill=1000'] 41 | rc, output = run_command_and_get_output(base_cmd, cwd=tmpdir) 42 | assert rc == 0 43 | 44 | base_cmd = [cartridge_cmd, 'bench', '--duration=1', '--insert=0', '--select=50', '--update=50'] 45 | rc, output = run_command_and_get_output(base_cmd, cwd=tmpdir) 46 | assert rc == 0 47 | -------------------------------------------------------------------------------- /doc/locale/ru/LC_MESSAGES/doc/lifecycle.po: -------------------------------------------------------------------------------- 1 | 2 | msgid "Cartridge application lifecycle" 3 | msgstr "Жизненный цикл приложения на Cartridge" 4 | 5 | msgid "In a nutshell:" 6 | msgstr "Основные этапы:" 7 | 8 | msgid "" 9 | ":doc:`Create an application`" 10 | " (for example, ``myapp``) from a template:" 11 | msgstr "" 12 | ":doc:`Создание приложения` " 13 | "(например, ``myapp``) по шаблону:" 14 | 15 | msgid "" 16 | "cartridge create --name myapp\n" 17 | "cd ./myapp" 18 | msgstr "" 19 | "cartridge create --name myapp\n" 20 | "cd ./myapp" 21 | 22 | msgid "" 23 | ":doc:`Build the application `" 24 | " for local development and testing:" 25 | msgstr "" 26 | ":doc:`Сборка приложения ` для" 27 | " локальной разработки и тестирования:" 28 | 29 | msgid "cartridge build" 30 | msgstr "cartridge build" 31 | 32 | msgid "" 33 | ":doc:`Run instances locally " 34 | "`:" 35 | msgstr "" 36 | ":doc:`Локальный запуск экземпляров " 37 | "`:" 38 | 39 | msgid "" 40 | "cartridge start\n" 41 | "cartridge stop" 42 | msgstr "" 43 | "cartridge start\n" 44 | "cartridge stop" 45 | 46 | msgid "" 47 | ":doc:`Pack the application ` " 48 | 
@pytest.fixture(scope="module")
def default_project(cartridge_cmd, module_tmpdir):
    # Project generated from the default 'cartridge' template, shared by the module.
    project = Project(cartridge_cmd, 'default-project', module_tmpdir, 'cartridge')
    return project


def _prebuilt_projects_dir():
    """Return the directory for prebuilt integration-test projects.

    The location comes from the CC_TEST_PREBUILT_PROJECTS environment
    variable; the caller's test fails with a clear message when it is unset.
    """
    prebuilt_dir = os.getenv("CC_TEST_PREBUILT_PROJECTS")
    if prebuilt_dir is None:
        print("Directory for cartridge-cli integration tests prebuilt projects is not set.\n",
              "Please set environmental variable: CC_TEST_PREBUILT_PROJECTS")
    assert prebuilt_dir is not None
    return prebuilt_dir


def _build_and_store(cartridge_cmd, project, dst_path, workdir):
    """Build the project with `cartridge build` and copy the result to dst_path."""
    cmd = [
        cartridge_cmd, "build", project.path
    ]

    process = subprocess.run(cmd, cwd=workdir)
    assert process.returncode == 0
    shutil.copytree(project.path, dst_path)


def test_make_project_with_cartridge(project_with_cartridge, cartridge_cmd, tmpdir):
    # Build the cartridge-based project and store it for later integration tests.
    path = os.path.join(_prebuilt_projects_dir(), "project_with_cartridge")
    _build_and_store(cartridge_cmd, project_with_cartridge, path, tmpdir)


def test_make_default_project(default_project, cartridge_cmd, tmpdir):
    # Build the default template project and store it for later integration tests.
    path = os.path.join(_prebuilt_projects_dir(), "default_project")
    _build_and_store(cartridge_cmd, default_project, path, tmpdir)
-------------------------------------------------------------------------------- /cli/admin/help.go: -------------------------------------------------------------------------------- 1 | package admin 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "time" 7 | 8 | "github.com/apex/log" 9 | 10 | "github.com/spf13/pflag" 11 | "github.com/tarantool/cartridge-cli/cli/connector" 12 | ) 13 | 14 | func adminFuncHelp(conn *connector.Conn, flagSet *pflag.FlagSet, funcName string) error { 15 | funcInfo, err := getFuncInfo(funcName, conn) 16 | if err != nil { 17 | return getCliExtError("Failed to get function %q signature: %s", funcName, err) 18 | } 19 | 20 | log.Infof("Admin function %q usage:\n\n%s", funcName, funcInfo.Format()) 21 | 22 | conflictingFlagNames := getConflictingFlagNames(funcInfo.Args, flagSet) 23 | if len(conflictingFlagNames) > 0 { 24 | log.Warnf( 25 | "Function has arguments with names that conflict with `cartridge admin` flags: %s. "+ 26 | "Calling this function will raise an error", 27 | strings.Join(conflictingFlagNames, ", "), 28 | ) 29 | } 30 | 31 | return nil 32 | } 33 | 34 | func getFuncInfo(funcName string, conn *connector.Conn) (*FuncInfo, error) { 35 | funcBody, err := getAdminFuncEvalTypedBody(adminHelpFuncName) 36 | if err != nil { 37 | return nil, err 38 | } 39 | 40 | req := connector.EvalReq(funcBody, funcName).SetReadTimeout(3 * time.Second) 41 | 42 | funcInfoSlice := []FuncInfo{} 43 | if err := conn.ExecTyped(req, &funcInfoSlice); err != nil { 44 | return nil, err 45 | } 46 | 47 | if len(funcInfoSlice) != 1 { 48 | return nil, fmt.Errorf("Function signature received in a bad format") 49 | } 50 | 51 | funcInfo := funcInfoSlice[0] 52 | funcInfo.Name = funcName 53 | 54 | return &funcInfo, nil 55 | 56 | } 57 | 58 | var ( 59 | funcHelpMsgTmpl = `{{ .FuncInfo }}{{ if .ArgsUsage }} 60 | 61 | Args: 62 | {{ .ArgsUsage }}{{ end }}` 63 | ) 64 | -------------------------------------------------------------------------------- /cli/repair/set_leader.go: 
-------------------------------------------------------------------------------- 1 | package repair 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/tarantool/cartridge-cli/cli/common" 7 | "github.com/tarantool/cartridge-cli/cli/context" 8 | ) 9 | 10 | func patchConfSetLeader(topologyConf *TopologyConfType, ctx *context.Ctx) ([]common.ResultMessage, error) { 11 | return patchConf(setLeader, topologyConf, ctx) 12 | } 13 | 14 | func setLeader(topologyConf *TopologyConfType, ctx *context.Ctx) error { 15 | instanceUUID := ctx.Repair.SetLeaderInstanceUUID 16 | replicasetUUID := ctx.Repair.SetLeaderReplicasetUUID 17 | 18 | instanceConf, ok := topologyConf.Instances[instanceUUID] 19 | if !ok { 20 | return fmt.Errorf("Instance %s isn't found in cluster", instanceUUID) 21 | } 22 | 23 | // check that specified instance isn't disabled or expelled 24 | // and belongs to specified replicaset 25 | if instanceConf.IsExpelled { 26 | return fmt.Errorf("Instance %s is expelled", instanceUUID) 27 | } 28 | 29 | if instanceConf.IsDisabled { 30 | return fmt.Errorf("Instance %s is disabled", instanceUUID) 31 | } 32 | 33 | if instanceConf.ReplicasetUUID != replicasetUUID { 34 | return fmt.Errorf("Instance %s doesn't belong to replicaset %s", instanceUUID, replicasetUUID) 35 | } 36 | 37 | replicasetConf, ok := topologyConf.Replicasets[replicasetUUID] 38 | if !ok { 39 | return fmt.Errorf("Replicaset %s isn't found in the cluster", replicasetUUID) 40 | } 41 | 42 | instanceIndex := common.StringsSliceElemIndex(replicasetConf.Leaders, instanceUUID) 43 | if instanceIndex != -1 { 44 | replicasetConf.SetLeaders(common.RemoveFromStringSlice(replicasetConf.Leaders, instanceIndex)) 45 | } 46 | replicasetConf.SetLeaders(common.InsertInStringSlice(replicasetConf.Leaders, 0, instanceUUID)) 47 | 48 | return nil 49 | } 50 | -------------------------------------------------------------------------------- /cli/commands/log.go: -------------------------------------------------------------------------------- 1 
| package commands 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/apex/log" 8 | "github.com/spf13/cobra" 9 | 10 | "github.com/tarantool/cartridge-cli/cli/project" 11 | "github.com/tarantool/cartridge-cli/cli/running" 12 | ) 13 | 14 | func init() { 15 | var logCmd = &cobra.Command{ 16 | Use: "log [INSTANCE_NAME...]", 17 | Short: "Get logs of instance(s)", 18 | Long: fmt.Sprintf("Get logs of instance(s)\n\n%s", runningCommonUsage), 19 | Run: func(cmd *cobra.Command, args []string) { 20 | err := runLogCmd(cmd, args) 21 | if err != nil { 22 | log.Fatalf(err.Error()) 23 | } 24 | }, 25 | ValidArgsFunction: ShellCompRunningInstances, 26 | } 27 | 28 | rootCmd.AddCommand(logCmd) 29 | 30 | // FLAGS 31 | configureFlags(logCmd) 32 | 33 | // application name flag 34 | addNameFlag(logCmd) 35 | 36 | // log-specific flags 37 | logCmd.Flags().BoolVarP(&ctx.Running.LogFollow, "follow", "f", false, logFollowUsage) 38 | logCmd.Flags().IntVarP(&ctx.Running.LogLines, "lines", "n", 0, logLinesUsage) 39 | 40 | // stateboard flags 41 | addStateboardRunningFlags(logCmd) 42 | 43 | // log-specific paths 44 | logCmd.Flags().StringVar(&ctx.Running.LogDir, "log-dir", "", logDirUsage) 45 | // common running paths 46 | addCommonRunningPathsFlags(logCmd) 47 | } 48 | 49 | func runLogCmd(cmd *cobra.Command, args []string) error { 50 | setStateboardFlagIsSet(cmd) 51 | 52 | if err := setDefaultValue(cmd.Flags(), "lines", strconv.Itoa(defaultLogLines)); err != nil { 53 | return project.InternalError("Failed to set default lines value: %s", err) 54 | } 55 | 56 | if err := running.FillCtx(&ctx, args); err != nil { 57 | return err 58 | } 59 | 60 | if err := running.Log(&ctx); err != nil { 61 | return err 62 | } 63 | 64 | return nil 65 | } 66 | -------------------------------------------------------------------------------- /examples/getting-started-app/init.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | 
require('strict').on() 4 | 5 | if package.setsearchroot ~= nil then 6 | package.setsearchroot() 7 | else 8 | -- Workaround for rocks loading in tarantool 1.10 9 | -- It can be removed in tarantool > 2.2 10 | -- By default, when you do require('mymodule'), tarantool looks into 11 | -- the current working directory and whatever is specified in 12 | -- package.path and package.cpath. If you run your app while in the 13 | -- root directory of that app, everything goes fine, but if you try to 14 | -- start your app with "tarantool myapp/init.lua", it will fail to load 15 | -- its modules, and modules from myapp/.rocks. 16 | local fio = require('fio') 17 | local app_dir = fio.abspath(fio.dirname(arg[0])) 18 | print('App dir set to ' .. app_dir) 19 | package.path = app_dir .. '/?.lua;' .. package.path 20 | package.path = app_dir .. '/?/init.lua;' .. package.path 21 | package.path = app_dir .. '/.rocks/share/tarantool/?.lua;' .. package.path 22 | package.path = app_dir .. '/.rocks/share/tarantool/?/init.lua;' .. package.path 23 | package.cpath = app_dir .. '/?.so;' .. package.cpath 24 | package.cpath = app_dir .. '/?.dylib;' .. package.cpath 25 | package.cpath = app_dir .. '/.rocks/lib/tarantool/?.so;' .. package.cpath 26 | package.cpath = app_dir .. '/.rocks/lib/tarantool/?.dylib;' .. 
def test_bad_replicaset_name(cartridge_cmd, project_with_vshard_replicasets):
    """`set-weight` must fail with a clear error for an unknown replica set."""
    project = project_with_vshard_replicasets.project

    rc, output = run_command_and_get_output(
        [
            cartridge_cmd, 'replicasets', 'set-weight',
            '--replicaset', 'unknown-replicaset',
            '123.45',
        ],
        cwd=project.path,
    )

    assert rc == 1
    assert "Replica set unknown-replicaset isn't found in current topology" in output


def test_set_weight(cartridge_cmd, project_with_vshard_replicasets):
    """`set-weight` updates the weight of an existing replica set."""
    project = project_with_vshard_replicasets.project
    instances = project_with_vshard_replicasets.instances
    replicasets = project_with_vshard_replicasets.replicasets

    hot_storage_rpl = replicasets['hot-storage']
    hot_master = instances['hot-master']
    admin_api_url = hot_master.get_admin_api_url()

    NEW_WEIGHT = 123.45

    # Set the replica set weight via the CLI.
    rc, output = run_command_and_get_output(
        [
            cartridge_cmd, 'replicasets', 'set-weight',
            '--replicaset', hot_storage_rpl.name,
            str(NEW_WEIGHT),
        ],
        cwd=project.path,
    )

    assert rc == 0
    assert get_log_lines(output) == [
        '• Replica set %s weight is set to %s' % (hot_storage_rpl.name, NEW_WEIGHT),
    ]

    # The new weight must be visible through the admin API.
    current_replicasets = get_replicasets(admin_api_url)
    hot_replicaset = get_replicaset_by_alias(current_replicasets, hot_storage_rpl.name)
    assert hot_replicaset is not None
    assert hot_replicaset['weight'] == NEW_WEIGHT
const (
	TCPNetwork  = "tcp"
	UnixNetwork = "unix"
)

// ConnOpts describes how to reach a Tarantool instance: the network kind
// (tcp or unix socket), the address, and optional credentials.
type ConnOpts struct {
	Network  string
	Address  string
	Username string
	Password string
}

// getConnOpts parses a connection string of the form
// [user[:password]@]address and returns the resulting options.
// Explicitly passed username/password take precedence over credentials
// embedded in the connection string. The address may carry a scheme prefix
// (tcp://, tcp:, unix://, unix:, unix/:) or be a bare socket path starting
// with '.' or '/'; a bare host:port defaults to TCP.
func getConnOpts(connString, username, password string) *ConnOpts {
	connOpts := ConnOpts{
		Username: username,
		Password: password,
	}

	// Split off the optional "user[:password]@" part.
	parts := strings.SplitN(connString, "@", 2)
	address := parts[len(parts)-1]

	if len(parts) == 2 {
		creds := strings.SplitN(parts[0], ":", 2)
		if connOpts.Username == "" {
			connOpts.Username = creds[0]
		}
		if len(creds) == 2 && connOpts.Password == "" {
			connOpts.Password = creds[1]
		}
	}

	// A leading '.' or '/' means a filesystem path, i.e. a unix socket;
	// the path is kept verbatim.
	if strings.HasPrefix(address, ".") || strings.HasPrefix(address, "/") {
		connOpts.Network = UnixNetwork
		connOpts.Address = address
		return &connOpts
	}

	// Explicit scheme prefixes, in the same precedence order as before
	// ("unix://" ahead of "unix:", "tcp://" ahead of "tcp:").
	schemes := []struct {
		prefix  string
		network string
	}{
		{"unix://", UnixNetwork},
		{"unix:", UnixNetwork},
		{"unix/:", UnixNetwork},
		{"tcp://", TCPNetwork},
		{"tcp:", TCPNetwork},
	}
	for _, scheme := range schemes {
		if strings.HasPrefix(address, scheme.prefix) {
			connOpts.Network = scheme.network
			connOpts.Address = strings.TrimPrefix(address, scheme.prefix)
			return &connOpts
		}
	}

	// No scheme: default to TCP with the address as-is.
	connOpts.Network = TCPNetwork
	connOpts.Address = address

	return &connOpts
}
doc/locale/en -c doc -b gettext 31 | 32 | - name: Pull translations from Crowdin 33 | uses: crowdin/github-action@1.0.21 34 | with: 35 | config: 'doc/crowdin.yaml' 36 | upload_sources: false 37 | upload_translations: false 38 | push_translations: false 39 | download_translations: true 40 | download_language: 'ru' 41 | crowdin_branch_name: "${{env.BRANCH_NAME}}" 42 | debug_mode: true 43 | env: 44 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 45 | CROWDIN_PERSONAL_TOKEN: ${{secrets.CROWDIN_PERSONAL_TOKEN}} 46 | 47 | - name: Cleanup translation files 48 | run: | 49 | sudo chown -R runner:docker doc/locale/ru/LC_MESSAGES 50 | python doc/cleanup.py po 51 | 52 | - name: Commit translation files 53 | uses: stefanzweifel/git-auto-commit-action@v4.1.2 54 | with: 55 | commit_message: "Update translations" 56 | file_pattern: "*.po" 57 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | before: 2 | hooks: 3 | - go mod download 4 | - mage genCompletion # generate completion scripts. In global hook to avoid parallel writing. 
// Results describes a set of benchmark results accumulated over a run.
type Results struct {
	handledRequestsCount int     // Count of all executed requests.
	successResultCount   int     // Count of successful requests across all connections.
	failedResultCount    int     // Count of failed requests across all connections.
	duration             float64 // Benchmark duration.
	requestsPerSecond    int     // Number of requests per second - the main measured value.
}

// RequestOperaion describes an insert, select or update operation in a request.
// NOTE(review): the type name is misspelled ("Operaion"); renaming it would
// break external callers, so it is kept as is.
type RequestOperaion func(*Request)

// Request describes various types of requests.
type Request struct {
	operation           RequestOperaion       // insertOperation, selectOperation or updateOperation.
	ctx                 context.BenchCtx      // Benchmark parameters for this run.
	tarantoolConnection *tarantool.Connection // Connection the request is executed on.
	results             *Results              // Results the request outcome is recorded into.
}

// RequestsGenerator is an abstraction of a renewable heap of identical requests.
type RequestsGenerator struct {
	request Request // Request with the specified operation.
	count   int     // Count of requests.
}

// RequestsSequence is an abstraction for the constant issuance of new requests.
type RequestsSequence struct {
	requests []RequestsGenerator
	// currentRequestIndex describes what type of request will be issued by the sequence.
	currentRequestIndex int
	// currentCounter describes how many requests of the same type
	// are left to issue from RequestsPool.
	currentCounter int
	// findNewRequestsGeneratorMutex provides goroutine-safe search for a new generator.
	findNewRequestsGeneratorMutex sync.Mutex
}
def test_bad_instance_name(cartridge_cmd, project_with_instances):
    """`cartridge enter` must report an error for an unknown instance."""
    project = project_with_instances.project

    cmd = [cartridge_cmd, 'enter', 'unknown-instance']
    assert_error(project, cmd, "Instance unknown-instance is not running")


def test_enter_piped(cartridge_cmd, project_with_instances):
    """Piped commands are executed on the entered instance console."""
    project = project_with_instances.project
    router = project_with_instances.instances['router']

    cmd = [cartridge_cmd, 'enter', router.name]
    expected_connect = '%s.%s' % (project.name, router.name)
    assert_successful_piped_commands(project, cmd, exp_connect=expected_connect,
                                     remote_control=False)


def test_instance_exited(cartridge_cmd, project_with_instances):
    """Entering an instance that exits is handled gracefully."""
    project = project_with_instances.project
    router = project_with_instances.instances['router']

    cmd = [cartridge_cmd, 'enter', router.name]
    expected_connect = '%s.%s' % (project.name, router.name)
    assert_exited_piped_commands(project, cmd, exp_connect=expected_connect)


def test_session_push(cartridge_cmd, project_with_instances):
    """box.session.push messages are delivered through the entered console."""
    project = project_with_instances.project
    router = project_with_instances.instances['router']

    cmd = [cartridge_cmd, 'enter', router.name]
    expected_connect = '%s.%s' % (project.name, router.name)
    assert_session_push_commands(project, cmd, exp_connect=expected_connect)
req.execTypedFunc = func(conn *Conn, resData interface{}) error { 63 | _, err := conn.callFunc(conn, funcName, args, ExecOpts{ 64 | PushCallback: req.pushCallback, 65 | ReadTimeout: req.readTimeout, 66 | ResData: resData, 67 | }) 68 | 69 | return err 70 | } 71 | 72 | return req 73 | } 74 | -------------------------------------------------------------------------------- /cli/commands/bench.go: -------------------------------------------------------------------------------- 1 | package commands 2 | 3 | import ( 4 | "github.com/apex/log" 5 | "github.com/spf13/cobra" 6 | "github.com/tarantool/cartridge-cli/cli/bench" 7 | ) 8 | 9 | func init() { 10 | var benchCmd = &cobra.Command{ 11 | Use: "bench", 12 | Short: "Util for running benchmarks for Tarantool", 13 | Long: "Benchmark utility that simulates running commands done by N clients at the same time sending M simultaneous queries", 14 | Run: func(cmd *cobra.Command, args []string) { 15 | if err := bench.Run(ctx.Bench); err != nil { 16 | log.Fatalf(err.Error()) 17 | } 18 | }, 19 | } 20 | rootCmd.AddCommand(benchCmd) 21 | 22 | configureFlags(benchCmd) 23 | 24 | benchCmd.Flags().StringVar(&ctx.Bench.URL, "url", "127.0.0.1:3301", "Tarantool address") 25 | benchCmd.Flags().StringVar(&ctx.Bench.User, "user", "guest", "Tarantool user for connection") 26 | benchCmd.Flags().StringVar(&ctx.Bench.Password, "password", "", "Tarantool password for connection") 27 | 28 | benchCmd.Flags().IntVar(&ctx.Bench.Connections, "connections", 10, "Number of concurrent connections") 29 | benchCmd.Flags().IntVar(&ctx.Bench.SimultaneousRequests, "requests", 10, "Number of simultaneous requests per connection") 30 | benchCmd.Flags().IntVar(&ctx.Bench.Duration, "duration", 10, "Duration of benchmark test (seconds)") 31 | benchCmd.Flags().IntVar(&ctx.Bench.KeySize, "keysize", 10, "Size of key part of benchmark data (bytes)") 32 | benchCmd.Flags().IntVar(&ctx.Bench.DataSize, "datasize", 20, "Size of value part of benchmark data (bytes)") 33 | 34 | 
benchCmd.Flags().IntVar(&ctx.Bench.InsertCount, "insert", 100, "percentage of inserts") 35 | benchCmd.Flags().IntVar(&ctx.Bench.SelectCount, "select", 0, "percentage of selects") 36 | benchCmd.Flags().IntVar(&ctx.Bench.UpdateCount, "update", 0, "percentage of updates") 37 | benchCmd.Flags().IntVar(&ctx.Bench.PreFillingCount, "fill", bench.PreFillingCount, "number of records to pre-fill the space") 38 | 39 | } 40 | -------------------------------------------------------------------------------- /.github/workflows/push-translation.yml: -------------------------------------------------------------------------------- 1 | name: Push translation sources 2 | 3 | on: 4 | workflow_dispatch: 5 | paths: 6 | - 'doc/**/*.rst' 7 | - 'doc/conf.py' 8 | jobs: 9 | push-translation-sources: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - uses: actions/checkout@v2 14 | 15 | - name: Set branch name from source branch 16 | run: echo "BRANCH_NAME=${GITHUB_HEAD_REF##*/}" >> $GITHUB_ENV 17 | 18 | - name: Start translation service deployment 19 | uses: bobheadxi/deployments@v0.5.2 20 | id: translation 21 | with: 22 | step: start 23 | token: ${{secrets.GITHUB_TOKEN}} 24 | env: ${{env.BRANCH_NAME}} 25 | ref: ${{github.head_ref}} 26 | 27 | - name: Setup Python environment 28 | uses: actions/setup-python@v4 29 | with: 30 | python-version: '3.9' 31 | 32 | - name: Setup Python requirements 33 | run: | 34 | python -m pip install --upgrade pip 35 | pip install -r doc/requirements.txt 36 | 37 | - name: Build pot files 38 | run: python -m sphinx . 
doc/locale/en -c doc -b gettext 39 | 40 | - name: Push POT files to crowdin 41 | uses: crowdin/github-action@1.0.21 42 | with: 43 | upload_sources: true 44 | upload_translations: false 45 | crowdin_branch_name: ${{env.BRANCH_NAME}} 46 | config: 'doc/crowdin.yaml' 47 | env: 48 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 49 | CROWDIN_PERSONAL_TOKEN: ${{secrets.CROWDIN_PERSONAL_TOKEN}} 50 | 51 | - name: update deployment status 52 | uses: bobheadxi/deployments@v0.5.2 53 | with: 54 | step: finish 55 | token: ${{secrets.GITHUB_TOKEN}} 56 | status: ${{job.status}} 57 | deployment_id: ${{steps.translation.outputs.deployment_id}} 58 | env_url: https://crowdin.com/project/tarantool-cartridge-cli/ru#/${{env.BRANCH_NAME}} 59 | -------------------------------------------------------------------------------- /cli/repair/remove.go: -------------------------------------------------------------------------------- 1 | package repair 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/tarantool/cartridge-cli/cli/common" 7 | "github.com/tarantool/cartridge-cli/cli/context" 8 | ) 9 | 10 | func patchConfRemoveInstance(topologyConf *TopologyConfType, ctx *context.Ctx) ([]common.ResultMessage, error) { 11 | return patchConf(removeInstance, topologyConf, ctx) 12 | } 13 | 14 | func removeInstance(topologyConf *TopologyConfType, ctx *context.Ctx) error { 15 | instanceUUID := ctx.Repair.RemoveInstanceUUID 16 | 17 | instanceConf, ok := topologyConf.Instances[instanceUUID] 18 | if !ok { 19 | return fmt.Errorf("Instance %s isn't found in cluster", instanceUUID) 20 | } 21 | 22 | if !instanceConf.IsExpelled { 23 | replicasetUUID := instanceConf.ReplicasetUUID 24 | replicasetConf, ok := topologyConf.Replicasets[replicasetUUID] 25 | 26 | if ok { 27 | leaderIndex := common.StringsSliceElemIndex(replicasetConf.Leaders, instanceUUID) 28 | if leaderIndex != -1 { 29 | replicasetConf.SetLeaders(common.RemoveFromStringSlice(replicasetConf.Leaders, leaderIndex)) 30 | } 31 | 32 | instanceIndex := 
common.StringsSliceElemIndex(replicasetConf.Instances, instanceUUID) 33 | if instanceIndex != -1 { 34 | replicasetConf.SetInstances(common.RemoveFromStringSlice(replicasetConf.Instances, instanceIndex)) 35 | } 36 | 37 | if len(replicasetConf.Leaders) == 0 { 38 | if len(replicasetConf.Instances) > 0 { 39 | replicasetConf.SetLeaders(append(replicasetConf.Leaders, replicasetConf.Instances[0])) 40 | } 41 | } 42 | 43 | if len(replicasetConf.Instances) == 0 { 44 | if err := topologyConf.RemoveReplicaset(replicasetUUID); err != nil { 45 | return fmt.Errorf("Failed to remove replicaset %s from config: %s", replicasetUUID, err) 46 | } 47 | } 48 | } 49 | } 50 | 51 | if err := topologyConf.RemoveInstance(instanceUUID); err != nil { 52 | return fmt.Errorf("Failed to remove instance %s from config: %s", instanceUUID, err) 53 | } 54 | 55 | return nil 56 | } 57 | -------------------------------------------------------------------------------- /cli/common/crypto.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "crypto/md5" 5 | "crypto/sha1" 6 | "crypto/sha256" 7 | "fmt" 8 | "io" 9 | "os" 10 | ) 11 | 12 | // FileSHA256Hex computes SHA256 for a given file. 13 | // The result is returned in a hex form 14 | func FileSHA256Hex(path string) (string, error) { 15 | file, err := os.Open(path) 16 | if err != nil { 17 | return "", err 18 | } 19 | defer file.Close() 20 | 21 | hasher := sha256.New() 22 | if _, err := io.Copy(hasher, file); err != nil { 23 | return "", err 24 | } 25 | 26 | return fmt.Sprintf("%x", hasher.Sum(nil)), nil 27 | } 28 | 29 | // FileSHA1Hex computes SHA1 for a given file. 
30 | // The result is returned in a hex form 31 | func FileSHA1Hex(path string) (string, error) { 32 | file, err := os.Open(path) 33 | if err != nil { 34 | return "", err 35 | } 36 | defer file.Close() 37 | 38 | hasher := sha1.New() 39 | if _, err := io.Copy(hasher, file); err != nil { 40 | return "", err 41 | } 42 | 43 | return fmt.Sprintf("%x", hasher.Sum(nil)), nil 44 | } 45 | 46 | // FileMD5 computes MD5 for a given file. 47 | // The result is returned in a binary form 48 | func FileMD5(path string) ([]byte, error) { 49 | file, err := os.Open(path) 50 | if err != nil { 51 | return nil, err 52 | } 53 | defer file.Close() 54 | 55 | hasher := md5.New() 56 | if _, err := io.Copy(hasher, file); err != nil { 57 | return nil, err 58 | } 59 | 60 | return hasher.Sum(nil), nil 61 | } 62 | 63 | // FileMD5Hex computes MD5 for a given file. 64 | // The result is returned in a hex form 65 | func FileMD5Hex(path string) (string, error) { 66 | fileMD5, err := FileMD5(path) 67 | if err != nil { 68 | return "", err 69 | } 70 | 71 | return fmt.Sprintf("%x", fileMD5), nil 72 | } 73 | 74 | // StringSHA1Hex computes SHA1 for a given string 75 | // The result is returned in a hex form 76 | func StringSHA1Hex(source string) string { 77 | hasher := sha1.New() 78 | hasher.Write([]byte(source)) 79 | 80 | return fmt.Sprintf("%x", hasher.Sum(nil)) 81 | } 82 | -------------------------------------------------------------------------------- /cli/replicasets/weight.go: -------------------------------------------------------------------------------- 1 | package replicasets 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/apex/log" 8 | "github.com/tarantool/cartridge-cli/cli/cluster" 9 | "github.com/tarantool/cartridge-cli/cli/context" 10 | "github.com/tarantool/cartridge-cli/cli/project" 11 | ) 12 | 13 | func SetWeight(ctx *context.Ctx, args []string) error { 14 | if ctx.Replicasets.ReplicasetName == "" { 15 | return fmt.Errorf("Please, specify replica set name via --replicaset 
flag") 16 | } 17 | 18 | if err := project.FillCtx(ctx); err != nil { 19 | return err 20 | } 21 | 22 | if len(args) != 1 { 23 | return fmt.Errorf("Should be specified one argument - replica set weight") 24 | } 25 | 26 | weight, err := strconv.ParseFloat(args[0], 64) 27 | if err != nil { 28 | return fmt.Errorf("Failed to parse specified weight. Please, specify valid float") 29 | } 30 | 31 | conn, err := cluster.ConnectToSomeJoinedInstance(ctx) 32 | if err != nil { 33 | return err 34 | } 35 | 36 | topologyReplicaset, err := getTopologyReplicaset(conn, ctx.Replicasets.ReplicasetName) 37 | if err != nil { 38 | return err 39 | } 40 | 41 | editReplicasetOpts, err := getSetWeightEditReplicasetOpts(weight, topologyReplicaset) 42 | if err != nil { 43 | return fmt.Errorf("Failed to get edit_topology options for setting weight: %s", err) 44 | } 45 | 46 | newTopologyReplicaset, err := editReplicaset(conn, editReplicasetOpts) 47 | if err != nil { 48 | return fmt.Errorf("Failed to update roles list: %s", err) 49 | } 50 | 51 | formattedWeight := strconv.FormatFloat(*newTopologyReplicaset.Weight, 'f', -1, 64) 52 | log.Infof("Replica set %s weight is set to %s", ctx.Replicasets.ReplicasetName, formattedWeight) 53 | 54 | return nil 55 | } 56 | 57 | func getSetWeightEditReplicasetOpts(weight float64, topologyReplicaset *TopologyReplicaset) (*EditReplicasetOpts, error) { 58 | editReplicasetOpts := EditReplicasetOpts{ 59 | ReplicasetUUID: topologyReplicaset.UUID, 60 | Weight: &weight, 61 | } 62 | 63 | return &editReplicasetOpts, nil 64 | } 65 | -------------------------------------------------------------------------------- /test/files/init_check_passed_params.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | if package.setsearchroot ~= nil then 6 | package.setsearchroot() 7 | else 8 | local fio = require('fio') 9 | local app_dir = fio.abspath(fio.dirname(arg[0])) 10 | print('App dir 
set to ' .. app_dir) 11 | package.path = app_dir .. '/?.lua;' .. package.path 12 | package.path = app_dir .. '/?/init.lua;' .. package.path 13 | package.path = app_dir .. '/.rocks/share/tarantool/?.lua;' .. package.path 14 | package.path = app_dir .. '/.rocks/share/tarantool/?/init.lua;' .. package.path 15 | package.cpath = app_dir .. '/?.so;' .. package.cpath 16 | package.cpath = app_dir .. '/?.dylib;' .. package.cpath 17 | package.cpath = app_dir .. '/.rocks/lib/tarantool/?.so;' .. package.cpath 18 | package.cpath = app_dir .. '/.rocks/lib/tarantool/?.dylib;' .. package.cpath 19 | end 20 | 21 | local argparse = require('cartridge.argparse') 22 | 23 | local actual_param_value = argparse.get_box_opts().net_msg_max 24 | assert( 25 | actual_param_value == 1024, 26 | string.format('Mismatch of net_msg_max: %s != %s', actual_param_value, 1024) 27 | ) 28 | 29 | actual_param_value = argparse.parse().user_param 30 | assert( 31 | actual_param_value == 'user_data', 32 | string.format('Mismatch of user_param: %s != %s', actual_param_value, 'user_data') 33 | ) 34 | 35 | local cartridge = require('cartridge') 36 | 37 | local ok, err = cartridge.cfg({ 38 | roles = { 39 | 'cartridge.roles.vshard-storage', 40 | 'cartridge.roles.vshard-router', 41 | 'cartridge.roles.metrics', 42 | 'app.roles.custom', 43 | }, 44 | }) 45 | 46 | assert(ok, tostring(err)) 47 | 48 | local admin = require('app.admin') 49 | admin.init() 50 | 51 | local metrics = require('cartridge.roles.metrics') 52 | metrics.set_export({ 53 | { 54 | path = '/metrics', 55 | format = 'prometheus' 56 | }, 57 | { 58 | path = '/health', 59 | format = 'health' 60 | } 61 | }) 62 | -------------------------------------------------------------------------------- /test/integration/replicasets/test_bootstrap_vshard.py: -------------------------------------------------------------------------------- 1 | from utils import (get_log_lines, is_vshard_bootstrapped, 2 | run_command_and_get_output) 3 | 4 | 5 | def 
test_bootstrap(cartridge_cmd, project_with_vshard_replicasets): 6 | project = project_with_vshard_replicasets.project 7 | instances = project_with_vshard_replicasets.instances 8 | 9 | # bootstrap vshard 10 | cmd = [ 11 | cartridge_cmd, 'replicasets', 'bootstrap-vshard', 12 | ] 13 | 14 | rc, output = run_command_and_get_output(cmd, cwd=project.path) 15 | assert rc == 0 16 | 17 | assert get_log_lines(output) == [ 18 | "• Bootstrap vshard task completed successfully, check the cluster status" 19 | ] 20 | 21 | router = instances['router'] 22 | admin_api_url = router.get_admin_api_url() 23 | assert is_vshard_bootstrapped(admin_api_url) 24 | 25 | # bootstrap again 26 | cmd = [ 27 | cartridge_cmd, 'replicasets', 'bootstrap-vshard', 28 | ] 29 | 30 | rc, output = run_command_and_get_output(cmd, cwd=project.path) 31 | assert rc == 1 32 | 33 | assert "already bootstrapped" in output 34 | 35 | 36 | def test_no_vshard_roles_avaliable(cartridge_cmd, project_with_replicaset_no_roles): 37 | project = project_with_replicaset_no_roles.project 38 | 39 | # bootstrap vshard 40 | cmd = [ 41 | cartridge_cmd, 'replicasets', 'bootstrap-vshard', 42 | ] 43 | 44 | rc, output = run_command_and_get_output(cmd, cwd=project.path) 45 | assert rc == 1 46 | 47 | assert 'No remotes with role "vshard-router" available' in output 48 | 49 | 50 | def test_boostrap_vshard_without_setup(cartridge_cmd, project_with_instances): 51 | project = project_with_instances.project 52 | 53 | # bootstrap vshard without joined instances 54 | cmd = [ 55 | cartridge_cmd, 'replicasets', 'bootstrap-vshard', 56 | ] 57 | 58 | rc, output = run_command_and_get_output(cmd, cwd=project.path) 59 | assert rc == 1 60 | assert "No instances joined to cluster found" in output 61 | -------------------------------------------------------------------------------- /cli/commands/cartridge.go: -------------------------------------------------------------------------------- 1 | package commands 2 | 3 | import ( 4 | "github.com/apex/log" 5 | 
"github.com/apex/log/handlers/cli" 6 | "github.com/spf13/cobra" 7 | "github.com/tarantool/cartridge-cli/cli/context" 8 | "github.com/tarantool/cartridge-cli/cli/version" 9 | ) 10 | 11 | var ( 12 | ctx context.Ctx 13 | needVersion bool 14 | rootCmd = &cobra.Command{ 15 | Use: "cartridge", 16 | Short: "Tarantool Cartridge command-line interface", 17 | 18 | PersistentPreRun: func(cmd *cobra.Command, args []string) { 19 | setLogLevel() 20 | }, 21 | 22 | Run: func(cmd *cobra.Command, args []string) { 23 | if len(args) == 0 && !needVersion { 24 | cmd.Help() 25 | } else { 26 | printVersion(cmd) 27 | } 28 | }, 29 | } 30 | ) 31 | 32 | func init() { 33 | rootCmd.SetVersionTemplate("{{ .Version }}\n") 34 | 35 | rootCmd.PersistentFlags().BoolVar(&ctx.Cli.Verbose, "verbose", false, "Verbose output") 36 | rootCmd.PersistentFlags().BoolVar(&ctx.Cli.Quiet, "quiet", false, "Hide build commands output") 37 | rootCmd.PersistentFlags().BoolVar(&ctx.Cli.Debug, "debug", false, "Debug mode") 38 | rootCmd.Flags().BoolVarP(&needVersion, "version", "v", false, "Show version information") 39 | 40 | addVersionFlags(rootCmd.Flags()) 41 | rootCmd.Flags().MarkHidden("rocks") 42 | rootCmd.Flags().MarkHidden("project-path") 43 | 44 | initLogger() 45 | } 46 | 47 | func Execute() { 48 | if err := rootCmd.Execute(); err != nil { 49 | log.Fatalf(err.Error()) 50 | } 51 | } 52 | 53 | func initLogger() { 54 | log.SetHandler(cli.Default) 55 | } 56 | 57 | func setLogLevel() { 58 | if ctx.Cli.Debug { 59 | ctx.Cli.Verbose = true 60 | } 61 | 62 | if ctx.Cli.Verbose { 63 | log.SetLevel(log.DebugLevel) 64 | } 65 | 66 | if ctx.Cli.Quiet { 67 | log.SetLevel(log.ErrorLevel) 68 | } 69 | } 70 | 71 | func printVersion(cmd *cobra.Command) { 72 | projectPathIsSet := cmd.Flags().Changed("project-path") 73 | if err := version.PrintVersionString(projectPath, projectPathIsSet, showRocksVersions); err != nil { 74 | log.Fatalf(err.Error()) 75 | } 76 | } 77 | 
-------------------------------------------------------------------------------- /cli/connect/common.go: -------------------------------------------------------------------------------- 1 | package connect 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/tarantool/cartridge-cli/cli/context" 7 | ) 8 | 9 | const ( 10 | MaxHistoryLines = 10000 11 | 12 | TCPNetwork = "tcp" 13 | UnixNetwork = "unix" 14 | ) 15 | 16 | type ConnOpts struct { 17 | Network string 18 | Address string 19 | Username string 20 | Password string 21 | } 22 | 23 | type GetRawSuggestionsFunc func(console *Console, lastWord string) interface{} 24 | 25 | func getConnOpts(connString string, ctx *context.Ctx) (*ConnOpts, error) { 26 | connOpts := ConnOpts{ 27 | Username: ctx.Connect.Username, 28 | Password: ctx.Connect.Password, 29 | } 30 | 31 | connStringParts := strings.SplitN(connString, "@", 2) 32 | address := connStringParts[len(connStringParts)-1] 33 | 34 | if len(connStringParts) > 1 { 35 | authString := connStringParts[0] 36 | authStringParts := strings.SplitN(authString, ":", 2) 37 | 38 | if connOpts.Username == "" { 39 | connOpts.Username = authStringParts[0] 40 | } 41 | if len(authStringParts) > 1 && connOpts.Password == "" { 42 | connOpts.Password = authStringParts[1] 43 | } 44 | } 45 | 46 | addrLen := len(address) 47 | switch { 48 | case addrLen > 0 && (address[0] == '.' 
|| address[0] == '/'): 49 | connOpts.Network = UnixNetwork 50 | connOpts.Address = address 51 | case addrLen >= 7 && address[0:7] == "unix://": 52 | connOpts.Network = UnixNetwork 53 | connOpts.Address = address[7:] 54 | case addrLen >= 5 && address[0:5] == "unix:": 55 | connOpts.Network = UnixNetwork 56 | connOpts.Address = address[5:] 57 | case addrLen >= 6 && address[0:6] == "unix/:": 58 | connOpts.Network = UnixNetwork 59 | connOpts.Address = address[6:] 60 | case addrLen >= 6 && address[0:6] == "tcp://": 61 | connOpts.Network = TCPNetwork 62 | connOpts.Address = address[6:] 63 | case addrLen >= 4 && address[0:4] == "tcp:": 64 | connOpts.Network = TCPNetwork 65 | connOpts.Address = address[4:] 66 | default: 67 | connOpts.Network = TCPNetwork 68 | connOpts.Address = address 69 | } 70 | 71 | return &connOpts, nil 72 | } 73 | -------------------------------------------------------------------------------- /cli/pack/validate.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/tarantool/cartridge-cli/cli/context" 7 | ) 8 | 9 | func Validate(ctx *context.Ctx) error { 10 | if ctx.Pack.Type != RpmType && ctx.Pack.Type != DebType { 11 | if ctx.Pack.UnitTemplatePath != "" { 12 | return fmt.Errorf("--unit-template option can be used only with rpm and deb types") 13 | } 14 | 15 | if ctx.Pack.InstUnitTemplatePath != "" { 16 | return fmt.Errorf("--instantiated-unit-template option can be used only with rpm and deb types") 17 | } 18 | 19 | if ctx.Pack.StatboardUnitTemplatePath != "" { 20 | return fmt.Errorf("--statboard-unit-template option can be used only with rpm and deb types") 21 | } 22 | } 23 | 24 | if ctx.Pack.Type != DockerType { 25 | if len(ctx.Pack.ImageTags) > 0 { 26 | return fmt.Errorf("--tag option can be used only with docker type") 27 | } 28 | 29 | if ctx.Tarantool.TarantoolVersion != "" { 30 | return fmt.Errorf("--tarantool-version option can be used only with 
docker type") 31 | } 32 | } 33 | 34 | if !ctx.Build.InDocker && ctx.Pack.Type != DockerType { 35 | if len(ctx.Docker.CacheFrom) > 0 { 36 | return fmt.Errorf("--cache-from option can be used only with --use-docker flag or docker type") 37 | } 38 | 39 | if ctx.Build.DockerFrom != "" { 40 | return fmt.Errorf("--build-from option can be used only with --use-docker flag or docker type") 41 | } 42 | 43 | if ctx.Pack.DockerFrom != "" { 44 | return fmt.Errorf("--from option can be used only with --use-docker flag or docker type") 45 | } 46 | 47 | if ctx.Build.SDKLocal { 48 | return fmt.Errorf("--sdk-local option can be used only with --use-docker flag or docker type") 49 | } 50 | 51 | if ctx.Build.SDKPath != "" { 52 | return fmt.Errorf("--sdk-path option can be used only with --use-docker flag or docker type") 53 | } 54 | } 55 | 56 | if (ctx.Build.SDKPath != "" || ctx.Build.SDKLocal) && ctx.Tarantool.TarantoolVersion != "" { 57 | return fmt.Errorf("You can specify only one of --tarantool-version,--sdk-path or --sdk-local") 58 | } 59 | 60 | return nil 61 | } 62 | -------------------------------------------------------------------------------- /cli/failover/setup.go: -------------------------------------------------------------------------------- 1 | package failover 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | 8 | "github.com/apex/log" 9 | "github.com/tarantool/cartridge-cli/cli/common" 10 | "github.com/tarantool/cartridge-cli/cli/context" 11 | "github.com/tarantool/cartridge-cli/cli/project" 12 | "gopkg.in/yaml.v2" 13 | ) 14 | 15 | const ( 16 | defaultFailoverParamsFile = "failover.yml" 17 | ) 18 | 19 | func Setup(ctx *context.Ctx) error { 20 | var err error 21 | 22 | if err := project.FillCtx(ctx); err != nil { 23 | return err 24 | } 25 | 26 | if ctx.Failover.File == "" { 27 | ctx.Failover.File = defaultFailoverParamsFile 28 | } 29 | 30 | if ctx.Failover.File, err = filepath.Abs(ctx.Failover.File); err != nil { 31 | return fmt.Errorf("Failed to get %s 
failover configuration file absolute path: %s", ctx.Failover.File, err) 32 | } 33 | 34 | log.Infof("Configure failover described in %s", ctx.Failover.File) 35 | 36 | failoverOpts, err := getFailoverOptsFromFile(ctx) 37 | if err != nil { 38 | return fmt.Errorf("Failed to parse %s failover configuration file: %s", ctx.Failover.File, err) 39 | } 40 | 41 | if err := failoverOpts.Manage(ctx); err != nil { 42 | return fmt.Errorf("Failed to configure failover: %s", err) 43 | } 44 | 45 | log.Infof("Failover configured successfully") 46 | 47 | return nil 48 | } 49 | 50 | func getFailoverOptsFromFile(ctx *context.Ctx) (*FailoverOpts, error) { 51 | if _, err := os.Stat(ctx.Failover.File); os.IsNotExist(err) { 52 | return nil, fmt.Errorf("File %s with failover configurations doesn't exists", ctx.Failover.File) 53 | } else if err != nil { 54 | return nil, fmt.Errorf("Failed to process %s file: %s", ctx.Failover.File, err) 55 | } 56 | 57 | fileContent, err := common.GetFileContentBytes(ctx.Failover.File) 58 | if err != nil { 59 | return nil, fmt.Errorf("Failed to read %s file: %s", ctx.Failover.File, err) 60 | } 61 | 62 | var failoverOpts FailoverOpts 63 | if err := yaml.Unmarshal(fileContent, &failoverOpts); err != nil { 64 | return nil, fmt.Errorf("Failed to parse failover configurations: %s", err) 65 | } 66 | 67 | return &failoverOpts, nil 68 | } 69 | -------------------------------------------------------------------------------- /cli/replicasets/completion.go: -------------------------------------------------------------------------------- 1 | package replicasets 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/tarantool/cartridge-cli/cli/cluster" 8 | "github.com/tarantool/cartridge-cli/cli/common" 9 | "github.com/tarantool/cartridge-cli/cli/connector" 10 | "github.com/tarantool/cartridge-cli/cli/context" 11 | "github.com/tarantool/cartridge-cli/cli/project" 12 | ) 13 | 14 | const ( 15 | completionEvalTimeout = 3 * time.Second 16 | ) 17 | 18 | func 
GetReplicasetRolesComp(ctx *context.Ctx) ([]string, error) { 19 | if ctx.Replicasets.ReplicasetName == "" { 20 | return nil, fmt.Errorf("Please, specify replica set name via --replicaset flag") 21 | } 22 | 23 | if err := project.FillCtx(ctx); err != nil { 24 | return nil, err 25 | } 26 | 27 | conn, err := cluster.ConnectToSomeJoinedInstance(ctx) 28 | if err != nil { 29 | return nil, err 30 | } 31 | 32 | topologyReplicaset, err := getTopologyReplicaset(conn, ctx.Replicasets.ReplicasetName) 33 | if err != nil { 34 | return nil, err 35 | } 36 | 37 | return topologyReplicaset.Roles, nil 38 | } 39 | 40 | func GetReplicasetRolesToAddComp(ctx *context.Ctx) ([]string, error) { 41 | if err := project.FillCtx(ctx); err != nil { 42 | return nil, err 43 | } 44 | 45 | conn, err := cluster.ConnectToSomeJoinedInstance(ctx) 46 | if err != nil { 47 | return nil, err 48 | } 49 | 50 | // get all known roles 51 | var knownRoles []Role 52 | req := connector.EvalReq(getKnownRolesBody).SetReadTimeout(cluster.SimpleOperationTimeout) 53 | if err := conn.ExecTyped(req, &knownRoles); err != nil { 54 | return nil, fmt.Errorf("Failed to get known roles: %s", err) 55 | } 56 | 57 | roleNames := make([]string, len(knownRoles)) 58 | for i, role := range knownRoles { 59 | roleNames[i] = role.Name 60 | } 61 | 62 | // get replicaset roles 63 | if ctx.Replicasets.ReplicasetName == "" { 64 | return roleNames, nil 65 | } 66 | 67 | topologyReplicaset, err := getTopologyReplicaset(conn, ctx.Replicasets.ReplicasetName) 68 | if err != nil { 69 | return roleNames, nil 70 | } 71 | 72 | replicasetRoles := topologyReplicaset.Roles 73 | 74 | rolesToAdd := common.GetStringSlicesDifference(roleNames, replicasetRoles) 75 | 76 | return rolesToAdd, nil 77 | } 78 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/test/helper.lua: -------------------------------------------------------------------------------- 1 | -- This file is required automatically by 
luatest. 2 | -- Add common configuration here. 3 | 4 | local fio = require('fio') 5 | local t = require('luatest') 6 | local cartridge_helpers = require('cartridge.test-helpers') 7 | 8 | local helper = {} 9 | 10 | helper.root = fio.dirname(fio.abspath(package.search('init'))) 11 | helper.datadir = fio.pathjoin(helper.root, 'tmp', 'db_test') 12 | helper.server_command = fio.pathjoin(helper.root, 'init.lua') 13 | 14 | helper.cluster = cartridge_helpers.Cluster:new({ 15 | server_command = helper.server_command, 16 | datadir = helper.datadir, 17 | use_vshard = false, 18 | replicasets = { 19 | { 20 | alias = 'api', 21 | uuid = cartridge_helpers.uuid('a'), 22 | roles = {'app.roles.custom'}, 23 | servers = { 24 | { instance_uuid = cartridge_helpers.uuid('a', 1), alias = 'api' }, 25 | }, 26 | }, 27 | } 28 | }) 29 | 30 | function helper.truncate_space_on_cluster(cluster, space_name) 31 | assert(cluster ~= nil) 32 | for _, server in ipairs(cluster.servers) do 33 | server.net_box:eval([[ 34 | local space_name = ... 35 | local space = box.space[space_name] 36 | if space ~= nil and not box.cfg.read_only then 37 | space:truncate() 38 | end 39 | ]], {space_name}) 40 | end 41 | end 42 | 43 | function helper.drop_space_on_cluster(cluster, space_name) 44 | assert(cluster ~= nil) 45 | for _, server in ipairs(cluster.servers) do 46 | server.net_box:eval([[ 47 | local space_name = ... 
48 | local space = box.space[space_name] 49 | if space ~= nil and not box.cfg.read_only then 50 | space:drop() 51 | end 52 | ]], {space_name}) 53 | end 54 | end 55 | 56 | function helper.stop_cluster(cluster) 57 | assert(cluster ~= nil) 58 | cluster:stop() 59 | fio.rmtree(cluster.datadir) 60 | end 61 | 62 | t.before_suite(function() 63 | fio.rmtree(helper.datadir) 64 | fio.mktree(helper.datadir) 65 | box.cfg({work_dir = helper.datadir}) 66 | end) 67 | 68 | return helper 69 | -------------------------------------------------------------------------------- /cli/running/writer_test.go: -------------------------------------------------------------------------------- 1 | package running 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func getLogsBytes(logs []string) []byte { 13 | return []byte(strings.Join(logs, "\n")) 14 | } 15 | 16 | func getLogsWithPrefix(prefix string, logs []string) string { 17 | resLines := make([]string, len(logs)) 18 | for i := range logs { 19 | resLines[i] = fmt.Sprintf("%s | %s", prefix, logs[i]) 20 | } 21 | 22 | return strings.Join(resLines, "\n") 23 | } 24 | 25 | func TestWrite(t *testing.T) { 26 | t.Parallel() 27 | 28 | assert := assert.New(t) 29 | 30 | out := bytes.NewBuffer(nil) 31 | var logs []string 32 | var logBytes []byte 33 | var n int 34 | var err error 35 | 36 | id := "my-id" 37 | 38 | writer := newColorizedWriter(id) 39 | writer.out = out 40 | 41 | // multiline 42 | out.Reset() 43 | logs = []string{ 44 | "Some long", 45 | "multiline", 46 | "log", 47 | } 48 | logBytes = getLogsBytes(logs) 49 | n, err = writer.Write(logBytes) 50 | assert.Nil(err) 51 | assert.Equal(len(logBytes), n) 52 | assert.Equal(getLogsWithPrefix(id, logs), out.String()) 53 | 54 | // one line (w/o \n) 55 | out.Reset() 56 | logs = []string{ 57 | "Some one-line log line", 58 | } 59 | logBytes = getLogsBytes(logs) 60 | n, err = writer.Write(logBytes) 61 | assert.Nil(err) 62 | 
assert.Equal(len(logBytes), n) 63 | assert.Equal(getLogsWithPrefix(id, logs), out.String()) 64 | } 65 | 66 | func TestNoPrefixWrite(t *testing.T) { 67 | t.Parallel() 68 | 69 | assert := assert.New(t) 70 | out := bytes.NewBuffer(nil) 71 | 72 | writer := newDummyWriter() 73 | writer.out = out 74 | 75 | out.Reset() 76 | logs := []string{ 77 | "Some long\nmultiline\nlog", 78 | } 79 | 80 | logBytes := getLogsBytes(logs) 81 | n, err := writer.Write(logBytes) 82 | assert.Nil(err) 83 | assert.Equal(len(logBytes), n) 84 | assert.Equal(strings.Join(logs, "\n"), out.String()) 85 | 86 | out.Reset() 87 | logs = []string{ 88 | "One line log without prefix.", 89 | } 90 | 91 | logBytes = getLogsBytes(logs) 92 | n, err = writer.Write(logBytes) 93 | assert.Nil(err) 94 | assert.Equal(len(logBytes), n) 95 | assert.Equal(strings.Join(logs, "\n"), out.String()) 96 | } 97 | -------------------------------------------------------------------------------- /doc/commands/status.rst: -------------------------------------------------------------------------------- 1 | Checking instance status 2 | ======================== 3 | 4 | Run the ``status`` command to check the current status of one or more instances: 5 | 6 | .. code-block:: bash 7 | 8 | cartridge status [INSTANCE_NAME...] [flags] 9 | 10 | where ``[INSTANCE_NAME...]`` means that more than one instance can be specified. 11 | 12 | If no ``INSTANCE_NAME`` is provided, all the instances from the 13 | Cartridge instance configuration file are taken as arguments. 14 | See the ``--cfg`` option below. 15 | 16 | .. note:: 17 | 18 | Make sure the instance(s) you are checking were started with ``cartridge start -d``. 19 | 20 | Flags 21 | ----- 22 | 23 | .. container:: table 24 | 25 | .. list-table:: 26 | :widths: 20 80 27 | :header-rows: 0 28 | 29 | * - ``--name`` 30 | - Application name. 31 | By default, it is taken from the ``package`` field of the application's ``.rockspec``. 
32 | * - ``--stateboard`` 33 | - Get the status of the application stateboard and the instances. 34 | Ignored if ``--stateboard-only`` is specified. 35 | * - ``--stateboard-only`` 36 | - Get only the application stateboard status. 37 | If specified, ``INSTANCE_NAME...`` is ignored. 38 | * - ``--run-dir`` 39 | - The directory where PID and socket files are stored. 40 | Defaults to ``./tmp/run``. 41 | ``run-dir`` is also a section of ``.cartridge.yml``. 42 | Learn more about 43 | :doc:`instance paths `. 44 | * - ``--cfg`` 45 | - Path to the Cartridge instances configuration file. 46 | Defaults to ``./instances.yml``. 47 | ``cfg`` is also a section of ``.cartridge.yml``. 48 | Learn more about 49 | :doc:`instance paths `. 50 | 51 | ``status`` also supports :doc:`global flags `. 52 | 53 | .. note:: 54 | 55 | Use the exact same ``run-dir`` as you did with ``cartridge start``. 56 | The PID files stored in that directory are used to check the status of running instances. 57 | 58 | -------------------------------------------------------------------------------- /doc/commands/stop.rst: -------------------------------------------------------------------------------- 1 | Stopping instances 2 | ================== 3 | 4 | To stop one or more instances that are running locally in the background, run: 5 | 6 | .. code-block:: bash 7 | 8 | cartridge stop [INSTANCE_NAME...] [flags] 9 | 10 | where ``[INSTANCE_NAME...]`` means that more than one instance can be specified. 11 | 12 | If no ``INSTANCE_NAME`` is provided, all the instances from the 13 | Cartridge instance configuration file are taken as arguments. 14 | See the ``--cfg`` option below. 15 | 16 | Flags 17 | ----- 18 | 19 | .. container:: table 20 | 21 | .. list-table:: 22 | :widths: 20 80 23 | :header-rows: 0 24 | 25 | * - ``--name`` 26 | - Application name. 27 | By default, it is taken from the ``package`` field 28 | of the application's ``.rockspec``. 29 | * - ``-f, --force`` 30 | - Force stop the instance(s) with a SIGKILL. 
31 | By default, the instances receive a SIGTERM. 32 | * - ``--stateboard`` 33 | - Stop the application 34 | :ref:`stateboard ` 35 | and the instances. 36 | Ignored if ``--stateboard-only`` is specified. 37 | * - ``--stateboard-only`` 38 | - Stop only the application stateboard. 39 | If specified, ``INSTANCE_NAME...`` is ignored. 40 | * - ``--run-dir`` 41 | - The directory where PID and socket files are stored. 42 | Defaults to ``./tmp/run``. 43 | ``run-dir`` is also a section of ``.cartridge.yml``. 44 | Learn more about 45 | :doc:`instance paths `. 46 | * - ``--cfg`` 47 | - Path to the Cartridge instances configuration file. 48 | Defaults to ``./instances.yml``. 49 | ``cfg`` is also a section of ``.cartridge.yml``. 50 | Learn more about 51 | :doc:`instance paths `. 52 | 53 | .. note:: 54 | 55 | Use the exact same ``run-dir`` as you did with ``cartridge start``. 56 | The PID files stored in that directory are used to stop running instances. 57 | 58 | -------------------------------------------------------------------------------- /doc/pre-post-build.rst: -------------------------------------------------------------------------------- 1 | Pre-build and post-build scripts 2 | ================================ 3 | 4 | Put the files ``cartridge.pre-build`` and ``cartridge.post-build`` 5 | in your application directory to control the packaging process. 6 | 7 | .. note:: 8 | 9 | These files are not to be confused with 10 | :ref:`pre-install and post-install scripts `, 11 | which can be added to an RPM/DEB package of your Cartridge application. 12 | 13 | 14 | cartridge.pre-build 15 | ------------------- 16 | 17 | If your application depends on closed-source rocks, or if the build should contain 18 | rocks from a project added as a submodule, then you need to **install** all these 19 | dependencies before calling ``tarantoolctl rocks make``. 20 | To avoid doing it manually, use the file ``cartridge.pre-build``. 
21 | 22 | ``cartridge.pre-build`` is a script that runs before ``tarantoolctl rocks make``. 23 | The main purpose of this script is to build non-standard rocks modules 24 | (for example, from a submodule). Specify in it all the ``.rocks`` to build from submodules. 25 | For example: ``tarantoolctl rocks make --chdir ./third_party/proj``. 26 | 27 | The file must be executable. 28 | 29 | If you created your application from template, 30 | ``cartridge.pre-build`` is already in your application directory. 31 | 32 | 33 | Example 34 | ~~~~~~~ 35 | 36 | .. code-block:: bash 37 | 38 | #!/bin/sh 39 | 40 | # The main purpose of this script is to build non-standard rocks modules. 41 | # The script will run before `tarantoolctl rocks make` during application build. 42 | 43 | tarantoolctl rocks make --chdir ./third_party/my-custom-rock-module 44 | 45 | 46 | cartridge.post-build 47 | -------------------- 48 | 49 | ``cartridge.post-build`` is a script that runs after ``tarantoolctl rocks make``. 50 | The main purpose of this script is to remove build artifacts from the final package. 51 | Must be executable. 52 | 53 | Example 54 | ~~~~~~~ 55 | 56 | .. code-block:: bash 57 | 58 | #!/bin/sh 59 | 60 | # The main purpose of this script is to remove build artifacts from the resulting package. 61 | # The script will run after `tarantoolctl rocks make` during application build. 62 | 63 | rm -rf third_party 64 | rm -rf node_modules 65 | rm -rf doc 66 | 67 | -------------------------------------------------------------------------------- /doc/commands/log.rst: -------------------------------------------------------------------------------- 1 | Get instance logs 2 | ================= 3 | 4 | To get the logs of an instance running in the background, use the ``log`` command: 5 | 6 | .. code-block:: bash 7 | 8 | cartridge log [INSTANCE_NAME...] [flags] 9 | 10 | which means that more than one instance name can be specified. 11 | 12 | Options 13 | ------- 14 | 15 | .. 
container:: table 16 | 17 | .. list-table:: 18 | :widths: 20 80 19 | :header-rows: 0 20 | 21 | * - ``-f, --follow`` 22 | - Output appended data as the log grows. 23 | * - ``-n, --lines int`` 24 | - Number of last lines to be displayed. Defaults to 15. 25 | * - ``--stateboard`` 26 | - Get both stateboard and instance logs. 27 | Ignored if ``--stateboard-only`` is specified. 28 | * - ``--stateboard-only`` 29 | - Get only stateboard logs. 30 | If specified, ``INSTANCE_NAME...`` is ignored. 31 | * - ``--log-dir`` 32 | - The directory that stores logs for instances that are running in the background. 33 | Defaults to ``./tmp/log``. 34 | ``log-dir`` is also a section of ``.cartridge.yml``. 35 | Learn more about 36 | :doc:`instance paths `. 37 | * - ``--run-dir`` 38 | - The directory where PID and socket files are stored. 39 | Defaults to ``./tmp/run``. 40 | ``run-dir`` is also a section of ``.cartridge.yml``. 41 | Learn more about 42 | :doc:`instance paths `. 43 | * - ``--cfg`` 44 | - Path to the Cartridge instances configuration file. 45 | Defaults to ``./instances.yml``. 46 | ``cfg`` is also a section of ``.cartridge.yml``. 47 | Learn more about 48 | :doc:`instance paths `. 49 | 50 | ``log`` also supports :doc:`global flags `. 51 | 52 | .. note:: 53 | 54 | Use the exact same ``log-dir`` as you did with ``cartridge start``. 55 | The logs are retrieved from the files stored in that directory. 
56 | 57 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/init.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | -- configure path so that you can run application 6 | -- from outside the root directory 7 | if package.setsearchroot ~= nil then 8 | package.setsearchroot() 9 | else 10 | -- Workaround for rocks loading in tarantool 1.10 11 | -- It can be removed in tarantool > 2.2 12 | -- By default, when you do require('mymodule'), tarantool looks into 13 | -- the current working directory and whatever is specified in 14 | -- package.path and package.cpath. If you run your app while in the 15 | -- root directory of that app, everything goes fine, but if you try to 16 | -- start your app with "tarantool myapp/init.lua", it will fail to load 17 | -- its modules, and modules from myapp/.rocks. 18 | local fio = require('fio') 19 | local app_dir = fio.abspath(fio.dirname(arg[0])) 20 | package.path = app_dir .. '/?.lua;' .. package.path 21 | package.path = app_dir .. '/?/init.lua;' .. package.path 22 | package.path = app_dir .. '/.rocks/share/tarantool/?.lua;' .. package.path 23 | package.path = app_dir .. '/.rocks/share/tarantool/?/init.lua;' .. package.path 24 | package.cpath = app_dir .. '/?.so;' .. package.cpath 25 | package.cpath = app_dir .. '/?.dylib;' .. package.cpath 26 | package.cpath = app_dir .. '/.rocks/lib/tarantool/?.so;' .. package.cpath 27 | package.cpath = app_dir .. '/.rocks/lib/tarantool/?.dylib;' .. 
package.cpath 28 | end 29 | 30 | local has_module, compat = pcall(require, 'compat') 31 | if has_module then 32 | compat.fiber_slice_default = 'new' 33 | end 34 | 35 | -- configure cartridge 36 | 37 | local cartridge = require('cartridge') 38 | 39 | local ok, err = cartridge.cfg({ 40 | roles = { 41 | 'cartridge.roles.vshard-storage', 42 | 'cartridge.roles.vshard-router', 43 | 'cartridge.roles.metrics', 44 | 'app.roles.custom', 45 | }, 46 | }) 47 | 48 | assert(ok, tostring(err)) 49 | 50 | -- register admin function to use it with 'cartridge admin' command 51 | 52 | local admin = require('app.admin') 53 | admin.init() 54 | 55 | local metrics = require('cartridge.roles.metrics') 56 | metrics.set_export({ 57 | { 58 | path = '/metrics', 59 | format = 'prometheus' 60 | }, 61 | { 62 | path = '/health', 63 | format = 'health' 64 | } 65 | }) 66 | -------------------------------------------------------------------------------- /doc/commands.rst: -------------------------------------------------------------------------------- 1 | Supported Cartridge CLI commands 2 | ================================ 3 | 4 | .. container:: table 5 | 6 | .. 
list-table:: 7 | :widths: 20 80 8 | :header-rows: 0 9 | 10 | * - :doc:`create ` 11 | - Create a new application from template 12 | * - :doc:`build ` 13 | - Build an application for local development and testing 14 | * - :doc:`start ` 15 | - Start one or more Tarantool instances locally 16 | * - :doc:`stop ` 17 | - Stop one or more Tarantool instances started locally 18 | * - :doc:`status ` 19 | - Get the status of one or more instances running locally 20 | * - :doc:`enter ` 21 | - Enter a locally running instance 22 | * - :doc:`connect ` 23 | - Connect to a locally running instance at a specific address 24 | * - :doc:`log ` 25 | - Get the logs of one or more instances 26 | * - :doc:`clean ` 27 | - Clean the files of one or more instances 28 | * - :doc:`pack ` 29 | - Pack the application into a distributable bundle 30 | * - :doc:`repair ` 31 | - Patch cluster configuration files 32 | * - :doc:`admin ` 33 | - Сall an admin function provided by the application 34 | * - :doc:`replicasets ` 35 | - Manage cluster replica sets running locally 36 | * - :doc:`failover ` 37 | - Manage cluster failover 38 | 39 | All commands support :doc:`global flags ` 40 | that control output verbosity. 41 | 42 | .. toctree:: 43 | :hidden: 44 | 45 | create 46 | build 47 | start 48 | stop 49 | status 50 | enter 51 | connect 52 | log 53 | clean 54 | pack 55 | repair 56 | admin 57 | replicasets 58 | failover 59 | 60 | -------------------------------------------------------------------------------- /doc/migration-to-tt.rst: -------------------------------------------------------------------------------- 1 | Migration from Cartridge CLI to tt 2 | ================================== 3 | 4 | .. note:: 5 | 6 | The migration instruction is also available in the 7 | `tt repository `_ 8 | on GitHub. 9 | 10 | To start managing a Cartridge application with ``tt`` instead of Cartridge CLI, 11 | run ``tt init`` in the application directory: 12 | 13 | .. 
code-block:: bash 14 | 15 | $ tt init 16 | • Found existing config '.cartridge.yml' 17 | • Environment config is written to 'tt.yaml' 18 | 19 | This creates a ``tt`` environment based on the existing Cartridge configuration. 20 | Now you're ready to manage the application with ``tt``: 21 | 22 | .. code-block:: bash 23 | 24 | $ tt start 25 | • Starting an instance [app:s1-master]... 26 | • Starting an instance [app:s1-replica]... 27 | • Starting an instance [app:s2-master]... 28 | • Starting an instance [app:s2-replica]... 29 | • Starting an instance [app:stateboard]... 30 | • Starting an instance [app:router]... 31 | $ tt status 32 | INSTANCE STATUS PID 33 | app:s1-replica RUNNING 112645 34 | app:s2-master RUNNING 112646 35 | app:s2-replica RUNNING 112647 36 | app:stateboard RUNNING 112655 37 | app:router RUNNING 112656 38 | app:s1-master RUNNING 112644 39 | 40 | Commands difference 41 | ------------------- 42 | 43 | Most Cartridge CLI commands look the same in ``tt``: ``cartridge start`` and 44 | ``tt start``, ``cartridge create`` and ``tt create``, and so on. To migrate such 45 | calls, it is usually enough to replace the utility name. There can be slight differences 46 | in command flags and format. For details on ``tt`` commands, see the 47 | :ref:`tt commands reference `. 48 | 49 | The following commands are different in ``tt``: 50 | 51 | * Cartridge CLI commands ``admin``, ``bench``, ``failover``, ``repair``, ``replicasets`` 52 | are implemented as subcommands of ``tt cartridge``. Example, ``tt cartridge repair``. 53 | * ``cartridge enter`` and ``cartridge connect`` are covered by ``tt connect``. 54 | * The analog of ``cartridge gen completion`` is ``tt completion`` 55 | * ``cartridge log`` and ``cartridge pack docker`` functionality is not supported in ``tt``. 
56 | 57 | -------------------------------------------------------------------------------- /cli/connect/connect.go: -------------------------------------------------------------------------------- 1 | package connect 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/tarantool/cartridge-cli/cli/common" 7 | "github.com/tarantool/cartridge-cli/cli/context" 8 | "github.com/tarantool/cartridge-cli/cli/project" 9 | "github.com/tarantool/cartridge-cli/cli/running" 10 | ) 11 | 12 | const ( 13 | // see https://github.com/tarantool/tarantool/blob/b53cb2aeceedc39f356ceca30bd0087ee8de7c16/src/box/lua/console.c#L265 14 | tarantoolWordSeparators = "\t\r\n !\"#$%&'()*+,-/;<=>?@[\\]^`{|}~" 15 | ) 16 | 17 | func Enter(ctx *context.Ctx, args []string) error { 18 | var err error 19 | 20 | if err := project.FillCtx(ctx); err != nil { 21 | return err 22 | } 23 | 24 | if ctx.Running.Instances, err = common.GetInstancesFromArgs(args); err != nil { 25 | return err 26 | } 27 | 28 | if len(ctx.Running.Instances) != 1 { 29 | return fmt.Errorf("Should be specified one instance name") 30 | } 31 | 32 | instanceName := ctx.Running.Instances[0] 33 | 34 | process := running.NewInstanceProcess(ctx, instanceName) 35 | if !process.IsRunning() { 36 | return common.ErrWrapCheckInstanceNameCommonMisprint([]string{instanceName}, ctx.Project.Name, 37 | fmt.Errorf("Instance %s is not running", instanceName)) 38 | } 39 | 40 | socketPath := project.GetInstanceConsoleSock(ctx, instanceName) 41 | title := project.GetInstanceID(ctx, instanceName) 42 | 43 | connOpts := ConnOpts{ 44 | Network: "unix", 45 | Address: socketPath, 46 | } 47 | 48 | if err := runConsole(&connOpts, title); err != nil { 49 | return fmt.Errorf("Failed to run interactive console: %s", err) 50 | } 51 | 52 | return nil 53 | } 54 | 55 | func Connect(ctx *context.Ctx, args []string) error { 56 | if len(args) != 1 { 57 | return fmt.Errorf("Should be specified one connection string") 58 | } 59 | 60 | connString := args[0] 61 | 62 | connOpts, err := 
getConnOpts(connString, ctx) 63 | if err != nil { 64 | return fmt.Errorf("Failed to get connection opts: %s", err) 65 | } 66 | 67 | if err := runConsole(connOpts, ""); err != nil { 68 | return fmt.Errorf("Failed to run interactive console: %s", err) 69 | } 70 | 71 | return nil 72 | 73 | } 74 | 75 | func runConsole(connOpts *ConnOpts, title string) error { 76 | console, err := NewConsole(connOpts, title) 77 | if err != nil { 78 | return fmt.Errorf("Failed to create new console: %s", err) 79 | } 80 | defer console.Close() 81 | 82 | if err := console.Run(); err != nil { 83 | return fmt.Errorf("Failed to start new console: %s", err) 84 | } 85 | 86 | return nil 87 | } 88 | -------------------------------------------------------------------------------- /cli/connector/binary.go: -------------------------------------------------------------------------------- 1 | package connector 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/FZambia/tarantool" 8 | ) 9 | 10 | func connectBinary(connOpts *ConnOpts) (*tarantool.Connection, error) { 11 | connectStr := fmt.Sprintf("%s://%s", connOpts.Network, connOpts.Address) 12 | 13 | binaryConn, err := tarantool.Connect(connectStr, tarantool.Opts{ 14 | User: connOpts.Username, 15 | Password: connOpts.Password, 16 | SkipSchema: true, // see https://github.com/FZambia/tarantool/issues/3 17 | RequestTimeout: 0, 18 | }) 19 | if err != nil { 20 | return nil, fmt.Errorf("Failed to connect: %s", err) 21 | } 22 | 23 | return binaryConn, nil 24 | } 25 | 26 | func initBinaryConn(conn *Conn, connOpts *ConnOpts) error { 27 | var err error 28 | 29 | if conn.binary, err = connectBinary(connOpts); err != nil { 30 | return err 31 | } 32 | 33 | conn.evalFunc = evalBinary 34 | conn.callFunc = callBinary 35 | 36 | return nil 37 | } 38 | 39 | func evalBinary(conn *Conn, funcBody string, args []interface{}, execOpts ExecOpts) ([]interface{}, error) { 40 | evalReq := tarantool.Eval(funcBody, args) 41 | return processTarantoolReqBinary(conn, 
evalReq, execOpts) 42 | } 43 | 44 | func callBinary(conn *Conn, funcName string, args []interface{}, execOpts ExecOpts) ([]interface{}, error) { 45 | callReq := tarantool.Call(funcName, args) 46 | return processTarantoolReqBinary(conn, callReq, execOpts) 47 | } 48 | 49 | func processTarantoolReqBinary(conn *Conn, req *tarantool.Request, execOpts ExecOpts) ([]interface{}, error) { 50 | if execOpts.PushCallback != nil { 51 | req.WithPush(func(r *tarantool.Response) { 52 | execOpts.PushCallback(r.Data[0]) 53 | }) 54 | } 55 | 56 | ctx := context.Background() 57 | var cancel context.CancelFunc 58 | 59 | if execOpts.ReadTimeout != 0 { 60 | ctx, cancel = context.WithTimeout(ctx, execOpts.ReadTimeout) 61 | defer cancel() 62 | } 63 | 64 | return execTarantoolReqBinary(ctx, conn.binary, req, execOpts.ResData) 65 | } 66 | 67 | func execTarantoolReqBinary(ctx context.Context, binaryConn *tarantool.Connection, req *tarantool.Request, resData interface{}) ([]interface{}, error) { 68 | var err error 69 | var resp *tarantool.Response 70 | 71 | if resData != nil { 72 | err = binaryConn.ExecTypedContext(ctx, req, resData) 73 | } else { 74 | resp, err = binaryConn.ExecContext(ctx, req) 75 | } 76 | 77 | if err != nil { 78 | return nil, err 79 | } 80 | 81 | if resp == nil { 82 | return nil, nil 83 | } 84 | 85 | return resp.Data, nil 86 | } 87 | -------------------------------------------------------------------------------- /cli/failover/set.go: -------------------------------------------------------------------------------- 1 | package failover 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | 7 | "github.com/apex/log" 8 | "github.com/tarantool/cartridge-cli/cli/context" 9 | "github.com/tarantool/cartridge-cli/cli/project" 10 | ) 11 | 12 | func Set(ctx *context.Ctx) error { 13 | if (ctx.Failover.Mode == "eventual" || ctx.Failover.Mode == "disabled") && ctx.Failover.ProviderParamsJSON != "" { 14 | return fmt.Errorf("Please, don't specify provider parameters when using %s mode", 
ctx.Failover.Mode) 15 | } 16 | 17 | if ctx.Failover.Mode == "disabled" { 18 | return Disable(ctx) 19 | } 20 | 21 | if err := project.FillCtx(ctx); err != nil { 22 | return err 23 | } 24 | 25 | failoverOpts, err := getFailoverOpts(ctx) 26 | if err != nil { 27 | return err 28 | } 29 | 30 | log.Infof("Configure %s failover", (*failoverOpts)["mode"]) 31 | 32 | if err := failoverOpts.Manage(ctx); err != nil { 33 | return err 34 | } 35 | 36 | log.Infof("Failover configured successfully") 37 | 38 | return nil 39 | } 40 | 41 | func getFailoverOpts(ctx *context.Ctx) (*FailoverOpts, error) { 42 | failoverOpts, err := initFailoverOpts(ctx) 43 | if err != nil { 44 | return nil, err 45 | } 46 | 47 | if (*failoverOpts)["mode"] == "stateful" { 48 | if _, found := (*failoverOpts)["state_provider"]; found && ctx.Failover.ProviderParamsJSON != "" { 49 | var providerParams ProviderParams 50 | if err := json.Unmarshal([]byte(ctx.Failover.ProviderParamsJSON), &providerParams); err != nil { 51 | return nil, fmt.Errorf("Failed to parse provider parameters: %s", err) 52 | } 53 | 54 | if (*failoverOpts)["state_provider"] == "stateboard" { 55 | (*failoverOpts)["stateboard_params"] = providerParams 56 | } else if (*failoverOpts)["state_provider"] == "etcd2" { 57 | (*failoverOpts)["etcd2_params"] = providerParams 58 | } 59 | } 60 | } 61 | 62 | if err := validateSetFailoverOpts(failoverOpts); err != nil { 63 | return nil, err 64 | } 65 | 66 | return failoverOpts, nil 67 | } 68 | 69 | func initFailoverOpts(ctx *context.Ctx) (*FailoverOpts, error) { 70 | failoverOpts := FailoverOpts{ 71 | "mode": ctx.Failover.Mode, 72 | } 73 | 74 | if ctx.Failover.ParamsJSON != "" { 75 | if err := json.Unmarshal([]byte(ctx.Failover.ParamsJSON), &failoverOpts); err != nil { 76 | return nil, fmt.Errorf("Failed to parse failover parameters: %s", err) 77 | } 78 | } 79 | 80 | if ctx.Failover.StateProvider != "" { 81 | failoverOpts["state_provider"] = ctx.Failover.StateProvider 82 | } 83 | 84 | return &failoverOpts, 
nil 85 | } 86 | -------------------------------------------------------------------------------- /cli/replicasets/save.go: -------------------------------------------------------------------------------- 1 | package replicasets 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | 8 | "github.com/tarantool/cartridge-cli/cli/cluster" 9 | "github.com/tarantool/cartridge-cli/cli/project" 10 | 11 | "gopkg.in/yaml.v2" 12 | 13 | "github.com/apex/log" 14 | "github.com/tarantool/cartridge-cli/cli/context" 15 | ) 16 | 17 | func Save(ctx *context.Ctx, args []string) error { 18 | var err error 19 | 20 | if err := project.FillCtx(ctx); err != nil { 21 | return err 22 | } 23 | 24 | if ctx.Replicasets.File == "" { 25 | ctx.Replicasets.File = defaultReplicasetsFile 26 | } 27 | if ctx.Replicasets.File, err = filepath.Abs(ctx.Replicasets.File); err != nil { 28 | return fmt.Errorf("Failed to get replicasets configuration file absolute path: %s", err) 29 | } 30 | 31 | log.Infof("Save current replicasets to %s", ctx.Replicasets.File) 32 | 33 | conn, err := cluster.ConnectToSomeJoinedInstance(ctx) 34 | if err != nil { 35 | return err 36 | } 37 | 38 | topologyReplicasets, err := getTopologyReplicasets(conn) 39 | if err != nil { 40 | return fmt.Errorf("Failed to get current topology replicasets: %s", err) 41 | } 42 | 43 | newReplicasetsConf := getReplicasetsConf(topologyReplicasets) 44 | newConfContent, err := yaml.Marshal(*newReplicasetsConf) 45 | if err != nil { 46 | return project.InternalError("Failed to marshal new replicasets conf content: %s", err) 47 | } 48 | 49 | confFile, err := os.OpenFile(ctx.Replicasets.File, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755) 50 | if err != nil { 51 | return fmt.Errorf("Failed to open replicasets config for writing: %s", err) 52 | } 53 | 54 | if _, err := confFile.Write(newConfContent); err != nil { 55 | return fmt.Errorf("Failed to write new replicasets config: %s", err) 56 | } 57 | 58 | return nil 59 | } 60 | 61 | func 
getReplicasetsConf(topologyReplicasets *TopologyReplicasets) *ReplicasetsConf { 62 | replicasetsConf := &ReplicasetsConf{} 63 | 64 | for _, topologyReplicaset := range *topologyReplicasets { 65 | replicasetConf := &ReplicasetConf{ 66 | Roles: topologyReplicaset.Roles, 67 | AllRW: topologyReplicaset.AllRW, 68 | Weight: topologyReplicaset.Weight, 69 | VshardGroup: topologyReplicaset.VshardGroup, 70 | } 71 | 72 | for _, topologyInstance := range topologyReplicaset.Instances { 73 | replicasetConf.InstanceNames = append(replicasetConf.InstanceNames, topologyInstance.Alias) 74 | } 75 | 76 | (*replicasetsConf)[topologyReplicaset.Alias] = replicasetConf 77 | } 78 | 79 | return replicasetsConf 80 | } 81 | -------------------------------------------------------------------------------- /cli/create/templates/cartridge/README.md: -------------------------------------------------------------------------------- 1 | # Simple Tarantool Cartridge-based application 2 | 3 | This a simplest application based on Tarantool Cartridge. 4 | 5 | ## Quick start 6 | 7 | To build application and setup topology: 8 | 9 | ```bash 10 | cartridge build 11 | cartridge start -d 12 | cartridge replicasets setup --bootstrap-vshard 13 | ``` 14 | 15 | Now you can visit http://localhost:8081 and see your application's Admin Web UI. 16 | 17 | **Note**, that application stateboard is always started by default. 18 | See [`.cartridge.yml`](./.cartridge.yml) file to change this behavior. 19 | 20 | ## Application 21 | 22 | Application entry point is [`init.lua`](./init.lua) file. 23 | It configures Cartridge, initializes admin functions and exposes metrics endpoints. 24 | Before requiring `cartridge` module `package_compat.cfg()` is called. 25 | It configures package search path to correctly start application on production 26 | (e.g. using `systemd`). 27 | 28 | ## Roles 29 | 30 | Application has one simple role, [`app.roles.custom`](./app/roles/custom.lua). 
31 | It exposes `/hello` and `/metrics` endpoints: 32 | 33 | ```bash 34 | curl localhost:8081/hello 35 | curl localhost:8081/metrics 36 | ``` 37 | 38 | Also, Cartridge roles [are registered](./init.lua) 39 | (`vshard-storage`, `vshard-router` and `metrics`). 40 | 41 | You can add your own role, but don't forget to register in using 42 | `cartridge.cfg` call. 43 | 44 | ## Instances configuration 45 | 46 | Configuration of instances that can be used to start application 47 | locally is places in [instances.yml](./instances.yml). 48 | It is used by `cartridge start`. 49 | 50 | ## Topology configuration 51 | 52 | Topology configuration is described in [`replicasets.yml`](./replicasets.yml). 53 | It is used by `cartridge replicasets setup`. 54 | 55 | ## Tests 56 | 57 | Simple unit and integration tests are placed in [`test`](./test) directory. 58 | 59 | First, we need to install test dependencies: 60 | 61 | ```bash 62 | ./deps.sh 63 | ``` 64 | 65 | Then, run linter: 66 | 67 | ```bash 68 | .rocks/bin/luacheck . 69 | ``` 70 | 71 | Now we can run tests: 72 | 73 | ```bash 74 | cartridge stop # to prevent "address already in use" error 75 | .rocks/bin/luatest -v 76 | ``` 77 | 78 | ## Admin 79 | 80 | Application has admin function [`probe`](./app/admin.lua) configured. 81 | You can use it to probe instances: 82 | 83 | ```bash 84 | cartridge start -d # if you've stopped instances 85 | cartridge admin probe \ 86 | --name {{ .Name }} \ 87 | --run-dir ./tmp/run \ 88 | --uri localhost:3302 89 | ``` 90 | -------------------------------------------------------------------------------- /README.dev.md: -------------------------------------------------------------------------------- 1 | ## How to run tests 2 | 3 | ### Requirements 4 | 5 | 1. Install [Go](https://go.dev/doc/install) 1.18. 6 | ```bash 7 | go version 8 | ``` 9 | 10 | 2. Install [Python](https://www.python.org/downloads/) 3.x and [pip](https://pypi.org/project/pip/). 
11 | ```bash 12 | python3 --version 13 | pip3 --version 14 | ``` 15 | 16 | 3. Install `unzip`, `rpm` and `cpio` packages. 17 | ```bash 18 | unzip -v 19 | rpm --version 20 | cpio --version 21 | ``` 22 | 23 | 4. Install [git](https://git-scm.com/downloads). 24 | ```bash 25 | git --version 26 | ``` 27 | 28 | 5. Install [docker](https://www.docker.com/get-started). 29 | ```bash 30 | docker --version 31 | ``` 32 | 33 | 6. Install [Tarantool](https://www.tarantool.io/en/download/os-installation/) (1.10 or 2.x version). 34 | ```bash 35 | tarantool --version 36 | ``` 37 | 38 | 7. Install [mage](https://github.com/magefile/mage). 39 | ```bash 40 | mage --version 41 | ``` 42 | If something went wrong, this may help. 43 | ```bash 44 | export PATH=$(go env GOPATH)/bin:$PATH 45 | ``` 46 | 47 | 8. Install [pytest](https://docs.pytest.org/en/6.2.x/getting-started.html). 48 | ```bash 49 | python3 -m pytest --version 50 | ``` 51 | 52 | 9. Clone this repo. 53 | ```bash 54 | git clone git@github.com:tarantool/cartridge-cli.git 55 | cd ./cartridge-cli 56 | ``` 57 | 58 | 10. To run tests, git user must be configured. For example, 59 | ```bash 60 | git config --global user.email "test@tarantool.io" 61 | git config --global user.name "Tar Antool" 62 | ``` 63 | 64 | 11. Install pytest dependencies. 65 | ```bash 66 | pip3 install -r test/requirements.txt 67 | ``` 68 | 69 | 12. Install luacheck. 70 | ```bash 71 | tarantoolctl rocks install luacheck 72 | ``` 73 | 74 | 13. Install lichen. 75 | ```bash 76 | go install github.com/uw-labs/lichen@latest 77 | ``` 78 | 79 | All remaining dependencies (like code generation) will be invoked with mage if needed. 80 | 81 | ### Test run 82 | 83 | To run all tests, call 84 | ```bash 85 | mage test 86 | ``` 87 | 88 | You can run specific test sections. 89 | ```bash 90 | # Static code analysis (including tests code). 91 | mage lint 92 | # Go unit tests. 93 | mage unit 94 | # pytest integration tests. 
95 | mage integration 96 | # Run test example with pytest. 97 | mage testExamples 98 | # pytest end-to-end tests for packages. 99 | mage e2e 100 | ``` 101 | -------------------------------------------------------------------------------- /test/integration/admin/test_help.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from utils import (get_admin_connection_params, get_log_lines, 3 | run_command_and_get_output) 4 | 5 | 6 | @pytest.mark.parametrize('connection_type', ['find-socket', 'connect', 'instance']) 7 | def test_help_many_args(cartridge_cmd, custom_admin_running_instances, connection_type, tmpdir): 8 | project = custom_admin_running_instances['project'] 9 | 10 | cmd = [ 11 | cartridge_cmd, 'admin', 12 | 'echo_user', '--help', 13 | ] 14 | cmd.extend(get_admin_connection_params(connection_type, project)) 15 | 16 | rc, output = run_command_and_get_output(cmd, cwd=tmpdir) 17 | assert rc == 0 18 | 19 | assert get_log_lines(output) == [ 20 | '• Admin function "echo_user" usage:', 21 | 'echo_user usage', 22 | 'Args:', 23 | '--age number age usage', 24 | '--loves-cakes boolean loves_cakes usage', 25 | '--username string username usage', 26 | ] 27 | 28 | 29 | def test_help_no_args(cartridge_cmd, custom_admin_running_instances, tmpdir): 30 | project = custom_admin_running_instances['project'] 31 | run_dir = project.get_run_dir() 32 | 33 | cmd = [ 34 | cartridge_cmd, 'admin', 35 | '--name', project.name, 36 | '--run-dir', run_dir, 37 | 'func_no_args', '--help', 38 | ] 39 | rc, output = run_command_and_get_output(cmd, cwd=tmpdir) 40 | assert rc == 0 41 | 42 | assert get_log_lines(output) == [ 43 | '• Admin function "func_no_args" usage:', 44 | 'func_no_args usage', 45 | ] 46 | 47 | 48 | def test_help_long_func_name(cartridge_cmd, custom_admin_running_instances, tmpdir): 49 | project = custom_admin_running_instances['project'] 50 | run_dir = project.get_run_dir() 51 | 52 | exp_output_lines = [ 53 | '• Admin 
function "func.long.name" usage:', 54 | 'func_long_name usage', 55 | ] 56 | 57 | cmd = [ 58 | cartridge_cmd, 'admin', 59 | '--name', project.name, 60 | '--run-dir', run_dir, 61 | 'func.long.name', '--help', 62 | ] 63 | rc, output = run_command_and_get_output(cmd, cwd=tmpdir) 64 | assert rc == 0 65 | assert get_log_lines(output) == exp_output_lines 66 | 67 | cmd = [ 68 | cartridge_cmd, 'admin', 69 | '--name', project.name, 70 | '--run-dir', run_dir, 71 | 'func', 'long', 'name', '--help', 72 | ] 73 | rc, output = run_command_and_get_output(cmd, cwd=tmpdir) 74 | assert rc == 0 75 | assert get_log_lines(output) == exp_output_lines 76 | -------------------------------------------------------------------------------- /cli/common/lua.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "sort" 8 | 9 | "github.com/apex/log" 10 | lua "github.com/yuin/gopher-lua" 11 | ) 12 | 13 | const ( 14 | rocksManifestPath = ".rocks/share/tarantool/rocks/manifest" 15 | ) 16 | 17 | type RocksVersions map[string][]string 18 | 19 | // LuaReadStringVar reads global string variable from specified Lua file 20 | func LuaReadStringVar(filePath string, varName string) (string, error) { 21 | L := lua.NewState() 22 | defer L.Close() 23 | 24 | // set env to empty table 25 | emptyEnv := lua.LTable{} 26 | L.Env = &emptyEnv 27 | 28 | if err := L.DoFile(filePath); err != nil { 29 | return "", fmt.Errorf("Failed to read file %s: %s", filePath, err) 30 | } 31 | 32 | luaVal := L.Env.RawGetString(varName) 33 | if luaVal.Type() == lua.LTNil { 34 | return "", fmt.Errorf("Variable `%s` is not set in %s", varName, filePath) 35 | } 36 | 37 | if luaVal.Type() != lua.LTString { 38 | return "", fmt.Errorf("Field `%s` must be string in %s", varName, filePath) 39 | } 40 | 41 | return luaVal.String(), nil 42 | } 43 | 44 | // LuaGetRocksVersions gets map which contains {name: versions} from rocks manifest 45 | 
func LuaGetRocksVersions(appDirPath string) (RocksVersions, error) { 46 | rocksVersionsMap := RocksVersions{} 47 | 48 | manifestFilePath := filepath.Join(appDirPath, rocksManifestPath) 49 | if _, err := os.Stat(manifestFilePath); err == nil { 50 | L := lua.NewState() 51 | defer L.Close() 52 | 53 | if err := L.DoFile(manifestFilePath); err != nil { 54 | return nil, fmt.Errorf("Failed to read manifest file %s: %s", manifestFilePath, err) 55 | } 56 | 57 | depsL := L.Env.RawGetString("dependencies") 58 | depsLTable, ok := depsL.(*lua.LTable) 59 | if !ok { 60 | return nil, fmt.Errorf("Failed to read manifest file: dependencies is not a table") 61 | } 62 | 63 | depsLTable.ForEach(func(depNameL lua.LValue, depInfoL lua.LValue) { 64 | depName := depNameL.String() 65 | 66 | depInfoLTable, ok := depInfoL.(*lua.LTable) 67 | if !ok { 68 | log.Warnf("Failed to get %s dependency info", depName) 69 | } else { 70 | depInfoLTable.ForEach(func(depVersionL lua.LValue, _ lua.LValue) { 71 | rocksVersionsMap[depName] = append(rocksVersionsMap[depName], depVersionL.String()) 72 | }) 73 | } 74 | }) 75 | 76 | for _, versions := range rocksVersionsMap { 77 | sort.Strings(versions) 78 | } 79 | 80 | } else if !os.IsNotExist(err) { 81 | return nil, fmt.Errorf("Failed to read manifest file %s: %s", manifestFilePath, err) 82 | } 83 | 84 | return rocksVersionsMap, nil 85 | } 86 | -------------------------------------------------------------------------------- /doc/commands/build.rst: -------------------------------------------------------------------------------- 1 | Building your application locally 2 | ================================= 3 | 4 | To build your application locally (for local testing), run this in any directory: 5 | 6 | .. code-block:: bash 7 | 8 | cartridge build [PATH] [flags] 9 | 10 | Flags 11 | ----- 12 | 13 | .. container:: table 14 | 15 | .. 
list-table:: 16 | :widths: 20 80 17 | :header-rows: 0 18 | 19 | * - ``--spec`` 20 | - Path to a custom ``.rockspec`` file 21 | that you want use for the current build. 22 | 23 | If you run ``cartridge build`` without the ``--spec`` flag, 24 | your application directory must contain a ``.rockspec``. 25 | The file is already in that directory if you created your app from the default template. 26 | 27 | ``build`` also supports :doc:`global flags `. 28 | The ``--quiet`` flag is particularly convenient when building an application. 29 | 30 | Details 31 | ------- 32 | 33 | The command requires one argument -- the path to your application directory 34 | (that is, to the build source). 35 | The default path is ``.`` (current directory). 36 | 37 | ``cartridge build`` runs: 38 | 39 | 1. ``./cartridge.pre-build`` (if this file exists in the application root directory) 40 | 2. ``tarantoolctl rocks make`` 41 | 42 | During step 2 -- the key step here -- ``cartridge`` installs all dependencies 43 | specified in the ``.rockspec`` file. 44 | 45 | If your application depends on closed-source rocks, or if the build should contain 46 | rocks from a project added as a submodule, **install** all these 47 | dependencies **before** calling ``tarantoolctl rocks make``. 48 | You can do so using a special file, ``cartridge.pre-build``, 49 | which has to be located in your application directory. 50 | If you created your application from template, the directory already contains the file. 51 | 52 | In ``cartridge.pre-build``, specify all the rocks to build from submodules. 53 | For example, add the following line: 54 | 55 | .. code-block:: bash 56 | 57 | tarantoolctl rocks make --chdir ./third_party/proj 58 | 59 | To learn more, read about 60 | :doc:`pre-build and post-build scripts `. 61 | 62 | The fully built application will appear in the ``.rocks`` directory. 63 | You can start it locally from your application directory. 
64 | 65 | Instead of using the pre-build script, you can define the build logic 66 | by including ``cmake`` commands in your ``.rockspec``, 67 | `like we do it in Cartridge `_. 68 | 69 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Release 3 | 4 | on: 5 | push: 6 | branches: 7 | - master 8 | tags: 9 | - '*' 10 | 11 | env: 12 | GO_VERSION: '>=1.21.5' 13 | GORELEASER_VERSION: v0.146.0 14 | 15 | jobs: 16 | create-packages: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@master 20 | with: 21 | fetch-depth: 0 22 | 23 | - name: Setup Go 24 | uses: actions/setup-go@v3 25 | with: 26 | go-version: ${{ env.GO_VERSION }} 27 | 28 | - name: Setup Mage 29 | run: | 30 | git clone https://github.com/magefile/mage 31 | cd mage 32 | go run bootstrap.go 33 | 34 | - name: Setup GoReleaser 35 | run: | 36 | curl -O -L https://github.com/goreleaser/goreleaser/releases/download/${{ env.GORELEASER_VERSION }}/goreleaser_amd64.deb 37 | sudo dpkg -i goreleaser_amd64.deb 38 | rm goreleaser_amd64.deb 39 | 40 | - name: Set GoReleaser flags 41 | id: set-goreleaser-flags 42 | run: | 43 | if ${{ startsWith(github.ref, 'refs/tags') }} ; then 44 | echo "::set-output name=GORELEASER_FLAGS::--rm-dist --skip-validate" 45 | else 46 | echo "::set-output name=GORELEASER_FLAGS::--rm-dist --snapshot --skip-publish --skip-validate" 47 | fi 48 | 49 | - name: Build packages 50 | env: 51 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 52 | run: | 53 | goreleaser release ${{ steps.set-goreleaser-flags.outputs.GORELEASER_FLAGS }} 54 | 55 | - name: Upload packages artifacts 56 | uses: actions/upload-artifact@v2 57 | with: 58 | name: packages 59 | path: dist 60 | 61 | publish-s3: 62 | needs: create-packages 63 | runs-on: ubuntu-latest 64 | if: startsWith(github.ref, 'refs/tags') 65 | steps: 66 | - uses: actions/checkout@master 67 | 68 
| - name: Setup Go 69 | uses: actions/setup-go@v3 70 | with: 71 | go-version: ${{ env.GO_VERSION }} 72 | 73 | - name: Setup Mage 74 | run: | 75 | git clone https://github.com/magefile/mage 76 | cd mage 77 | go run bootstrap.go 78 | 79 | - name: Download packages artifacts 80 | uses: actions/download-artifact@v2 81 | with: 82 | name: packages 83 | path: dist 84 | 85 | - name: Publish packages to RWS 86 | env: 87 | RWS_URL_PART: https://rws.tarantool.org/release/modules 88 | RWS_AUTH: ${{ secrets.RWS_AUTH }} 89 | run: mage publishRWS 90 | -------------------------------------------------------------------------------- /doc/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | 1. Install third-party software: 5 | 6 | * `Install `__ 7 | ``git``, the version control system. 8 | 9 | * `Install `__ 10 | the ``unzip`` utility. 11 | 12 | * `Install `__ 13 | the ``gcc`` compiler. 14 | 15 | * `Install `__ 16 | the ``cmake`` and ``make`` tools. 17 | 18 | 19 | 2. Install Tarantool 1.10 or higher: 20 | 21 | You can: 22 | 23 | * `Install it from a package `__. 24 | * `Build it from source `__. 25 | 26 | 3. [For all platforms except macOS] If you build Tarantool from source, 27 | you need to set up the Tarantool packages repository manually: 28 | 29 | .. code-block:: bash 30 | 31 | curl -L https://tarantool.io/release/2/installer.sh | sudo -E bash -s -- --repo-only 32 | 33 | 4. Install the ``cartridge-cli`` package: 34 | 35 | * For CentOS, Fedora, ALT Linux (RPM package): 36 | 37 | .. code-block:: bash 38 | 39 | sudo yum install cartridge-cli 40 | 41 | * For Debian, Ubuntu (DEB package): 42 | 43 | .. code-block:: bash 44 | 45 | sudo apt-get install cartridge-cli 46 | 47 | * For MacOS X (Homebrew formula): 48 | 49 | .. code-block:: bash 50 | 51 | brew install cartridge-cli 52 | 53 | * Or build locally: 54 | 55 | .. code-block:: bash 56 | 57 | mage build 58 | 59 | 5. 
Check the installation: 60 | 61 | .. code-block:: bash 62 | 63 | cartridge version 64 | 65 | Enable shell completion 66 | ----------------------- 67 | 68 | Linux 69 | ~~~~~ 70 | 71 | The ``cartridge-cli`` RPM and DEB packages contain a Bash completion script, 72 | ``/etc/bash_completion.d/cartridge``. 73 | 74 | To enable completion after ``cartridge-cli`` installation, open a new shell or 75 | source the completion file at ``/etc/bash_completion.d/cartridge``. 76 | Make sure that you have ``bash-completion`` installed. 77 | 78 | To install Zsh completion, run: 79 | 80 | .. code-block:: bash 81 | 82 | cartridge gen completion --skip-bash --zsh="${fpath[1]}/_cartridge" 83 | 84 | Now enable shell completion: 85 | 86 | .. code-block:: bash 87 | 88 | echo "autoload -U compinit; compinit" >> ~/.zshrc 89 | 90 | OS X 91 | ~~~~ 92 | 93 | If you install ``cartridge-cli`` from ``brew``, it automatically installs both 94 | Bash and Zsh completion. 95 | 96 | --------------------------------------------------------------------------------