├── .ansible-lint
├── .gitignore
├── .python-version
├── .travis.yml
├── CHANGELOG.md
├── LICENSE
├── MANIFEST.in
├── Makefile
├── README.md
├── Vagrantfile
├── docs
│   ├── config.yml.example
│   ├── cookbook
│   │   └── README.md
│   ├── guide
│   │   └── README.md
│   ├── testcase_example.yml
│   ├── testcase_geo_advanced_example.yml
│   └── testcase_geo_example.yml
├── requirements-dev.txt
├── requirements.txt
├── setup.cfg
├── setup.py
├── tank
│   ├── __init__.py
│   ├── bootstrap.py
│   ├── controllers
│   │   ├── __init__.py
│   │   ├── ansible.py
│   │   ├── base.py
│   │   └── cluster.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── binding.py
│   │   ├── cloud_settings.py
│   │   ├── exc.py
│   │   ├── lambdas.py
│   │   ├── regions.py
│   │   ├── run.py
│   │   ├── testcase.py
│   │   ├── tf.py
│   │   └── utils.py
│   ├── ext
│   │   └── __init__.py
│   ├── logging_conf.py
│   ├── main.py
│   ├── plugins
│   │   └── __init__.py
│   ├── resources
│   │   ├── ansible
│   │   │   ├── ansible-requirements.yml
│   │   │   ├── ansible.cfg
│   │   │   ├── common.yml
│   │   │   ├── core.yml
│   │   │   └── templates
│   │   │       └── ansible-report.json.j2
│   │   ├── bindings.yml
│   │   ├── providers
│   │   │   ├── digitalocean
│   │   │   │   ├── backend.tf
│   │   │   │   └── main.tf
│   │   │   └── gce
│   │   │       ├── backend.tf
│   │   │       └── main.tf
│   │   ├── regions.yml
│   │   ├── scripts
│   │   │   └── tank-packetloss
│   │   └── testcase_schema.yml
│   ├── terraform_installer.py
│   └── version.py
├── tests
│   ├── conftest.py
│   ├── test_main.py
│   ├── test_tank.py
│   └── test_testcase.py
└── web3_foundation_grants_badge_black.png
/.ansible-lint:
--------------------------------------------------------------------------------
1 | exclude_paths:
2 | - .travis.yml
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | coverage-report/
44 | .cache
45 | nosetests.xml
46 | coverage.xml
47 | *.cover
48 | .hypothesis/
49 | .pytest_cache/
50 |
51 | # Translations
52 | *.mo
53 | *.pot
54 |
55 | # Django stuff:
56 | *.log
57 | local_settings.py
58 | db.sqlite3
59 |
60 | # Flask stuff:
61 | instance/
62 | .webassets-cache
63 |
64 | # Scrapy stuff:
65 | .scrapy
66 |
67 | # Sphinx documentation
68 | docs/_build/
69 |
70 | # PyBuilder
71 | target/
72 |
73 | # Jupyter Notebook
74 | .ipynb_checkpoints
75 |
76 | # pyenv
77 | .python-version
78 |
79 | # celery beat schedule file
80 | celerybeat-schedule
81 |
82 | # SageMath parsed files
83 | *.sage.py
84 |
85 | # Environments
86 | .env
87 | .venv
88 | env/
89 | venv/
90 | ENV/
91 | env.bak/
92 | venv.bak/
93 |
94 | # Spyder project settings
95 | .spyderproject
96 | .spyproject
97 |
98 | # Rope project settings
99 | .ropeproject
100 |
101 | # mkdocs documentation
102 | /site
103 |
104 | # mypy
105 | .mypy_cache/
106 |
107 | # IDEs
108 | .idea
109 |
110 | # Vagrant
111 | .vagrant
112 |
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
1 | 3.7.2
2 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 |
3 | matrix:
4 | include:
5 | - os: linux
6 | dist: xenial
7 | python: 3.7
8 | - os: osx
9 | osx_image: xcode11
10 | language: shell
11 |
12 | sudo: false
13 |
14 | cache:
15 | - pip
16 |
17 | before_install:
18 | - if [ "$TRAVIS_OS_NAME" = "osx" ]; then brew update ; fi
19 |
20 | script:
21 | - make dist
22 |
23 | deploy:
24 | - provider: pypi
25 | user: alevkin
26 | password:
27 | secure: "XS3KiJA4iA9ctQbc+jBFAocg1xphTOLloFAcOmXp4fStXvZbnWQ0SynsJ+zrQ7/6I3lXxfiJMC3Rqew5BIliIRmUAoj4zXuWUqiU81OSimKHp8eItJwXcS7oOHuYxn2V5OxgbwhwMsVUXw9qhCzRDNLZj18kUkFZo1nks0XHebliC2ma6FKgxThFyPr5fqUwnnNpbZZgr1y51UeOAyCFvfR9ikE0sf82jwf3OOWpNXt0GtdE+atiA9Eql4SrQZKxJdBXxsvugB2/oLZO21buWy/DVfc9OWzcrCTsh6NxTM6N3ajsbzvB7eRtJjwIf6o3Prb+40sa1QUTdKoWy9+5PiFpj+2IuAoSmJH95Ms2itjk8Hv04SkJvnQyb1MVGuNj/QLp+y8gZK5laM6CfXO3v+IGuYIMrnbwSG//FuGTdFoPJGbCWiHICHpvJogzwbVQwJ9g5ot8Tl97JURHDDdkjNUvXfcjet9qdfJ5iIn/o+zqEnkjLelhdLW/s/rfIytvESppTb03SUQDX3XXk405ftrwzNczxfvtoxmYvfYYB5waBKmqfjfoT3m/oQ2jIs8FTmb7VU/zoc7RHruBQJTSGsfjDXSqsOxu+8YsA9HA9QaCrCT61wsqloCBR3HFvIuaudMLws3P6mg+AaMNsBzyLeA/bd4X1+pzLvZiQ1DwDv4="
28 | distributions: sdist bdist_wheel
29 | on:
30 | tags: true
31 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # MixBytes Tank Change History
2 |
3 | ## 1.0.1
4 |
5 | Fix for GCE provider
6 |
7 | Change local terraform priority
8 |
9 | ## 1.0
10 |
11 | Added support of geographically distributed testing, network packet loss
12 |
13 | Numerous usability improvements
14 |
15 |
16 | ## 0.2
17 |
18 | Added support of test cases
19 |
20 | Added support of different blockchains via so-called bindings
21 |
22 |
23 | ## 0.1
24 |
25 | Rewritten in Python
26 |
27 |
28 | ## 0.0.1
29 |
30 | Initial release
31 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2019 MixBytes
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include *.py *.sh
2 | include setup.cfg
3 | include README.md CHANGELOG.md LICENSE
4 | include *.txt
5 | recursive-include tank/resources *
6 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: clean virtualenv test dist dist-upload
2 |
3 | clean:
4 | find . -name '*.py[co]' -delete
5 |
6 | virtualenv:
7 | virtualenv --prompt '|> tank <| ' env
8 | env/bin/pip install -r requirements-dev.txt
9 | env/bin/python setup.py develop
10 | @echo
11 | @echo "VirtualENV Setup Complete. Now run: source env/bin/activate"
12 | @echo
13 |
14 | test:
15 | python -m pytest \
16 | -v \
17 | --cov=tank \
18 | --cov-report=term \
19 | --cov-report=html:coverage-report \
20 | tests/
21 |
22 | dist: clean
23 | rm -rf dist/*
24 | python setup.py sdist
25 | python setup.py bdist_wheel
26 |
27 | dist-upload:
28 | twine upload dist/*
29 |
30 | test-dist-upload:
31 | twine upload -r test dist/*
32 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MixBytes Tank [](https://travis-ci.org/mixbytes/tank)
2 |
3 | MixBytes Tank is a console tool which can set up a blockchain cluster in minutes in a cloud and bench it using various transaction loads.
4 | It'll highlight blockchain problems and give insights into performance and stability of the technology.
5 |
6 | At the moment, supported blockchains are [Haya](https://github.com/mixbytes/haya) and [Polkadot](https://polkadot.network).
7 |
8 | Setup - bench - dispose workflow is very similar to a test case, that is why configuration of such run is described in a declarative YAML file called "testcase".
9 |
10 | More info can be found at:
11 |
12 | * [Guide](docs/guide/README.md)
13 | * [Cookbook](docs/cookbook/README.md)
14 | * Quick guide below
15 |
16 | Contributions are welcome!
17 |
18 | Discuss in our chat: [https://t.me/MixBytes](https://t.me/MixBytes).
19 |
20 |
21 | # Quick guide
22 |
23 | ## Requirements
24 |
25 | - Python3
26 |
27 | ## Installation
28 |
29 | ```shell
30 | pip3 install mixbytes-tank
31 | ```
32 |
33 | Use the `--pre` option to get the latest release candidate:
34 |
35 | ```shell
36 | pip3 install mixbytes-tank --pre
37 | ```
38 |
39 |
40 | ## Usage
41 |
42 | ### 1. Configure the user config
43 |
44 | Configure `~/.tank.yml`. The example can be found at [docs/config.yml.example](docs/config.yml.example).
45 |
46 | Please configure at least one cloud provider. The essential steps are:
47 | * providing (and possibly creating) a key pair
48 | * registering a public key with your cloud provider (if needed)
49 | * specifying a cloud provider access token or credentials
50 |
51 | ### 2. Create or get a tank testcase
52 |
53 | The example can be found at [docs/testcase_example.yml](docs/testcase_example.yml).
54 |
55 | ### 3. Start a tank run
56 |
57 | ```shell
58 | tank cluster deploy <testcase_file>
59 | ```
60 |
61 | As a result, the cluster instance listing will be printed along with the run id and the monitoring link.
62 |
63 | ### 4. Log in to the monitoring
64 |
65 | Open the monitoring link in your browser, type in 'tank' in the username and password fields.
66 | You will see cluster metrics in the predefined dashboards.
67 |
68 | ### 5. List current active runs
69 |
70 | There can be multiple tank runs at the same time. The runs list and brief information about each run can be seen via:
71 |
72 | ```shell
73 | tank cluster list
74 | ```
75 |
76 | ### 6. Create synthetic load
77 |
78 | ```shell
79 | tank cluster bench <run_id> <load_profile.js> [--tps N] [--total-tx N]
80 | ```
81 | 
82 | `<run_id>` - run ID
83 | 
84 | `<load_profile.js>` - a js file with a load profile: custom logic which creates transactions to be sent to the cluster
85 |
86 | `--tps` - total number of generated transactions per second,
87 |
88 | `--total-tx` - total number of transactions to be sent.
89 |
90 | ### 7. Shutdown and remove the cluster
91 |
92 | ```shell
93 | tank cluster destroy <run_id>
94 | ```
95 |
96 |
97 | # Development
98 |
99 | ## Branching
100 |
101 | features -> `develop` -> `master`.
102 |
103 | `master` is production-ready code.
104 |
105 | `develop` is expected-to-be production-ready code which is merged into master after thorough testing
106 | (so, no faulty code in `develop` pls).
107 |
108 | `develop` and `master` branches are permanent.
109 |
--------------------------------------------------------------------------------
/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 | config.vm.box = "pogosoftware/ubuntu-18.04-docker"
3 | end
4 |
--------------------------------------------------------------------------------
/docs/config.yml.example:
--------------------------------------------------------------------------------
1 | tank:
2 | # cloud provider to use
3 | provider: digitalocean
4 | ansible:
5 | forks: 40 # number of parallel processes to use during cluster provisioning
6 | # Optional. Login and password to access monitoring
7 | monitoring:
8 | admin_user: "your_login"
9 | admin_password: "your_password"
10 |
11 | digitalocean:
12 | # Private key to manage benchmark instances.
13 | # It's recommended to create a distinct key pair for benchmarking purposes.
14 | # The simplest way is:
15 | # ssh-keygen -t rsa -b 2048 -f bench_key
16 | # (leave passphrase empty)
17 | # Please provide the full path to the private key.
18 | # Make sure that the permissions of the file are 0600 or 0400.
19 | pvt_key: /home/eenae/bench_temp_key
20 |
21 | # MD5 fingerprint of the public key.
22 | # Please note, in case of Digital Ocean you must add this key to your account at
23 | # https://cloud.digitalocean.com/account/security (you can also get the fingerprint there).
24 | ssh_fingerprint: "d5:13:35:7a:3d:2e:ca:eb:90:da:b5:5b:dd:bd:d8:fd"
25 |
26 | # Access token. In case of Digital Ocean the token can be created at
27 | # https://cloud.digitalocean.com/account/api/tokens.
28 | token: "5ffac1b92062cb08109f23c06c6f8f1787f096cac109fea60000000000000000"
29 |
30 | # Optional, for every cloud provider: ansible variable definitions.
31 | # Each variable will be passed to ansible as -e bc_{var_name}={value}
32 | ansible:
33 | # Name of the network interface of cloud machines
34 | private_interface: eth0
35 |
36 | gce:
37 | # Private key to manage benchmark instances.
38 | # It's recommended to create a distinct key pair for benchmarking purposes.
39 | # The simplest way is:
40 | # ssh-keygen -t rsa -b 2048 -f bench_key
41 | # (leave passphrase empty)
42 | # Please provide the full path to the private key.
43 | pvt_key: /home/eenae/bench_temp_key
44 |
45 | # Public key to manage benchmark instances.
46 | pub_key: /home/eenae/bench_temp_key.pub
47 |
48 | # Google Cloud Project ID and path to file with credentials
49 | project: noble-district-987654
50 | cred_path: /home/eenae/.ssh/gce.json
51 |
--------------------------------------------------------------------------------
/docs/cookbook/README.md:
--------------------------------------------------------------------------------
1 | # Cookbook
2 |
3 | ### Changing a blockchain node image
4 |
5 | Sometimes (e.g. during development) it's useful to test a blockchain node image rather than the one specified in the binding.
6 |
7 | To do so, redefine a corresponding Ansible variable in a testcase.
8 | E.g. for Polkadot case, you can write the following testcase:
9 |
10 | ```yaml
11 | binding: polkadot
12 |
13 | instances:
14 | boot: 1
15 | producer: 3
16 |
17 | ansible:
18 | polkadot_image: your-dockerhub-account/polkadot:docker-tag
19 | ```
20 |
21 | ### Alternative binding version
22 |
23 | Sometimes it is not enough to tweak a couple of Ansible variables as described above, and you may want to make changes to the binding.
24 |
25 | Bindings can be configured in `~/.tank/bindings.yml` (by default the predefined binding config is copied during the first run creation).
26 |
27 | You can create your own binding with any name and supply a git link to the binding Ansible role.
28 | The link can be accompanied with a branch / tag name to use.
29 |
30 | Fork an existing binding into a new repository. Alternatively, you can create a branch in the existing binding repository.
31 | Make the desired changes in the repository.
32 |
33 | Configure a new binding in `~/.tank/bindings.yml`:
34 |
35 | ```yaml
36 | my_binding:
37 | ansible:
38 | src: https://github.com/me/my-binding-repo
39 | ```
40 |
41 | use it in a testcase:
42 |
43 | ```yaml
44 | binding: my_binding
45 |
46 | instances:
47 | boot: 1
48 | producer: 3
49 | ```
50 |
--------------------------------------------------------------------------------
/docs/guide/README.md:
--------------------------------------------------------------------------------
1 | # Guide
2 |
3 | ## Requirements
4 |
5 | - Python3
6 |
7 |
8 | ## Installation
9 |
10 | ### Terraform & Terraform-Inventory
11 |
12 | You don't need to worry about installation of these tools.
13 | Everything will be automatically installed in the `~/.tank/bin` directory when the first `Run` object is created.
14 |
15 | ### Optional: create virtualenv
16 |
17 | Optionally, create and activate virtualenv (assuming `venv` is a directory of a newly-created virtualenv)
18 |
19 | Linux:
20 | ```shell
21 | sudo apt-get install -y python3-virtualenv
22 | python3 -m virtualenv -p python3 venv
23 | ```
24 |
25 | MacOS:
26 | ```shell
27 | pip3 install virtualenv
28 | python3 -m virtualenv -p python3 venv
29 | ```
30 |
31 | After creating virtualenv and opening a terminal, activate virtualenv first to be able to work with Tank:
32 |
33 | ```shell
34 | . venv/bin/activate
35 | ```
36 |
37 | Alternatively, each time call the Tank executable directly: `venv/bin/tank`.
38 |
39 | ### Tank
40 | ```shell
41 | pip3 install mixbytes-tank
42 | ```
43 |
44 |
45 | ## Configuration
46 |
47 | ### User config
48 |
49 | The user config is stored in `~/.tank.yml`. It keeps settings which are the same for the current user regardless of the blockchain or the testcase used at the moment
50 | (e.g., it tells which cloud provider to use no matter which blockchain you are testing).
51 |
52 | The example can be found at [docs/config.yml.example](../config.yml.example).
53 |
54 | The user config contains cloud provider configurations, pointer to the current cloud provider, and some auxiliary values.
55 |
56 | #### Cloud provider configuration
57 |
58 | Please configure at least one cloud provider. The essential steps are:
59 | * providing (and possibly creating) a key pair
60 | * registering a public key with your cloud provider (if needed)
61 | * specifying a cloud provider access token or credentials
62 |
63 | We recommend creating a distinct key pair for benchmarking purposes.
64 | The key must not be protected with a passphrase.
65 | Make sure that the permissions of the private key are 0600 or 0400 (i.e. the private key is not accessible by anyone except the owner).
66 | The simplest way is:
67 |
68 | ```shell
69 | ssh-keygen -t rsa -b 2048 -f bench_key
70 | ```
71 |
72 | The command will create a private key file (`bench_key`) and a public key file (`bench_key.pub`).
73 | The private key will be used to gain access to the cloud instances created during a run.
74 | It must be provided to each cloud provider using the `pvt_key` option.
75 |
76 | The public key goes to cloud provider settings in accordance with the cloud provider requirements (e.g. GCE takes a file and DO - only a fingerprint).
77 |
78 | A cloud provider is configured as a designated section in the user config.
79 | The Digital Ocean section is called `digitalocean`, the Google Compute Engine section - `gce`.
80 |
81 | The purpose of having multiple cloud provider sections at the same time is to be able to quickly switch cloud providers using the `provider` pointer in the `tank` section.
82 |
83 | ##### Ansible variables forwarding
84 |
85 | There is a way to globally specify some Ansible variables for a particular cloud provider.
86 | It can be done in the `ansible` section of the cloud provider configuration.
87 | Obviously, the values specified should be used in some blockchain bindings (see below).
88 | The fact that the same variables will be passed to any blockchain binding makes this feature rarely used and low-level.
89 | Each variable will be prefixed with `bc_` before being passed to Ansible.
90 |
91 | #### Other options
92 |
93 | #### Logging
94 |
95 | Note: these options affect only Tank logging. Terraform and Ansible won't be affected.
96 |
97 | `log.logging`: `level`: sets the log level. Acceptable values are `ERROR`, `WARNING` (by default), `INFO`, `DEBUG`.
98 |
99 | `log.logging`: `file`: sets the log file name (console logging is set by default).
100 |
101 | ### Testcase
102 |
103 | A Tank testcase describes a benchmark scenario.
104 |
105 | A simple example can be found at [docs/testcase_example.yml](../testcase_example.yml).
106 |
107 | Principal testcase contents are a current blockchain binding name and the configuration of instances.
108 |
109 | #### Blockchain binding
110 |
111 | Tank supports many blockchains by using a concept of binding.
112 | A binding provides an ansible role to deploy the blockchain (some examples [here](https://github.com/mixbytes?utf8=✓&q=tank.ansible&type=&language=))
113 | and javascript code - to create load in the cluster ([examples here](https://github.com/mixbytes?utf8=✓&q=tank.bench&type=&language=)).
114 | Similarly, databases use bindings to provide APIs to programming languages.
115 |
116 | A binding is specified by its name, e.g.:
117 | ```yaml
118 | binding: polkadot
119 | ```
120 |
121 | You shouldn't worry about writing or understanding a binding unless you want to add support of some blockchain to Tank.
122 |
123 | #### Blockchain instances
124 |
125 | A blockchain cluster consists of a number of different *instance roles*, e.g. full nodes and miners/validators.
126 | Available roles depend on the binding used.
127 |
128 | A *blockchain instances configuration* is a set of *role configurations*.
129 | E.g., in the simplest case:
130 |
131 | ```yaml
132 | instances:
133 | boot: 1
134 | producer: 3
135 | ```
136 |
137 | ##### role configuration
138 |
139 | A *role configuration* is a number in the simplest case. The number specifies how many servers should be set up for the role to be installed.
140 |
141 | ```yaml
142 | instances:
143 | producer: 3
144 | ```
145 |
146 | Alternatively, a role configuration can be written as an object with various options - generally applicable and role configuration-specific.
147 |
148 | ```yaml
149 | instances:
150 | boot:
151 | count: 1
152 | ```
153 |
154 | * An option `count` specifies how many servers to set up with this role installed.
155 |
156 | * An option `regions` sets a *region configuration* for the role configuration.
157 |
158 | ##### region configuration
159 |
160 | A region configuration provides *region options* for region names.
161 |
162 | In the simplest case, a region configuration says how many role instances per region should be set up:
163 |
164 | ```yaml
165 | instances:
166 | producer:
167 | regions:
168 | Europe: 4
169 | Asia: 3
170 | NorthAmerica: 3
171 | ```
172 |
173 | A region name is one of the following: `Europe`, `Asia`, `NorthAmerica`, `random`, `default`.
174 |
175 | `Europe`, `Asia`, `NorthAmerica` region names are self-explanatory.
176 |
177 | `random` region indicates that instances must be distributed evenly across available regions.
178 |
179 | Region names are cloud provider-agnostic and can be configured in `~/.tank/regions.yml` (by default the predefined region config is copied and used at the moment of the first run creation).
180 |
181 | In general, a *region options* can be written as a set of various options - that are generally applicable and region-specific.
182 |
183 | * `count` region option specifies how many servers should be set up in the region.
184 |
185 | ##### Generally applicable options
186 |
187 | Generally applicable options can be specified in a number of contexts: *instances*, *role configuration*, *region configuration*.
188 |
189 | More local contexts have higher precedence over wrapping contexts,
190 | e.g. an option specified in a role configuration takes precedence over the same option specified at the `instances` level:
191 |
192 | ```yaml
193 | instances:
194 | type: standard
195 |
196 | boot:
197 | count: 1
198 | type: large
199 |
200 | producer:
201 | regions:
202 | random: 10
203 | ```
204 |
205 | The options are:
206 |
207 | * `type` - an instance type, which is a cloud-agnostic machine size.
208 | Available types: micro (~1 GB mem), small (~2 GB mem), standard (4GB), large (8GB), xlarge (16GB), xxlarge (32GB), huge (64GB)
209 | * `packetloss` - simulates bad network operation and sets the percent of lost packets. Note: TCP ports 1..1024 are not packetloss-ed.
210 |
211 | ##### Instance configuration examples
212 |
213 | A simple geographically distributed test case - [docs/testcase_geo_example.yml](../testcase_geo_example.yml).
214 |
215 | An example of utilizing generally applicable options and a region configuration can be found here [docs/testcase_geo_advanced_example.yml](../testcase_geo_advanced_example.yml).
216 |
217 | #### Ansible variables forwarding
218 |
219 | There is a way to pass some Ansible variables from a testcase to a cluster.
220 | This low-level feature can be used to tailor the blockchain for a particular test case.
221 | Variables can be specified in the `ansible` section of a testcase.
222 | Each variable will be prefixed with `bc_` before being passed to Ansible.
223 |
224 |
225 | ## Usage
226 |
227 | ### Run the tank
228 |
229 | Deploy a new cluster via
230 | ```shell
231 | tank cluster deploy
232 | ```
233 | or
234 | ```shell
235 | tank run
236 | ```
237 |
238 | This command will create a cluster dedicated to the specified test case.
239 | Such clusters are named *runs* in Tank terminology.
240 | There can be multiple coexisting runs on a developer's machine.
241 | Any changes to the testcase made after the `deploy` command won't affect the run.
242 |
243 | After the command is finished, you will see a listing of cluster machines and a run id, e.g.:
244 |
245 | ```shell
246 | IP HOSTNAME
247 | ------------- -------------------------------------
248 | 167.71.36.223 tank-polkadot-db2d81e031a1-boot-0
249 | 167.71.36.231 tank-polkadot-db2d81e031a1-monitoring
250 | 167.71.36.222 tank-polkadot-db2d81e031a1-producer-0
251 | 165.22.74.160 tank-polkadot-db2d81e031a1-producer-1
252 |
253 | Monitoring: http://167.71.36.231/
254 |
255 | Tank run id: festive_lalande
256 | ```
257 |
258 | You can also see the monitoring link - that's where all the metrics are collected (see below).
259 |
260 | The cluster is up and running at this moment.
261 | You can see its state on the dashboards or query cluster information via `info` and `inspect` commands (see below).
262 |
263 | ### Log in to the monitoring
264 |
265 | Tank uses ***grafana*** to visualize benchmark metrics. In order to access your ***grafana*** dashboard open the monitoring link in your browser.
266 | Access to dashboard requires entering ***grafana*** username and password.
You can modify ***Grafana*** username and password in the `~/.tank.yml` configuration file (go to `monitoring` in the `tank` section).
268 | If you have not defined these variables in your configuration file, type in 'tank' in the username and password fields.
269 | You will see cluster metrics in the predefined dashboards.
270 | You can query the metrics at `http://{the monitoring ip}/explore`.
271 |
272 | ### Current active runs
273 |
274 | There can be multiple tank runs at the same time. The runs list and brief information about each run can be seen via:
275 |
276 | ```shell
277 | tank cluster list
278 | ```
279 |
280 | ### Information about a run
281 |
282 | To list hosts of a cluster call
283 |
284 | ```shell
285 | tank cluster info hosts {run id here}
286 | ```
287 |
288 | To get a detailed cluster info call
289 |
290 | ```shell
291 | tank cluster inspect {run id here}
292 | ```
293 |
294 | ### Synthetic load
295 |
296 | Tank can run a javascript load profile on the cluster.
297 |
```shell
tank cluster bench <run id> <load profile> [--tps N] [--total-tx N]
```

`<run id>` - run ID

`<load profile>` - a js file with a load profile: custom logic which creates transactions to be sent to the cluster

`--tps` - total number of generated transactions per second,

`--total-tx` - total number of transactions to be sent.
309 |
310 | In the simplest case, a developer writes logic to create and send transaction, and
311 | Tank takes care of distributing and running the code, providing the requested tps.
312 |
313 | You can bench the same cluster with different load profiles by providing different arguments to the bench subcommand.
314 | The documentation on profile development can be found at [https://github.com/mixbytes/tank.bench-common](https://github.com/mixbytes/tank.bench-common#what-is-profile).
315 |
316 | Binding parts responsible for benching can be found [here](https://github.com/mixbytes?utf8=✓&q=tank.bench&type=&language=).
317 | Examples of load profiles can be found in `profileExamples` subfolders, e.g. [https://github.com/mixbytes/tank.bench-polkadot/tree/master/profileExamples](https://github.com/mixbytes/tank.bench-polkadot/tree/master/profileExamples).
318 |
319 | ### Shut down and remove a cluster
320 |
321 | Entire Tank data of a particular run (both in the cloud and on the developer's machine) will be irreversibly deleted:
322 |
323 | ```shell
tank cluster destroy {run id here}
325 | ```
326 |
327 | ### Other cluster commands
328 |
329 | The `cluster deploy` command actually does the following steps:
330 |
331 | * init
332 | * create
333 | * dependency
334 | * provision
335 |
336 | These steps can be executed step by step or repeated. This is low-level tank usage.
337 | Tank does not check for the correct order or applicability of these operations if you run them manually.
338 |
339 | For more information call `tank cluster -h`
340 |
341 | #### init
342 |
343 | It creates a run and prepares Terraform execution.
344 |
345 | #### plan
346 |
347 | This is a read-only command that generates and shows an execution plan by Terraform.
348 | The plan shows cloud resources that will be created during `create`.
349 |
350 | #### create
351 |
352 | It creates a cluster in a cloud by calling Terraform for the run.
353 |
354 | #### dependency
355 |
356 | It installs necessary Ansible dependencies (roles) for the run.
357 |
358 | #### provision
359 |
360 | It sets up all necessary software in a cluster by calling Ansible for the run.
361 |
--------------------------------------------------------------------------------
/docs/testcase_example.yml:
--------------------------------------------------------------------------------
1 | # Blockchain binding name
2 | # Names of available bindings can be found at resources/bindings.yml
3 | # Binding names are not predefined, they just have to correspond to an entry in ~/.tank/bindings.yml
4 | binding: polkadot
5 |
6 | # Configuration of instances for the chosen binding
7 | instances:
8 | boot: # this codename of the instance role will be included into the hostname
9 | # optionally, you can customize instances
10 | count: 1
11 | # cloud-agnostic machine size
12 | # the option can be specified on the instances level, per instance role, per region
13 | # available options: micro, small, standard, large, xlarge, xxlarge, huge
14 | type: large
15 |
16 | producer: 3
17 |
18 |
19 | # Optional low-level kung fu: passing ansible variables to the binding used.
20 | # Make sure you know what you're doing.
21 | ansible:
22 | # Each variable will be passed to ansible as -e bc_{var_name}={value}.
23 | # Any variable can be specified.
24 | path_general: /lol
25 |
--------------------------------------------------------------------------------
/docs/testcase_geo_advanced_example.yml:
--------------------------------------------------------------------------------
1 | binding: polkadot
2 |
3 | # Configuration of instances for the chosen binding
4 | instances:
5 | boot:
6 | count: 1
7 | type: large
8 |
9 | producer:
10 | regions:
11 | Europe: 4
12 |
13 | Asia:
14 | count: 3
15 | type: standard
16 | # percent of packets lost
17 | # the option can be specified on the instances level, per instance role, per region
18 | packetloss: 20
19 |
20 | NorthAmerica: 3
21 |
  # you still can provide the count, it has to match the sum of counts by region
23 | # count: 10
24 |
--------------------------------------------------------------------------------
/docs/testcase_geo_example.yml:
--------------------------------------------------------------------------------
1 | binding: polkadot
2 |
3 | # Configuration of instances for the chosen binding
4 | instances:
5 | boot:
6 | count: 1
7 | type: large
8 |
9 | producer:
10 | regions:
11 | random: 10
12 |
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | -r requirements.txt
2 |
3 | pytest
4 | pytest-cov
5 | coverage
6 | twine>=1.11.0
7 | setuptools>=38.6.0
8 | wheel>=0.31.0
9 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | cement==3.0.4
2 | ansible>=2.8.0
3 | jinja2
4 | pyyaml
5 | colorlog
6 | sh==1.12.13
7 | jsonschema==3.0.1
8 | filelock
9 | tabulate==0.8.3
10 | namesgenerator==0.3
11 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mixbytes/tank/b5595df8c97a101e074696a93898ccbfa210261d/setup.cfg
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 |
from setuptools import setup, find_packages
from tank.version import get_version

# Long description for PyPI is the project README, rendered as markdown.
with open('README.md', 'r') as readme_fh:
    LONG_DESCRIPTION = readme_fh.read()

# Runtime dependencies: every non-empty line of requirements.txt.
with open('requirements.txt', 'r') as req_fh:
    requires = [line.strip() for line in req_fh.read().split('\n') if line.strip()]

setup_options = dict(
    name='mixbytes-tank',
    version=get_version(),
    description='Benchmark engine for blockchains',
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    author='MixBytes LLC',
    author_email='hello@mixbytes.io',
    url='https://github.com/mixbytes/tank/',
    license='Apache-2.0',
    # https://pypi.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
        "Operating System :: MacOS",
        "Topic :: Software Development :: Testing",
        "Topic :: System :: Benchmark",
        "Topic :: System :: Distributed Computing",
        "Topic :: System :: Clustering",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: Implementation :: CPython"
    ],
    keywords='bench benchmark blockchain',
    packages=find_packages(exclude=['ez_setup', 'tests*']),
    include_package_data=True,
    install_requires=requires,
    python_requires='>=3',
    # Console entry point: the `tank` command dispatches into tank.main:main.
    entry_points="""
        [console_scripts]
        tank = tank.main:main
    """,
)

setup(**setup_options)
50 |
--------------------------------------------------------------------------------
/tank/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mixbytes/tank/b5595df8c97a101e074696a93898ccbfa210261d/tank/__init__.py
--------------------------------------------------------------------------------
/tank/bootstrap.py:
--------------------------------------------------------------------------------
1 |
2 | from .controllers.base import Base
3 |
4 |
def load(app):
    """Hook called by cement at bootstrap: register the base controller."""
    handler_registry = app.handler
    handler_registry.register(Base)
7 |
--------------------------------------------------------------------------------
/tank/controllers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mixbytes/tank/b5595df8c97a101e074696a93898ccbfa210261d/tank/controllers/__init__.py
--------------------------------------------------------------------------------
/tank/controllers/ansible.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
#
# Standalone script: runs the 'play.yml' playbook against a DigitalOcean
# dynamic inventory using the Ansible Python API directly.
# NOTE(review): executes at import time and does not appear to be used by the
# CLI controllers -- confirm whether it is still needed.

from __future__ import print_function

import os
import sys
from collections import namedtuple

from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.executor.playbook_executor import PlaybookExecutor

# DataLoader parses YAML/JSON files (inventory, playbooks, vars).
loader = DataLoader()

# Inventory is produced dynamically by the 'digital_ocean.py' inventory script.
inventory = InventoryManager(loader=loader, sources='digital_ocean.py')
variable_manager = VariableManager(loader=loader, inventory=inventory)
playbook_path = 'play.yml'

# Bail out early (exit code 0) when there is nothing to run.
if not os.path.exists(playbook_path):
    print('[INFO] The playbook does not exist')
    sys.exit()

# Ansible CLI options reproduced as a namedtuple, as expected by the
# pre-2.8 PlaybookExecutor API.
# NOTE(review): ansible>=2.8 (see requirements.txt) dropped the `options=`
# parameter of PlaybookExecutor -- confirm this script still runs.
Options = namedtuple('Options',
                     ['listtags', 'listtasks', 'listhosts', 'syntax',
                      'connection', 'module_path', 'forks', 'remote_user',
                      'private_key_file', 'ssh_common_args', 'ssh_extra_args',
                      'sftp_extra_args', 'scp_extra_args', 'become',
                      'become_method', 'become_user', 'verbosity', 'check',
                      'diff', 'skip_tags'])
# Connect over ssh as root, escalate with sudo, skip tasks tagged 'debug'.
options = Options(
    listtags=False, listtasks=False, listhosts=False, syntax=False,
    connection='ssh', module_path=None, forks=100, remote_user='root',
    private_key_file=None, ssh_common_args=None, ssh_extra_args=None,
    sftp_extra_args=None, scp_extra_args=None, become=True,
    become_method='sudo', become_user='root', verbosity=None, check=False,
    diff=False, skip_tags=['debug'])

# This can accommodate various other command line arguments, e.g.:
# variable_manager.extra_vars = {'hosts': 'mywebserver'}

# No connection/become passwords: key-based ssh and passwordless sudo assumed.
passwords = {}

pbex = PlaybookExecutor(
    playbooks=[playbook_path], inventory=inventory,
    variable_manager=variable_manager, loader=loader, options=options,
    passwords=passwords)

# Run the playbook; `results` is the aggregate ansible exit code.
results = pbex.run()
50 |
--------------------------------------------------------------------------------
/tank/controllers/base.py:
--------------------------------------------------------------------------------
1 |
2 | from cement import Controller
3 | from cement.utils.version import get_version_banner
4 |
5 | from tank.version import get_version
6 |
# Version banner shown by `tank --version`: tank version plus cement's banner.
VERSION_BANNER = """
Bench toolkit for blockchain {}
{}
""".format(get_version(), get_version_banner())
11 |
12 |
class Base(Controller):
    """Top-level cement controller: handles `-v/--version` and prints help."""

    class Meta:
        label = 'base'

        # text displayed at the top of --help output
        description = 'Bench toolkit for blockchain'

        # controller level arguments. ex: 'tank --version'
        arguments = [
            # add a version banner
            (
                ['-v', '--version'],
                {'action': 'version', 'version': VERSION_BANNER},
            ),
        ]

    def _default(self):
        """Default action if no sub-command is passed."""
        self.app.args.print_help()
32 |
--------------------------------------------------------------------------------
/tank/controllers/cluster.py:
--------------------------------------------------------------------------------
1 |
2 | import sys
3 | import json
4 |
5 | from cement import Controller, ex
6 | from tabulate import tabulate
7 |
8 | from tank.core.exc import TankError
9 | from tank.core.run import Run
10 | from tank.core.testcase import TestCase
11 | from tank.core.lambdas import first, second
12 |
13 |
class BaseClusterController(Controller):
    """Base controller for overriding: shared deploy/reporting logic."""

    def _deploy(self):
        """Create a new run from the testcase CLI argument and fully deploy it."""
        testcase = TestCase(first(self.app.pargs.testcase), self.app)
        run = Run.new_run(self.app, testcase)
        print('Created tank run: {}'.format(run.run_id))

        # The four low-level deploy stages, in their required order.
        run.init()
        run.create()
        run.dependency()
        run.provision()

        self._show_hosts(run.inspect())
        print('\nTank run id: {}'.format(run.run_id))

    def _show_hosts(self, run_inspect_data):
        """
        Print an IP/hostname table and the monitoring URL, if present.

        :param run_inspect_data: dict returned by Run.inspect()
        :raises TankError: if the data contains no 'cluster' section
        """
        if 'cluster' not in run_inspect_data:
            # Fixed grammar of the user-facing message ("There are no information").
            raise TankError('There is no information about hosts. Have you performed provision/deploy?')

        # Sort rows by hostname (the second column) for stable output.
        rows = sorted([[ip, i['hostname']] for ip, i in run_inspect_data['cluster'].items()], key=second)
        print(tabulate(rows, headers=['IP', 'HOSTNAME']))

        # Print the monitoring URL of the first host named '*-monitoring'.
        for ip, info in run_inspect_data['cluster'].items():
            if info['hostname'].endswith('-monitoring'):
                print('\nMonitoring: http://{}/'.format(ip))
                break
41 |
42 |
class EmbeddedCluster(BaseClusterController):
    """Embedded cluster controller for providing short commands."""

    class Meta:
        label = 'embedded cluster'
        # embedded: the commands appear directly under the top-level `tank`
        stacked_type = 'embedded'
        stacked_on = 'base'

    @ex(
        help='Create and setup a cluster (init, create, dependency, provision)',
        arguments=[(['testcase'], {'type': str, 'nargs': 1})]
    )
    def run(self):
        """Equal with cluster deploy."""
        self._deploy()
58 |
59 |
class NestedCluster(BaseClusterController):
    """`tank cluster <subcommand>` namespace: low-level run management."""

    class Meta:
        label = 'cluster'
        stacked_type = 'nested'
        stacked_on = 'base'

        # text displayed at the top of --help output
        description = 'Manipulating a cluster'

        # text displayed at the bottom of --help output
        title = 'Low level cluster management commands'
        help = 'Low level cluster management commands'

    @ex(help='Show clusters')
    def list(self):
        """Print a table with one row per existing run on this machine."""
        runs = Run.list_runs(self.app)

        def make_row(run):
            # NOTE(review): the '+ 1' presumably counts the monitoring node
            # on top of the blockchain instances -- confirm.
            return [
                run.run_id,
                run.created_at.strftime('%c'),
                run.testcase_copy.total_instances + 1,
                run.meta['testcase_filename']
            ]

        print(tabulate(list(map(make_row, runs)), headers=['RUN ID', 'CREATED', 'INSTANCES', 'TESTCASE']))

    @ex(help='Init a Tank run, download plugins and modules for Terraform', hide=True,
        arguments=[(['testcase'], {'type': str, 'nargs': 1})])
    def init(self):
        """Create a new run from a testcase file and prepare Terraform."""
        testcase = TestCase(first(self.app.pargs.testcase), self.app)
        run = Run.new_run(self.app, testcase)
        print('Created tank run: {}'.format(run.run_id))

        run.init()

    @ex(help='Generate and show an execution plan by Terraform', hide=True,
        arguments=[(['run_id'], {'type': str, 'nargs': 1})])
    def plan(self):
        """Read-only: show what `create` would do for the given run."""
        Run(self.app, first(self.app.pargs.run_id)).plan()

    @ex(help='Create instances for cluster', hide=True,
        arguments=[(['run_id'], {'type': str, 'nargs': 1})])
    def create(self):
        """Create cloud instances for the given run via Terraform."""
        Run(self.app, first(self.app.pargs.run_id)).create()

    @ex(help='Install Ansible roles from Galaxy or SCM', hide=True,
        arguments=[(['run_id'], {'type': str, 'nargs': 1})])
    def dependency(self):
        """Install the Ansible role dependencies of the given run."""
        Run(self.app, first(self.app.pargs.run_id)).dependency()

    @ex(help='Setup instances: configs, packages, services, etc', hide=True,
        arguments=[(['run_id'], {'type': str, 'nargs': 1})])
    def provision(self):
        """Set up software on the run's cluster via Ansible."""
        Run(self.app, first(self.app.pargs.run_id)).provision()

    @ex(help='Runs bench on prepared cluster',
        arguments=[
            (['run_id'],
             {'type': str, 'nargs': 1}),
            (['load_profile'],
             {'type': str, 'nargs': 1}),
            (['--tps'],
             {'help': 'set global transactions per second generation rate',
              'type': int}),
            (['--total-tx'],
             {'help': 'how many transactions to send',
              'type': int}),
        ])
    def bench(self):
        """Run the given js load profile against the run's cluster."""
        Run(self.app, first(self.app.pargs.run_id)).bench(
            first(self.app.pargs.load_profile), self.app.pargs.tps, self.app.pargs.total_tx)

    @ex(help='Destroy all instances of the cluster',
        arguments=[(['run_id'], {'type': str, 'nargs': 1})])
    def destroy(self):
        """Irreversibly delete the run's cloud resources."""
        Run(self.app, first(self.app.pargs.run_id)).destroy()

    @ex(help='Low-level info about a run',
        arguments=[(['run_id'], {'type': str, 'nargs': 1})])
    def inspect(self):
        """Dump the run's full inspection data as JSON to stdout."""
        data = Run(self.app, first(self.app.pargs.run_id)).inspect()
        json.dump(data, sys.stdout, indent=4, sort_keys=True)

    @ex(help='Info about a run',
        arguments=[(['info_type'], {'choices': ['hosts'], 'nargs': 1}),
                   (['run_id'], {'type': str, 'nargs': 1})])
    def info(self):
        """Show a selected aspect of a run (currently only 'hosts')."""
        info_type = first(self.app.pargs.info_type)
        data = Run(self.app, first(self.app.pargs.run_id)).inspect()

        if info_type == 'hosts':
            self._show_hosts(data)

    @ex(
        help='Create and setup a cluster (init, create, dependency, provision)',
        arguments=[(['testcase'], {'type': str, 'nargs': 1})]
    )
    def deploy(self):
        """Full deploy: init, create, dependency, provision."""
        self._deploy()
161 |
--------------------------------------------------------------------------------
/tank/core/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # module tank.core
3 | #
4 |
5 | import os.path
6 |
7 | from cement.utils import fs
8 |
9 |
def resource_path(*path_parts: str) -> str:
    """
    Resolve the absolute path of a packaged resource.

    :param path_parts: path components under the ``resources`` directory
    :returns: absolute path to the resource
    :raises ValueError: if any component contains a parent directory reference
    """
    # Reject '..' anywhere inside a component, not only as an exact component:
    # the old check ('..' in path_parts) let e.g. 'a/../../etc' escape the
    # resources directory.
    for part in path_parts:
        if '..' in part.replace('\\', '/').split('/'):
            raise ValueError('parent directory references are forbidden')

    tank_src = os.path.dirname(os.path.dirname(fs.abspath(__file__)))
    return fs.join(tank_src, 'resources', *path_parts)
19 |
--------------------------------------------------------------------------------
/tank/core/binding.py:
--------------------------------------------------------------------------------
1 | #
2 | # module tank.core.binding
3 | #
4 | # Binding of the Tank core to a particular blockchain.
5 | #
6 |
7 | import os.path
8 | import copy
9 | from shutil import copy2
10 |
11 | from cement.utils import fs
12 |
13 | from tank.core import resource_path
14 | from tank.core.exc import TankConfigError
15 | from tank.core.utils import yaml_load
16 |
17 |
class AnsibleBinding:
    """
    Ansible-related part of a blockchain binding.
    """

    # Name under which the blockchain role is installed.
    BLOCKCHAIN_ROLE_NAME = 'tank.blockchain'

    def __init__(self, app, binding_name: str):
        """
        Remember the app and the binding to work with.

        :param app: Tank app
        :param binding_name: codename of the binding to use
        """
        self._app = app
        self.binding_name = binding_name

    def get_dependencies(self):
        """
        Provide ansible dependencies in the form of requirements.yml records.

        :returns: dependency record list
        :raises TankConfigError: if the binding is missing from bindings.yml
        """
        bindings_conf = _BindingsConfig(self._app)
        binding_settings = bindings_conf.config.get(self.binding_name)
        if binding_settings is None:
            raise TankConfigError('Configuration for binding named {} is not found under {}'.format(
                self.binding_name, bindings_conf.config_file
            ))

        ansible_settings = binding_settings['ansible']
        record = {
            'src': ansible_settings['src'],
            'name': type(self).BLOCKCHAIN_ROLE_NAME,
        }
        # The version pin is optional in the binding config.
        if 'version' in ansible_settings:
            record['version'] = ansible_settings['version']

        return [record]
53 |
54 |
class _BindingsConfig:
    """Loader of the per-user bindings.yml configuration."""

    def __init__(self, app):
        self._app = app
        self.config_file = fs.join(app.user_dir, 'bindings.yml')

        self._seed_default_config()

        # TODO validate
        self._config = yaml_load(self.config_file)

    def _seed_default_config(self):
        """Copy the packaged default bindings.yml to the user dir on first use."""
        # TODO atomically
        if not os.path.exists(self.config_file):
            copy2(resource_path('bindings.yml'), self.config_file)

    @property
    def config(self):
        """A deep copy of the parsed config, safe for the caller to mutate."""
        return copy.deepcopy(self._config)
72 |
--------------------------------------------------------------------------------
/tank/core/cloud_settings.py:
--------------------------------------------------------------------------------
1 | #
2 | # module tank.core.cloud_settings
3 | #
4 |
5 | from enum import Enum
6 |
7 | import yaml
8 | import jsonschema
9 |
10 | from tank.core.exc import TankConfigError
11 |
12 |
class CloudProvider(Enum):
    """Cloud providers supported by Tank."""

    DIGITAL_OCEAN = 'digitalocean'
    GOOGLE_CLOUD_ENGINE = 'gce'

    def __repr__(self):
        return '<{}.{}>'.format(self.__class__.__name__, self.name)

    def __str__(self):
        """
        Serializes a member into string.
        """
        return self.value

    @classmethod
    def from_string(cls, str_value):
        """
        Deserializes a member from string.
        :param str_value: serialized form
        :return: enum member or None if not found
        """
        return next((member for member in cls if member.value == str_value), None)
38 |
39 |
class CloudUserSettings:
    """
    Management and validation of cloud provider user-specific settings.
    """

    def __init__(self, app_config):
        """
        Read and validate settings from the app config.

        :param app_config: cement config object of the Tank app
        :raises TankConfigError: if the provider is unknown or any section fails validation
        """
        self.provider = CloudProvider.from_string(app_config.get('tank', 'provider'))
        if self.provider is None:
            raise TankConfigError('Cloud provider is not specified or not known')

        # Grafana credentials for the monitoring node.
        self.monitoring_vars = app_config.get_dict()['tank'].get('monitoring')
        try:
            jsonschema.validate(self.monitoring_vars, self.__class__._MONITORING_SCHEMA)
        except jsonschema.ValidationError as e:
            raise TankConfigError('Failed to validate admin_user/password monitoring settings', e)

        self.provider_vars = app_config.get_dict().get(self.provider.value)
        if self.provider_vars is None or not isinstance(self.provider_vars, dict):
            raise TankConfigError('Cloud provider is not configured')

        # Provider-level ansible overrides live under the 'ansible' key and are
        # validated separately from the provider vars proper.
        self.ansible_vars = self.provider_vars.pop('ansible', dict())

        try:
            jsonschema.validate(self.provider_vars, self.__class__._SCHEMAS[self.provider])
        except jsonschema.ValidationError as e:
            raise TankConfigError('Failed to validate config for cloud provider {}'.format(self.provider), e)

        # Defensive check: both provider schemas already require pvt_key, but
        # ansible cannot work without it, so fail with a config error instead of
        # relying on `assert` (which is stripped under `python -O`).
        if 'pvt_key' not in self.provider_vars:
            raise TankConfigError('pvt_key is required for ansible')

        try:
            jsonschema.validate(self.ansible_vars, self.__class__._ANSIBLE_SCHEMA)
        except jsonschema.ValidationError as e:
            raise TankConfigError('Failed to validate ansible config for cloud provider {}'.format(self.provider), e)

        if 'private_interface' not in self.ansible_vars:
            # Per-provider defaults for the primary private NIC name.
            self.ansible_vars['private_interface'] = {
                CloudProvider.DIGITAL_OCEAN: 'eth0',
                CloudProvider.GOOGLE_CLOUD_ENGINE: 'ens4',
            }[self.provider]


    # JSON schemas for the provider-specific config sections.
    _SCHEMAS = {
        CloudProvider.DIGITAL_OCEAN: yaml.safe_load(r'''
            type: object
            additionalProperties: False
            required:
              - token
              - pvt_key
              - ssh_fingerprint
            properties:
              token:
                type: string
              pvt_key:
                type: string
              ssh_fingerprint:
                type: string
        '''),

        CloudProvider.GOOGLE_CLOUD_ENGINE: yaml.safe_load(r'''
            type: object
            additionalProperties: False
            required:
              - pub_key
              - pvt_key
              - cred_path
              - project
            properties:
              pub_key:
                type: string
              pvt_key:
                type: string
              cred_path:
                type: string
              project:
                type: string
        '''),
    }

    # Schema for the optional per-provider 'ansible' overrides.
    _ANSIBLE_SCHEMA = yaml.safe_load(r'''
        type: object
        additionalProperties: False
        properties:
          private_interface:
            type: string
    ''')

    # Schema for the 'monitoring' section (grafana credentials).
    _MONITORING_SCHEMA = yaml.safe_load(r'''
        type: object
        additionalProperties: False
        required:
          - admin_user
          - admin_password
        properties:
          admin_user:
            type: string
          admin_password:
            type: string
    ''')
138 |
139 |
--------------------------------------------------------------------------------
/tank/core/exc.py:
--------------------------------------------------------------------------------
1 |
class TankError(RuntimeError):
    """Root of the Tank-specific exception hierarchy."""
7 |
8 |
class TankConfigError(TankError):
    """Raised when the Tank configuration is missing or invalid."""
14 |
15 |
class TankTestCaseError(TankError):
    """Raised for errors specific to a test case configuration."""
21 |
--------------------------------------------------------------------------------
/tank/core/lambdas.py:
--------------------------------------------------------------------------------
1 | #
2 | # module tank.core.lambda
3 | #
4 |
5 |
def first(subscriptable):
    """Return the item at index 0 of *subscriptable*."""
    return subscriptable[0]
8 |
9 |
def second(subscriptable):
    """Return the item at index 1 of *subscriptable*."""
    return subscriptable[1]
12 |
13 |
def third(subscriptable):
    """Return the item at index 2 of *subscriptable*."""
    return subscriptable[2]
16 |
--------------------------------------------------------------------------------
/tank/core/regions.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import os
3 | from shutil import copy2
4 |
5 | from cement.utils import fs
6 |
7 | from tank.core import resource_path
8 | from tank.core.utils import yaml_load
9 |
10 |
class RegionsConfig(object):
    """Config object for regions."""

    FILE_NAME = 'regions.yml'
    # Cloud-agnostic region names Tank understands.
    REGIONS = ('Europe', 'Asia', 'NorthAmerica',)

    def __init__(self, app):
        """Load the per-user regions config, creating it from the bundled default."""
        self._config_file = fs.join(app.user_dir, self.FILE_NAME)
        self._ensure_user_copy()
        self._config = yaml_load(self._config_file)

    def _ensure_user_copy(self):
        """On first run, copy the packaged regions.yml into the user dir."""
        if not os.path.exists(self._config_file):
            copy2(resource_path('regions.yml'), self._config_file)

    @property
    def config(self):
        """A deep copy of the parsed config, safe for the caller to mutate."""
        return copy.deepcopy(self._config)
30 |
--------------------------------------------------------------------------------
/tank/core/run.py:
--------------------------------------------------------------------------------
1 | #
2 | # module tank.core.run
3 | #
4 | import os
5 | import sys
6 | import stat
7 | import tempfile
8 | from shutil import rmtree
9 | from shutil import copytree
10 | from time import time
11 | from typing import Dict
12 | from uuid import uuid4
13 | import json
14 | from datetime import datetime
15 |
16 | import sh
17 | from cement import fs
18 | from filelock import FileLock
19 | import namesgenerator
20 |
21 | from tank.core import resource_path
22 | from tank.core.binding import AnsibleBinding
23 | from tank.core.exc import TankError, TankConfigError
24 | from tank.core.testcase import TestCase
25 | from tank.core.tf import PlanGenerator
26 | from tank.core.utils import yaml_load, yaml_dump, grep_dir, json_load, sha256
27 | from tank.terraform_installer import TerraformInstaller, TerraformInventoryInstaller
28 |
29 |
class Run:
    """
    Single run of a tank testcase.

    All per-run state (testcase copy, meta.yml, Terraform plan/state,
    Ansible roles, logs) lives in its own directory under
    `<user_dir>/run/<run_id>`. Public commands are serialized through a
    per-run file lock.

    TODO detect and handle CloudUserSettings change.
    """

    @classmethod
    def new_run(cls, app, testcase: TestCase):
        """Allocate a new run directory for the testcase and return a Run."""
        run_id = namesgenerator.get_random_name()

        fs.ensure_dir_exists(cls._runs_dir(app))

        # build the run dir under a temporary name, then rename it into place
        temp_dir = tempfile.mkdtemp(prefix='_{}'.format(run_id), dir=cls._runs_dir(app))
        cls._save_meta(temp_dir, testcase)

        # make a copy to make sure any alterations of the source won't affect us
        testcase.save(fs.join(temp_dir, 'testcase.yml'))

        copytree(resource_path('scripts'), temp_dir+'/scripts')

        # TODO prevent collisions
        os.rename(temp_dir, fs.join(cls._runs_dir(app), run_id))

        return cls(app, run_id)

    @classmethod
    def list_runs(cls, app):
        """Return a Run object for every existing run directory."""
        fs.ensure_dir_exists(cls._runs_dir(app))
        return [cls(app, run_id) for run_id in grep_dir(cls._runs_dir(app), '^[a-zA-Z0-9][a-zA-Z_0-9]*$', isdir=True)]


    def __init__(self, app, run_id: str):
        """Attach to an existing run directory identified by run_id."""
        self._app = app
        self.run_id = run_id

        # install terraform and terraform-inventory
        TerraformInstaller(storage_path=app.installation_dir).install()
        TerraformInventoryInstaller(storage_path=app.installation_dir).install()

        self._testcase = TestCase(fs.join(self._dir, 'testcase.yml'), app)
        self._meta = yaml_load(fs.join(self._dir, 'meta.yml'))

    def init(self):
        """
        Download plugins and modules for Terraform.
        """
        with self._lock:
            self._generate_tf_plan()

            sh.Command(self._app.terraform_run_command)(
                "init", "-backend-config", "path={}".format(self._tf_state_file), self._tf_plan_dir,
                _env=self._make_env(), _out=sys.stdout, _err=sys.stderr)

    def plan(self):
        """
        Generate and show an execution plan by Terraform.
        """
        with self._lock:
            sh.Command(self._app.terraform_run_command)(
                "plan", "-input=false", self._tf_plan_dir,
                _env=self._make_env(), _out=sys.stdout, _err=sys.stderr)

    def create(self):
        """
        Create instances for the cluster.
        """
        self._check_private_key_permissions()

        with self._lock:
            sh.Command(self._app.terraform_run_command)(
                "apply", "-auto-approve", "-parallelism=51", self._tf_plan_dir,
                _env=self._make_env(), _out=sys.stdout, _err=sys.stderr)

    def dependency(self):
        """
        Install Ansible roles from Galaxy or SCM.
        """
        with self._lock:
            # start with the core requirements shipped with tank ...
            ansible_deps = yaml_load(resource_path('ansible', 'ansible-requirements.yml'))

            # ... plus the roles required by the testcase's binding
            ansible_deps.extend(AnsibleBinding(self._app, self._testcase.binding).get_dependencies())

            requirements_file = fs.join(self._dir, 'ansible-requirements.yml')
            yaml_dump(requirements_file, ansible_deps)

            sh.Command("ansible-galaxy")(
                "install", "-f", "-r", requirements_file,
                _env=self._make_env(), _out=sys.stdout, _err=sys.stderr)

    def provision(self):
        """Configure the created instances with the core Ansible playbook."""
        self._check_private_key_permissions()

        extra_vars = {
            # including blockchain-specific part of the playbook
            'blockchain_ansible_playbook':
                fs.join(self._roles_path, AnsibleBinding.BLOCKCHAIN_ROLE_NAME, 'tank', 'playbook.yml'),
            # saving a report of the important cluster facts
            '_cluster_ansible_report': self._cluster_report_file,
            # grafana monitoring login/password
            'monitoring_user_login': self._app.cloud_settings.monitoring_vars['admin_user'],
            'monitoring_user_password': self._app.cloud_settings.monitoring_vars['admin_password'],
        }

        with self._lock:
            sh.Command("ansible-playbook")(
                "-f", self._app.ansible_config['forks'],
                "-u", "root",
                "-i", self._app.terraform_inventory_run_command,
                "--extra-vars", self._ansible_extra_vars(extra_vars),
                "--private-key={}".format(self._app.cloud_settings.provider_vars['pvt_key']),
                resource_path('ansible', 'core.yml'),
                _env=self._make_env(), _out=sys.stdout, _err=sys.stderr, _cwd=self._tf_plan_dir)

    def inspect(self):
        """Return run info: meta, testcase content and, if present, the cluster report."""
        with self._lock:
            result = {
                'meta': self.meta,
                'testcase': self._testcase.content,
            }

            if os.path.exists(self._cluster_report_file):
                result['cluster'] = self._cluster_report()

            return result

    def bench(self, load_profile: str, tps: int, total_tx: int):
        """Upload the load profile and run the bench utility on capable nodes.

        tps/total_tx are cluster-wide figures split evenly between all
        instances; either may be None to leave that limit unset.
        """
        self._check_private_key_permissions()

        bench_command = 'bench --common-config=/tool/bench.config.json ' \
                        '--module-config=/tool/blockchain.bench.config.json'
        if tps is not None:
            # It's assumed, that every node is capable of running the bench.
            per_node_tps = max(int(tps / self._testcase.total_instances), 1)
            bench_command += ' --common.tps {}'.format(per_node_tps)

        if total_tx is not None:
            # It's assumed, that every node is capable of running the bench.
            per_node_tx = max(int(total_tx / self._testcase.total_instances), 1)
            bench_command += ' --common.stopOn.processedTransactions {}'.format(per_node_tx)

        # FIXME extract hostnames from inventory, but ignore monitoring
        ips = [ip for ip, i in self._cluster_report().items() if i['bench_present']]
        if not ips:
            raise TankError('There are no nodes capable of running the bench util')
        host_patterns = ','.join(ips)

        with self._lock:
            # send the load_profile to the cluster
            extra_vars = {'load_profile_local_file': fs.abspath(load_profile)}

            sh.Command("ansible-playbook")(
                "-f", self._app.ansible_config['forks'],
                "-u", "root",
                "-i", self._app.terraform_inventory_run_command,
                "--extra-vars", self._ansible_extra_vars(extra_vars),
                "--private-key={}".format(self._app.cloud_settings.provider_vars['pvt_key']),
                "-t", "send_load_profile",
                fs.join(self._roles_path, AnsibleBinding.BLOCKCHAIN_ROLE_NAME, 'tank', 'send_load_profile.yml'),
                _env=self._make_env(), _out=sys.stdout, _err=sys.stderr, _cwd=self._tf_plan_dir)

            # run the bench
            sh.Command("ansible")(
                '-f', '150', '-B', '3600', '-P', '10', '-u', 'root',
                '-i', self._app.terraform_inventory_run_command,
                '--private-key={}'.format(self._app.cloud_settings.provider_vars['pvt_key']),
                host_patterns,
                '-a', bench_command,
                _env=self._make_env(), _out=sys.stdout, _err=sys.stderr, _cwd=self._tf_plan_dir)

    def destroy(self):
        """Destroy the cluster via Terraform and remove the run directory."""
        with self._lock:
            sh.Command(self._app.terraform_run_command)(
                "destroy", "-auto-approve", "-parallelism=100",
                self._tf_plan_dir,
                _env=self._make_env(), _out=sys.stdout, _err=sys.stderr)

            # atomic move before cleanup
            temp_dir = fs.join(self.__class__._runs_dir(self._app), '_{}'.format(self.run_id))
            os.rename(self._dir, temp_dir)

        # cleanup with the lock released
        rmtree(temp_dir)


    @property
    def meta(self) -> Dict:
        """Copy of the run metadata (testcase_filename, created, setup_id)."""
        return dict(self._meta)

    @property
    def created_at(self) -> datetime:
        """Creation time of the run (local time, from the 'created' stamp)."""
        return datetime.fromtimestamp(self.meta['created'])

    @property
    def testcase_copy(self) -> TestCase:
        """
        Copy of the original testcase.
        """
        return self._testcase


    @classmethod
    def _runs_dir(cls, app) -> str:
        """Parent directory holding all run directories."""
        return fs.join(app.user_dir, 'run')

    @classmethod
    def _save_meta(cls, run_dir: str, testcase: TestCase):
        """Write meta.yml: source filename, creation timestamp, random setup id."""
        yaml_dump(fs.join(run_dir, 'meta.yml'), {
            'testcase_filename': fs.abspath(testcase.filename),
            'created': int(time()),
            'setup_id': sha256(uuid4().bytes)[:12],
        })

    def _ansible_extra_vars(self, extra: Dict = None) -> str:
        """JSON-encode cloud + testcase vars (prefixed 'bc_') merged with *extra*."""
        a_vars = dict(('bc_{}'.format(k), str(v)) for k, v in self._app.cloud_settings.ansible_vars.items())
        a_vars.update(dict(('bc_{}'.format(k), str(v)) for k, v in self._testcase.ansible.items()))

        if extra is not None:
            a_vars.update(extra)

        return json.dumps(a_vars, sort_keys=True)

    def _make_env(self) -> Dict:
        """Build the subprocess environment (TF_*, TF_VAR_*, ANSIBLE_* settings)."""
        fs.ensure_dir_exists(self._tf_data_dir)
        fs.ensure_dir_exists(self._log_dir)

        env = self._app.app_env

        env["TF_LOG_PATH"] = fs.join(self._log_dir, 'terraform.log')
        env["TF_DATA_DIR"] = self._tf_data_dir
        env["TF_VAR_state_path"] = self._tf_state_file
        # truncated to 10 chars — presumably to satisfy provider resource-name
        # length limits; TODO confirm against the provider templates
        env["TF_VAR_blockchain_name"] = self._testcase.binding.replace('_', '-')[:10]
        env["TF_VAR_setup_id"] = self._meta['setup_id']
        env["TF_VAR_scripts_path"] = fs.join(self._dir, 'scripts')

        for k, v in self._app.cloud_settings.provider_vars.items():
            env["TF_VAR_{}".format(k)] = v

        env["ANSIBLE_ROLES_PATH"] = self._roles_path
        env["ANSIBLE_CONFIG"] = resource_path('ansible', 'ansible.cfg')
        env["ANSIBLE_LOG_PATH"] = fs.join(self._log_dir, 'ansible.log')

        return env

    def _generate_tf_plan(self):
        """
        Generation of Terraform manifests specific for this run and user preferences.
        """
        PlanGenerator(self._app, self._testcase).generate(self._tf_plan_dir)

    def _cluster_report(self):
        """Parsed JSON cluster facts report produced during provisioning."""
        return json_load(self._cluster_report_file)

    def _check_private_key_permissions(self):
        """
        Checks whether groups and others have 0 access to private key
        """
        # oct -'0o77', bin - '0b000111111', which is the same as ----rwxrwx
        NOT_OWNER_PERMISSION = stat.S_IRWXG + stat.S_IRWXO

        file_stat: os.stat_result = os.stat(self._app.cloud_settings.provider_vars['pvt_key'])
        file_mode = stat.S_IMODE(file_stat.st_mode)

        if file_mode & NOT_OWNER_PERMISSION != 0:
            raise TankConfigError('Private key has wrong permission mask.')

    @property
    def _dir(self) -> str:
        """This run's directory."""
        return fs.join(self.__class__._runs_dir(self._app), self.run_id)

    @property
    def _lock(self) -> FileLock:
        """Inter-process lock guarding this run's state."""
        return FileLock(fs.join(self._dir, '.lock'))

    @property
    def _tf_data_dir(self) -> str:
        """Terraform working data directory (exported as TF_DATA_DIR)."""
        return fs.join(self._dir, 'tf_data')

    @property
    def _tf_plan_dir(self) -> str:
        """Directory with the generated Terraform manifests."""
        return fs.join(self._dir, 'tf_plan')

    @property
    def _tf_state_file(self) -> str:
        """Path of the Terraform state file."""
        return fs.join(self._dir, "blockchain.tfstate")

    @property
    def _log_dir(self) -> str:
        """Directory collecting terraform/ansible log files."""
        return fs.join(self._dir, 'log')

    @property
    def _roles_path(self) -> str:
        """Directory where Ansible roles are installed for this run."""
        return fs.join(self._dir, "ansible_roles")

    @property
    def _cluster_report_file(self) -> str:
        """Path of the JSON cluster facts report."""
        return fs.join(self._dir, 'cluster_ansible_report.json')
327 |
--------------------------------------------------------------------------------
/tank/core/testcase.py:
--------------------------------------------------------------------------------
1 | #
2 | # module tank.core.testcase
3 | #
4 | import copy
5 | from typing import List
6 |
7 | from jsonschema import Draft4Validator, ValidationError
8 |
9 | from tank.core import resource_path
10 | from tank.core.exc import TankTestCaseError
11 | from tank.core.regions import RegionsConfig
12 | from tank.core.utils import yaml_load, yaml_dump, ratio_from_percent, split_evenly
13 |
14 |
class InstancesCanonizer(object):
    """Canonize config for instances.

    Accept only valid instances content.
    Transforms dict to following format:

    Role:
        Region1:
            count: ...
            type: ...
            packetloss: ...
        Region2:
            count: ...
            type: ...
            packetloss: ...
    """

    # Fallbacks used when an option is set neither globally nor per role/region.
    _GENERAL_OPTIONS = {
        'type': 'small',
        'packetloss': 0,
    }

    def __init__(self, instances_content: dict):
        """Load content and defaults.

        :param instances_content: 'instances' section of a validated testcase
        """
        self._content = instances_content
        self._global_defaults = self._load_defaults()

    def _load_defaults(self) -> dict:
        """Extract global defaults from the content.

        Global options ('type', 'packetloss') are popped out of the content
        so that only role definitions remain; the default region is 'default'.
        """
        defaults = dict()

        for option, default in self._GENERAL_OPTIONS.items():
            defaults[option] = self._content.get(option, default)
            # drop the global option so it is not mistaken for a role below
            if option in self._content:
                self._content.pop(option)

        defaults['region'] = 'default'
        return defaults

    def _build_configuration(self, count: int, machine_type: str = None, packetloss: int = None) -> dict:
        """Build minimal configuration from parameters.

        Unset parameters fall back to the global defaults. The parameter is
        named `machine_type` to avoid shadowing the builtin `type`.
        """
        return {
            'count': count,
            'type': self._global_defaults['type'] if machine_type is None else machine_type,
            'packetloss': ratio_from_percent(
                self._global_defaults['packetloss'] if packetloss is None else packetloss
            ),
        }

    def canonize(self) -> dict:
        """Convert to canonized config."""
        canonized_dict = dict()

        for role, config in self._content.items():
            if isinstance(config, int):  # shortest number configuration
                canonized_dict[role] = {
                    self._global_defaults['region']: self._build_configuration(count=config),
                }
            elif 'regions' in config:  # dict configuration with regions
                canonized_dict[role] = dict()

                for region, region_config in config['regions'].items():
                    if isinstance(region_config, int):
                        canonized_dict[role][region] = self._build_configuration(
                            count=region_config,
                            machine_type=config.get('type'),
                            packetloss=config.get('packetloss'),
                        )
                    else:
                        # per-region options win over per-role options
                        canonized_dict[role][region] = self._build_configuration(
                            count=region_config['count'],
                            machine_type=region_config.get('type', config.get('type')),
                            packetloss=region_config.get('packetloss', config.get('packetloss')),
                        )
            else:  # dict configuration without regions (config must contain count param in this case)
                canonized_dict[role] = {
                    self._global_defaults['region']: self._build_configuration(
                        count=config['count'],
                        machine_type=config.get('type'),
                        packetloss=config.get('packetloss'),
                    )
                }

        return canonized_dict
102 |
103 |
class RegionsConverter(object):
    """Convert and merge regions in canonized instances configuration.

    Human-readable region names are translated into provider-specific
    machine-readable ones; the pseudo-region 'random' spreads the count
    evenly over all available regions. Entries that end up with identical
    (region, type, packetloss) are merged by summing their counts.

    Example:
        Role: {default: {count: 1, type: small, packetloss: 0}}
    becomes:
        Role: [{region: FRA1, count: 1, type: small, packetloss: 0}]
    """

    _GROUP_PARAMETERS = ('region', 'type', 'packetloss',)

    def __init__(self, app):
        """Save provider, load RegionsConfig."""
        self._provider = app.provider
        self._regions_config = RegionsConfig(app).config
        self._available_regions = RegionsConfig.REGIONS

    def _merge_configurations(self, machine_configurations: List[dict]) -> List[dict]:
        """Collapse configurations sharing the same grouping parameters."""
        merged = dict()

        for conf in machine_configurations:
            group_key = tuple(conf[param] for param in self._GROUP_PARAMETERS)
            existing = merged.get(group_key)
            if existing is None:
                merged[group_key] = conf
            else:
                existing['count'] += conf['count']

        return list(merged.values())

    def _convert_region(self, human_readable: str) -> str:
        """Translate a human-readable region into the provider's region id."""
        provider_regions = self._regions_config[self._provider]
        return provider_regions[human_readable]

    def convert(self, instances_config: dict) -> dict:
        """Convert configuration to machine readable."""
        converted_config = dict()

        for role, regions in instances_config.items():
            machine_confs = []

            for region_name, region_conf in regions.items():
                if region_name == 'random':
                    # spread the count as evenly as possible over all regions
                    shares = split_evenly(region_conf['count'], len(self._available_regions))
                    for idx, share in enumerate(shares):
                        if not share:
                            continue
                        machine_confs.append({
                            'region': self._convert_region(self._available_regions[idx]),
                            'count': share,
                            'packetloss': region_conf['packetloss'],
                            'type': region_conf['type'],
                        })
                else:
                    entry = dict(region_conf)
                    entry['region'] = self._convert_region(region_name)
                    machine_confs.append(entry)

            converted_config[role] = self._merge_configurations(machine_confs)

        return converted_config
187 |
188 |
class TestCaseValidator(object):
    """Validates the content of a testcase file."""

    SCHEMA_FILE = resource_path('testcase_schema.yml')

    def __init__(self, content: dict, filename):
        """Remember the parsed content and its source filename (for messages)."""
        self._content = content
        self._filename = filename

    def validate(self):
        """Run every check; raises TankTestCaseError on the first failure."""
        self._validate_schema()
        self._check_reserved_names()
        self._check_counts_equality()

    def _validate_schema(self):
        """Validate the content against the bundled JSON schema."""
        schema = yaml_load(self.SCHEMA_FILE)
        try:
            Draft4Validator(schema).validate(self._content)
        except ValidationError as e:
            raise TankTestCaseError('Failed to validate testcase {}'.format(self._filename), e)

    def _check_reserved_names(self):
        """Reject roles whose (case-insensitive) name is reserved."""
        reserved_names = ('count', 'monitoring')

        for role in self._content['instances']:
            if role.lower() in reserved_names:
                raise TankTestCaseError('\'{name}\' instance name is reserved'.format(name=role))

    def _check_counts_equality(self):
        """Ensure a role's total count equals the sum of its per-region counts."""
        for role, config in self._content['instances'].items():
            if not isinstance(config, dict):
                continue
            if 'count' not in config or 'regions' not in config:
                continue

            declared_total = config['count']
            regions_total = sum(
                rc if isinstance(rc, int) else rc['count']
                for rc in config['regions'].values()
            )

            if declared_total != regions_total:
                raise TankTestCaseError(
                    'The total count does not match sum of count in regions in role {}'.format(role)
                )
237 |
238 |
class TestCase(object):
    """Entity describing single test performed by Tank."""

    def __init__(self, filename, app):
        """Read the testcase file, validate it and build canonized content."""
        self._app = app
        self._filename = filename
        self._original_content = yaml_load(filename)

        # work on a copy: validation/canonization must not touch the original
        self._content = copy.deepcopy(self._original_content)
        TestCaseValidator(self._content, filename).validate()
        self._content = self._prepare_content()

    @property
    def filename(self) -> str:
        """Path of the source testcase file."""
        return self._filename

    @property
    def binding(self) -> str:
        """Return provided binding."""
        return self._content['binding']

    @property
    def instances(self) -> dict:
        """Return copy of instances."""
        return copy.deepcopy(self._content['instances'])

    @property
    def total_instances(self) -> int:
        """Calculate amount of all instances.

        It works only after instances config canonization and converting.
        """
        return sum(
            machine_conf['count']
            for machine_confs in self._content['instances'].values()
            for machine_conf in machine_confs
        )

    @property
    def ansible(self) -> dict:
        """Return copy of ansible config."""
        return copy.deepcopy(self._content['ansible'])

    @property
    def content(self) -> dict:
        """Return copy of all content."""
        return copy.deepcopy(self._content)

    def save(self, filename):
        """Save original content to file."""
        yaml_dump(filename, self._original_content)

    def _prepare_content(self):
        """Build the canonized, machine-readable content dict."""
        canonized_instances = InstancesCanonizer(self._content['instances']).canonize()
        return {
            'instances': RegionsConverter(self._app).convert(canonized_instances),
            'binding': self._content['binding'],
            'ansible': self._content.get('ansible', dict()),
        }
301 |
--------------------------------------------------------------------------------
/tank/core/tf.py:
--------------------------------------------------------------------------------
1 | #
2 | # module tank.core.tf
3 | #
4 | # Terraform-related code.
5 | #
6 |
7 | from os.path import dirname, isdir
8 |
9 | from cement.utils import fs
10 |
11 | from tank.core import resource_path
12 | from tank.core.exc import TankError, TankTestCaseError
13 | from tank.core.testcase import TestCase
14 |
15 |
class PlanGenerator:
    """
    Generates a Terraform plan for the run based on the testcase and the user settings.
    """

    def __init__(self, app, testcase: TestCase):
        """Remember app/testcase and verify provider templates exist."""
        self._app = app
        self.testcase = testcase

        if not isdir(self._provider_templates):
            raise TankError('Failed to find Terraform templates for cloud provider {} at {}'.format(
                self._app.cloud_settings.provider.value, self._provider_templates
            ))

    def generate(self, plan_dir: str):
        """Render the provider Terraform templates into *plan_dir*.

        The monitoring machine type is sized after the cluster size.
        """
        total = self.testcase.total_instances
        if total <= 10:
            monitoring_machine_type = 'small'
        elif total < 50:
            monitoring_machine_type = 'standard'
        else:
            monitoring_machine_type = 'large'

        template_context = {
            'instances': self.testcase.instances,
            'monitoring_machine_type': monitoring_machine_type,
        }
        self._app.template.copy(self._provider_templates, plan_dir, template_context)


    @property
    def _provider_templates(self) -> str:
        """Directory holding Terraform templates for the configured provider."""
        return resource_path('providers', self._app.cloud_settings.provider.value)
47 |
--------------------------------------------------------------------------------
/tank/core/utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # module tank.core.utils
3 | #
4 | # Misc. utils.
5 | #
6 | import os
7 | import re
8 | import json
9 | import hashlib
10 | from typing import List
11 |
12 | import yaml
13 |
14 |
def yaml_load(filename: str):
    """Parse a YAML file and return the resulting object (safe loader)."""
    with open(filename) as stream:
        data = yaml.safe_load(stream)
    return data
18 |
19 |
def yaml_dump(filename: str, data):
    """Serialize *data* into *filename* as block-style YAML."""
    with open(filename, 'w') as stream:
        return yaml.dump(data, stream, default_flow_style=False)
23 |
24 |
def json_load(filename: str):
    """Parse a JSON file and return the resulting object."""
    with open(filename) as stream:
        return json.load(stream)
28 |
29 |
def sha256(bin_data) -> str:
    """Return the hex SHA-256 digest of a bytes-like object."""
    digest = hashlib.sha256(bin_data)
    return digest.hexdigest()
32 |
33 |
def grep_dir(dirname: str, filter_regex: str = None, isdir: bool = False):
    """
    Enumerate and filter contents of a directory.

    :param dirname: directory to list
    :param filter_regex: if given, keep only names matched by the regex
                         (via re.match, i.e. anchored at the start of the name)
    :param isdir: if True, keep only subdirectories
    :return: list of matching entry names (not full paths)
    """
    contents = os.listdir(dirname)

    if filter_regex is not None:
        filter_re = re.compile(filter_regex)
        contents = [name for name in contents if filter_re.match(name) is not None]

    # Bug fix: the previous condition was `isdir is not None`, which is always
    # true (the default is False), so the directory filter was applied
    # unconditionally and plain files could never be returned.
    if isdir:
        contents = [name for name in contents if os.path.isdir(os.path.join(dirname, name))]

    return contents
48 |
49 |
def ratio_from_percent(percent: int) -> float:
    """Convert a percent value (e.g. 25) to a fractional ratio (0.25)."""
    ratio = percent / 100
    return ratio
53 |
54 |
def split_evenly(number: int, count: int) -> List[int]:
    """Return mostly equal parts, larger parts first.

    Example: number = 11, count = 3, result = [4, 4, 3]
    """
    if count <= 0:
        return []

    # distribute the remainder over the first `remainder` parts
    quotient, remainder = divmod(number, count)
    return [quotient + 1 if i < remainder else quotient for i in range(count)]
73 |
--------------------------------------------------------------------------------
/tank/ext/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mixbytes/tank/b5595df8c97a101e074696a93898ccbfa210261d/tank/ext/__init__.py
--------------------------------------------------------------------------------
/tank/logging_conf.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 |
def build_logging_conf(logs_dir: str) -> dict:
    """Build a dictConfig-style logging configuration.

    Creates *logs_dir* if absent, then configures a WARNING console handler
    plus rotating 'info.log' (DEBUG+) and 'error.log' (ERROR+) file handlers,
    all attached to the root logger.

    :param logs_dir: directory where the log files are stored
    :return: dict suitable for logging.config.dictConfig
    """
    # exist_ok avoids the check-then-create race of the previous
    # `if not os.path.exists(...): os.makedirs(...)` pattern
    os.makedirs(logs_dir, exist_ok=True)

    # settings shared by both rotating file handlers
    rotating_file_defaults = {
        'class': 'logging.handlers.RotatingFileHandler',
        'formatter': 'simple',
        'maxBytes': 10 * 1024 * 1024,  # 10 MB
        'backupCount': 3,
        'encoding': 'utf8',
    }

    return {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'simple': {
                'format': '%(asctime)s %(name)s(%(lineno)d) - %(levelname)s: %(message)s',
            },
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'level': 'WARNING',
                'formatter': 'simple',
                'stream': 'ext://sys.stdout',
            },
            'info_file_handler': dict(
                rotating_file_defaults,
                level='DEBUG',
                filename=os.path.join(logs_dir, 'info.log'),
            ),
            'error_file_handler': dict(
                rotating_file_defaults,
                level='ERROR',
                filename=os.path.join(logs_dir, 'error.log'),
            ),
        },
        'root': {
            'level': 'DEBUG',
            'handlers': ['console', 'info_file_handler', 'error_file_handler'],
        },
    }
50 |
--------------------------------------------------------------------------------
/tank/main.py:
--------------------------------------------------------------------------------
1 | import logging.config
2 | import os
3 | from typing import Dict
4 | import pathlib
5 |
6 | from cement import App, TestApp, init_defaults
7 | from cement.core.exc import CaughtSignal
8 | from cement.utils import fs
9 |
10 | from tank.core.cloud_settings import CloudUserSettings
11 | from tank.core.exc import TankError
12 | from tank.controllers.base import Base
13 | from tank.controllers.cluster import NestedCluster, EmbeddedCluster
14 | from tank.logging_conf import build_logging_conf
15 |
16 |
17 | logger = logging.getLogger(__name__)
18 |
19 |
def _default_config() -> Dict:
    """Build the built-in configuration defaults for the app."""
    config = init_defaults('tank', 'log.logging')

    config['tank'] = {
        'ansible': {
            'forks': 50,
        },
        'monitoring': {
            'admin_user': 'tank',
            'admin_password': 'tank',
        },
    }

    config['log.logging']['level'] = 'WARNING'
    return config
36 |
37 |
class MixbytesTank(App):
    """MixBytes Tank primary application (a Cement App)."""

    class Meta:
        label = 'tank'

        # configuration defaults
        config_defaults = _default_config()

        # call sys.exit() on close
        close_on_exit = True

        # load additional framework extensions
        extensions = [
            'yaml',
            'colorlog',
            'jinja2',
        ]

        # list of configuration directories
        config_dirs = ['~/.tank']

        # configuration handler
        config_handler = 'yaml'

        # configuration file suffix
        config_file_suffix = '.yml'

        # set the log handler
        log_handler = 'colorlog'

        # set the output handler
        output_handler = 'jinja2'

        # handler backing self.template (used e.g. for Terraform plan rendering)
        template_handler = 'jinja2'

        # register handlers
        handlers = [
            Base,
            EmbeddedCluster,
            NestedCluster,
        ]

        # register hooks
        hooks = [
        ]

    def __init__(self):
        super().__init__()
        # lazily created CloudUserSettings — see the `cloud_settings` property
        self._cloud_settings = None

    def setup(self):
        """Extend Cement setup: ensure the user dir and default command paths."""
        super(MixbytesTank, self).setup()
        fs.ensure_dir_exists(self.user_dir)

        # defaults applied only when the user config does not already set them
        additional_config_defaults = {
            'tank': {
                'terraform_run_command': os.path.join(self.installation_dir, 'terraform'),
                'terraform_inventory_run_command': os.path.join(self.installation_dir, 'terraform-inventory'),
            },
        }

        for section, variables_dict in additional_config_defaults.items():
            for key, value in variables_dict.items():
                if key not in self.config.keys(section):
                    self.config.set(section, key, value)

    @property
    def app_env(self) -> Dict:
        """Copy of the process environment with Terraform automation flags set."""
        env = os.environ.copy()
        env["TF_LOG"] = "TRACE"
        env["TF_IN_AUTOMATION"] = "true"
        return env

    @property
    def cloud_settings(self) -> CloudUserSettings:
        """Cloud provider settings, built lazily from the app config."""
        if self._cloud_settings is None:
            self._cloud_settings = CloudUserSettings(self.config)

        return self._cloud_settings

    @property
    def provider(self) -> str:
        """Name of the configured cloud provider (e.g. 'digitalocean', 'gce')."""
        return self.cloud_settings.provider.value

    @property
    def terraform_run_command(self) -> str:
        """Path of the terraform binary to execute."""
        return self.config.get(self.Meta.label, 'terraform_run_command')

    @property
    def terraform_inventory_run_command(self) -> str:
        """Path of the terraform-inventory binary (dynamic Ansible inventory)."""
        return self.config.get(self.Meta.label, 'terraform_inventory_run_command')

    @property
    def user_dir(self) -> str:
        """Per-user state directory: ~/.tank."""
        return fs.abspath(fs.join(pathlib.Path.home(), '.tank'))

    @property
    def installation_dir(self) -> str:
        """Directory for downloaded binaries: ~/.tank/bin."""
        return fs.abspath(fs.join(self.user_dir, 'bin'))

    @property
    def ansible_config(self) -> dict:
        """Return dict with ansible parameters."""
        return self.config.get(self.Meta.label, 'ansible')
143 |
144 |
class MixbytesTankTest(TestApp, MixbytesTank):
    """A sub-class of MixbytesTank that is better suited for testing."""

    class Meta:
        # same label as the production app so config sections resolve identically
        label = 'tank'
150 |
151 |
def main():
    """CLI entry point: configure logging, run the app, map errors to exit codes."""
    with MixbytesTank() as app:
        # file/console logging is configured before the app runs
        logs_dir = os.path.join(app.user_dir, 'logs')
        logging.config.dictConfig(build_logging_conf(logs_dir=logs_dir))

        try:
            app.run()

        except TankError as e:
            # domain errors are reported as '<ErrorClass>: <message>', exit code 1
            print('{}: {}'.format(e.__class__.__name__, e))
            app.exit_code = 1

            # full traceback only in debug mode
            if app.debug is True:
                import traceback
                traceback.print_exc()

        # FIXME better signal handling
        except CaughtSignal as e:
            # Default Cement signals are SIGINT and SIGTERM, exit 0 (non-error)

            print(f'{e}')

            app.exit_code = 0


if __name__ == '__main__':
    main()
179 |
--------------------------------------------------------------------------------
/tank/plugins/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mixbytes/tank/b5595df8c97a101e074696a93898ccbfa210261d/tank/plugins/__init__.py
--------------------------------------------------------------------------------
/tank/resources/ansible/ansible-requirements.yml:
--------------------------------------------------------------------------------
1 | - src: https://github.com/mixbytes/tank.ansible-core
2 | version: master
3 | name: tank.ansible-core
4 |
5 | - src: https://github.com/geerlingguy/ansible-role-docker
6 | version: master
7 | name: tank.docker
8 |
9 | - src: https://github.com/mixbytes/tank.ansible-promstack
10 | version: master
11 | name: mixbytes.promstack
12 |
--------------------------------------------------------------------------------
/tank/resources/ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | interpreter_python = auto
3 | #callback_whitelist = profile_roles
4 | retry_files_enabled = False
5 | host_key_checking = False
6 | allow_world_readable_tmpfiles = True
7 | timeout = 5
8 | use_persistent_connections = True
9 | deprecation_warnings=False
10 |
11 | stdout_callback=unixy
12 | #display_failed_stderr = no
13 | display_ok_hosts = no
14 | display_skipped_hosts = no
15 |
16 | [ssh_connection]
17 | pipelining = true
18 | retries=10
19 | #ssh_args = -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
20 | # FIXME: [fixed] ControlMaster option doesn't work (Connection refused\r\nFailed to connect to new control master", "unreachable": true)
21 | ssh_args = -C -o ControlPersist=60s -o ControlMaster=auto -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
22 | # TODO: only for the dockerized version; may not work without Docker. Add a condition in the future.
23 | #control_path = /dev/shm/cp%%h-%%p-%%r
24 |
--------------------------------------------------------------------------------
/tank/resources/ansible/common.yml:
--------------------------------------------------------------------------------
1 | - name: Converge monitoring node
2 | hosts: "*monitoring*"
3 | become: true
4 | vars:
5 | bc_private_interface: "eth0"
6 | lsyncd_master_hostname: "{{ groups['monitoring_peer'] | map('extract', hostvars, ['ansible_'+bc_private_interface, 'ipv4', 'address']) | list | join('') }}"
7 | lsyncd_slave_hosts: "{{ groups['allnodes'] | map('extract', hostvars, ['ansible_hostname']) | list }}"
8 | roles:
9 | - role: tank.ansible-core
10 |
11 | - name: Converge boot node
12 | hosts: "all"
13 | become: true
14 | vars:
15 | bc_private_interface: "eth0"
16 | lsyncd_master_hostname: "{{ groups['monitoring_peer'] | map('extract', hostvars, ['ansible_'+bc_private_interface, 'ipv4', 'address']) | list | join('') }}"
17 | lsyncd_slave_hosts: "{{ groups['allnodes'] | map('extract', hostvars, ['ansible_'+bc_private_interface, 'ipv4', 'address']) | list }}"
18 | roles:
19 | - role: tank.ansible-core
--------------------------------------------------------------------------------
/tank/resources/ansible/core.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Collect facts
3 | hosts: all
4 | become: true
5 | gather_facts: smart
6 | tasks:
7 | - debug: msg="Fetching facts from cluster instances"
8 | tags: [print_action]
9 | - setup:
10 |
11 | - name: Create groups
12 | hosts: localhost
13 | gather_facts: false
14 | tasks:
15 | - debug: msg="Calculate"
16 | tags: [print_action]
17 | - name: "Add boot node to group bcboot"
18 | tags:
19 | - always
20 | add_host:
21 | name: "{{ item }}"
22 | groups: bcboot
23 | inventory_dir: "{{ hostvars[item].inventory_dir }}"
24 | loop: "{{ groups['all'] }}"
25 | when: '"boot" in hostvars[item]["ansible_hostname"]'
26 | changed_when: false
27 | - name: "Add all nodes to group bcpeers"
28 | tags:
29 | - always
30 | add_host:
31 | name: "{{ item }}"
32 | groups: bcpeers
33 | inventory_dir: "{{ hostvars[item].inventory_dir }}"
34 | loop: "{{ groups['all'] }}"
35 | when: '"prod" in hostvars[item]["ansible_hostname"] or "boot" in hostvars[item]["ansible_hostname"]'
36 | changed_when: false
37 | - name: "Add monitnoring nodes to group monitoring_peer"
38 | tags:
39 | - always
40 | register: _create_group_monitoring
41 | add_host:
42 | name: "{{ item }}"
43 | groups: monitoring_peer
44 | inventory_dir: "{{ hostvars[item].inventory_dir }}"
45 | loop: "{{ groups['all'] }}"
46 | when: '"monitoring" in hostvars[item]["ansible_hostname"]'
47 | changed_when: false
48 | - name: "Add all nodes to group allnodes"
49 | tags:
50 | - always
51 | add_host:
52 | name: "{{ item }}"
53 | groups: allnodes
54 | inventory_dir: "{{ hostvars[item].inventory_dir }}"
55 | loop: "{{ groups['all'] }}"
56 | when: '"prod" in hostvars[item]["ansible_hostname"] or "boot" in hostvars[item]["ansible_hostname"] or "full" in hostvars[item]["ansible_hostname"]'
57 | changed_when: false
58 |
59 | - name: "Updating packages on instances"
60 | hosts: all
61 | become: true
62 | gather_facts: smart
63 | tasks:
64 | - name: "Updating apt-get before installing packages"
65 | apt:
66 | cache_valid_time: 8640
67 | changed_when: false
68 |
69 | - name: "Install Docker and requirement packages"
70 | hosts: all
71 | # strategy: mitogen_free
72 | become: true
73 | gather_facts: smart
74 | roles:
75 | - role: tank.docker
76 | tasks:
77 | - name: "Install python packages"
78 | apt:
79 | name: python3-pip
80 | - debug: msg="Docker Engine installed"
81 | tags: [print_action]
82 |
83 | - import_playbook: "{{ blockchain_ansible_playbook }}"
84 |
85 | - name: Converge monitoring node
86 | hosts: "*monitoring*"
87 | # strategy: mitogen_free
88 | become: true
89 | gather_facts: smart
90 | vars:
91 | bc_polkadot_comp_state_monitoring: present
92 | install_promstack: true
93 | admin_user: "{{ monitoring_user_login }}"
94 | admin_password: "{{ monitoring_user_password }}"
95 | grafana_http_port: 80
96 | roles:
97 | - role: tank.blockchain
98 |
99 | - name: Create cluster report
100 | hosts: localhost
101 | gather_facts: false
102 | tasks:
103 | - name: "Generate report"
104 | template:
105 | src: templates/ansible-report.json.j2
106 | dest: "{{ _cluster_ansible_report }}"
107 | run_once: true
108 |
--------------------------------------------------------------------------------
/tank/resources/ansible/templates/ansible-report.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | {% for i in groups["all"] %}
3 | "{{ i }}": {
4 | "hostname": "{{ hostvars[i].ansible_hostname }}",
5 | "bench_present": {{ 'true' if hostvars[i]['bench_present'] | default(False) else 'false' }}
6 | }
7 | {% if not loop.last %},{% endif %}
8 | {% endfor %}
9 | }
10 |
--------------------------------------------------------------------------------
/tank/resources/bindings.yml:
--------------------------------------------------------------------------------
1 | # Binding names are not predefined, you can introduce any bindings you feel suitable
2 | polkadot:
3 |
4 | # Ansible part of the binding
5 | ansible:
6 | # Git link to fetch the binding
7 | src: https://github.com/mixbytes/tank.ansible-polkadot
8 |
9 | # Optionally - branch/tag/commit to check out
10 | version: master
11 |
12 | haya:
13 | ansible:
14 | src: https://github.com/mixbytes/tank.ansible-haya
15 |
--------------------------------------------------------------------------------
/tank/resources/providers/digitalocean/backend.tf:
--------------------------------------------------------------------------------
1 | variable "state_path" {}
2 |
3 | terraform {
4 | backend "local" {}
5 | }
6 |
7 | data "terraform_remote_state" "state" {
8 | backend = "local"
9 | config {
10 | path = "${var.state_path}"
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/tank/resources/providers/digitalocean/main.tf:
--------------------------------------------------------------------------------
1 | {% raw %}
2 | # user-specific settings
3 | variable "token" {}
4 | variable "pvt_key" {}
5 | variable "ssh_fingerprint" {}
6 | variable "scripts_path" {}
7 |
8 | # test case-specific settings
9 | variable "blockchain_name" {}
10 |
11 | # run-specific settings
12 | variable "setup_id" {}
13 |
14 | provider "digitalocean" {
15 | version = "~> 1.1"
16 | token = "${var.token}"
17 | }
18 | {% endraw %}
19 |
20 |
21 | {% macro machine_type(type) -%}
22 | {% if type == 'micro' %}
23 | size = "512mb"
24 | {% elif type == 'small' %}
25 | size = "2gb"
26 | {% elif type == 'standard' %}
27 | size = "4gb"
28 | {% elif type == 'large' %}
29 | size = "8gb"
30 | {% elif type == 'xlarge' %}
31 | size = "16gb"
32 | {% elif type == 'xxlarge' %}
33 | size = "32gb"
34 | {% elif type == 'huge' %}
35 | size = "64gb"
36 | {% else %}
37 | unsupported instance type: {{ type }}
38 | {% endif %}
39 | {%- endmacro %}
40 |
41 |
42 | # Dynamic resources
43 | {% for name, instance_configs in instances.items() %}
44 | {% for cfg in instance_configs %}
45 |
46 | resource "digitalocean_droplet" "tank-{{ name }}-{{ loop.index }}" {
47 | image = "ubuntu-18-04-x64"
48 | name = "tank-${var.blockchain_name}-${var.setup_id}-{{ name }}-{{ loop.index }}-${count.index}"
49 | count = "{{ cfg.count }}"
50 | {{ machine_type(cfg.type) }}
51 | region = "{{ cfg.region }}"
52 |
53 | {% raw %}
54 | private_networking = true
55 | ssh_keys = [
56 | "${var.ssh_fingerprint}"
57 | ]
58 | connection {
59 | user = "root"
60 | type = "ssh"
61 | private_key = "${file(var.pvt_key)}"
62 | timeout = "10m"
63 | }
64 | provisioner "file" {
65 | source = "${var.scripts_path}/tank-packetloss"
66 | destination = "/usr/local/bin/tank-packetloss"
67 | }
68 | provisioner "remote-exec" {
69 | inline = [
70 | "chmod +x /usr/local/bin/tank-packetloss",
71 | {% endraw %}
72 | "/usr/local/bin/tank-packetloss add {{ cfg.packetloss }}",
73 | {% raw %}
74 | ]
75 | }
76 | }
77 | {% endraw %}
78 |
79 | {% endfor %}
80 | {% endfor %}
81 | # End of dynamic resources
82 |
83 |
84 | {% raw %}
85 | resource "digitalocean_droplet" "tank-monitoring" {
86 | image = "ubuntu-18-04-x64"
87 | name = "tank-${var.blockchain_name}-${var.setup_id}-monitoring"
88 | {% endraw %}
89 | {{ machine_type(monitoring_machine_type) }}
90 | {% raw %}
91 |
92 | region = "fra1"
93 | private_networking = true
94 | ssh_keys = [
95 | "${var.ssh_fingerprint}"
96 | ]
97 | connection {
98 | user = "root"
99 | type = "ssh"
100 | private_key = "${file(var.pvt_key)}"
101 | timeout = "10m"
102 | }
103 | provisioner "remote-exec" {
104 | inline = [
105 | "export PATH=$PATH:/usr/bin",
106 | ]
107 | }
108 | }
109 | {% endraw %}
110 |
111 |
112 | # Dynamic output
113 | {% for name, instance_configs in instances.items() %}
114 | {% for cfg in instance_configs %}
115 |
116 | output "{{ name }}-{{ loop.index }} node IP addresses" {
117 | value = "${digitalocean_droplet.tank-{{ name }}-{{ loop.index }}.*.ipv4_address}"
118 | }
119 |
120 | {% endfor %}
121 | {% endfor %}
122 | # End of dynamic output
123 |
124 |
125 | {% raw %}
126 | output "Monitoring instance IP address" {
127 | value = "${digitalocean_droplet.tank-monitoring.ipv4_address}"
128 | }
129 |
130 | output "Blockchain name" {
131 | value = "${var.blockchain_name}"
132 | }
133 |
134 | output "Setup ID" {
135 | value = "${var.setup_id}"
136 | }
137 | {% endraw %}
138 |
--------------------------------------------------------------------------------
/tank/resources/providers/gce/backend.tf:
--------------------------------------------------------------------------------
1 | {% raw %}
2 | variable "state_path" {}
3 |
4 | terraform {
5 | backend "local" {}
6 | }
7 |
8 | data "terraform_remote_state" "state" {
9 | backend = "local"
10 | config {
11 | path = "${var.state_path}"
12 | }
13 | }
14 | {% endraw %}
--------------------------------------------------------------------------------
/tank/resources/providers/gce/main.tf:
--------------------------------------------------------------------------------
1 | {% raw %}
2 | # user-specific settings
3 | variable "pub_key" {
4 | description = "Path to file containing public key"
5 | }
6 | variable "pvt_key" {
7 | description = "Path to file containing private key"
8 | }
9 | variable "cred_path" {}
10 | variable "project" {}
11 |
12 | # test case-specific settings
13 | variable "blockchain_name" {}
14 |
15 | variable "region_zone" {
16 | default = "europe-west4-a"
17 | }
18 |
19 | variable "region" {
20 | default = "europe-west4"
21 | }
22 |
23 | variable "scripts_path" {}
24 |
25 | # run-specific settings
26 | variable "setup_id" {}
27 |
28 |
29 | provider "google" {
30 | version = "~> 2.5"
31 | credentials = "${file("${var.cred_path}")}"
32 | project = "${var.project}"
33 | region = "${var.region}"
34 | }
35 |
36 |
37 | resource "google_compute_firewall" "default" {
38 | name = "firewall"
39 | network = "default"
40 |
41 | allow {
42 | protocol = "icmp"
43 | }
44 |
45 | allow {
46 | protocol = "tcp"
47 | }
48 |
49 | source_ranges = ["0.0.0.0/0"]
50 | source_tags = ["monitoring", "blockchain"]
51 | }
52 | {% endraw %}
53 |
54 |
55 | {% macro machine_type(type) -%}
56 | {% if type == 'micro' %}
57 | machine_type = "f1-micro"
58 | {% elif type == 'small' %}
59 | machine_type = "g1-small"
60 | {% elif type == 'standard' %}
61 | machine_type = "n1-standard-1"
62 | {% elif type == 'large' %}
63 | machine_type = "n1-standard-2"
64 | {% elif type == 'xlarge' %}
65 | machine_type = "n1-standard-4"
66 | {% elif type == 'xxlarge' %}
67 | machine_type = "n1-standard-8"
68 | {% elif type == 'huge' %}
69 | machine_type = "n1-standard-16"
70 | {% else %}
71 | unsupported instance type: {{ type }}
72 | {% endif %}
73 | {%- endmacro %}
74 |
75 |
76 | # Dynamic resources
77 | {% for name, instance_configs in instances.items() %}
78 | {% for cfg in instance_configs %}
79 |
80 | resource "google_compute_instance" "tank-{{ name }}-{{ loop.index }}" {
81 | name = "tank-${var.blockchain_name}-${var.setup_id}-{{ name }}-{{ loop.index }}-${count.index}"
82 | count = "{{ cfg.count }}"
83 | {{ machine_type(cfg.type) }}
84 | zone = "{{ cfg.region }}-a"
85 |
86 | {% raw %}
87 | tags = ["blockchain"]
88 |
89 | boot_disk {
90 | initialize_params {
91 | image = "ubuntu-os-cloud/ubuntu-minimal-1804-lts"
92 | }
93 | }
94 |
95 | network_interface {
96 | network = "default"
97 |
98 | access_config {
99 | // Ephemeral IP
100 | }
101 | }
102 |
103 | metadata {
104 | ssh-keys = "root:${file("${var.pub_key}")}"
105 | }
106 |
107 | connection {
108 | user = "root"
109 | type = "ssh"
110 | private_key = "${file(var.pvt_key)}"
111 | timeout = "10m"
112 | }
113 |
114 | provisioner "file" {
115 | source = "${var.scripts_path}/tank-packetloss"
116 | destination = "/usr/local/bin/tank-packetloss"
117 | }
118 | provisioner "remote-exec" {
119 | inline = [
120 | "chmod +x /usr/local/bin/tank-packetloss",
121 | {% endraw %}
122 | "/usr/local/bin/tank-packetloss add {{ cfg.packetloss }}",
123 | {% raw %}
124 | ]
125 | }
126 | }
127 | {% endraw %}
128 |
129 | {% endfor %}
130 | {% endfor %}
131 | # End of dynamic resources
132 |
133 |
134 | {% raw %}
135 | resource "google_compute_instance" "monitoring" {
136 | name = "tank-${var.blockchain_name}-${var.setup_id}-monitoring"
137 | {% endraw %}
138 | {{ machine_type(monitoring_machine_type) }}
139 | {% raw %}
140 |
141 | zone = "${var.region_zone}"
142 | tags = ["mtrg"]
143 |
144 | boot_disk {
145 | initialize_params {
146 | image = "ubuntu-os-cloud/ubuntu-minimal-1804-lts"
147 | }
148 | }
149 |
150 | network_interface {
151 | network = "default"
152 |
153 | access_config {
154 | nat_ip = ""
155 | //nat_ip = "${google_compute_instance.monitoring.network_interface.0.access_config.0.nat_ip}"
156 | }
157 | }
158 |
159 | metadata {
160 | ssh-keys = "root:${file("${var.pub_key}")}"
161 | }
162 |
163 | provisioner "remote-exec" {
164 | connection {
165 | user = "root"
166 | type = "ssh"
167 | private_key = "${file(var.pvt_key)}"
168 | timeout = "10m"
169 | }
170 | inline = [
171 | "export PATH=$PATH:/usr/bin",
172 | ]
173 | }
174 | }
175 | {% endraw %}
176 |
177 |
178 | # Dynamic output
179 | {% for name, instance_configs in instances.items() %}
180 | {% for cfg in instance_configs %}
181 |
182 | output "{{ name }}-{{ loop.index }} nodes IP addresses" {
183 | value = "${google_compute_instance.tank-{{ name }}-{{ loop.index }}.*.network_interface.0.access_config.0.nat_ip}"
184 | }
185 |
186 | {% endfor %}
187 | {% endfor %}
188 | # End of dynamic output
189 |
190 |
191 | {% raw %}
192 | output "Monitoring instance IP address" {
193 | value = "${google_compute_instance.monitoring.network_interface.0.access_config.0.nat_ip}"
194 | }
195 |
196 | output "Blockchain name" {
197 | value = "${var.blockchain_name}"
198 | }
199 |
200 | output "Setup ID" {
201 | value = "${var.setup_id}"
202 | }
203 | {% endraw %}
204 |
--------------------------------------------------------------------------------
/tank/resources/regions.yml:
--------------------------------------------------------------------------------
1 | digitalocean:
2 | Europe: FRA1
3 | Asia: SGP1
4 | NorthAmerica: NYC1
5 | default: FRA1
6 |
7 | gce:
8 | Europe: europe-west4
9 | Asia: asia-northeast1
10 | NorthAmerica: us-east4
11 | default: europe-west4
12 |
--------------------------------------------------------------------------------
/tank/resources/scripts/tank-packetloss:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# tank-packetloss: manage iptables rules that randomly drop TCP packets.
# Usage: tank-packetloss add <probability> | tank-packetloss delete

# Global read by add(); taken from the script's second argument.
# NOTE(review): iptables `--probability` expects a value in 0..1, while the
# terraform templates pass the testcase's integer packetloss (0..99) here --
# confirm the expected unit/scale with the callers.
loss_percentage=$2

# Install the packet-loss rule set. Rule order matters: the ACCEPT rules are
# appended first so loopback, monitoring ports and low ports are exempt before
# the probabilistic DROP rules at the end catch the remaining traffic.
function add(){
    # Always allow loopback traffic.
    iptables -A INPUT -m comment --comment "TANK: packetloss" \
    -i lo \
    -j ACCEPT
    iptables -A OUTPUT -m comment --comment "TANK: packetloss" \
    -o lo \
    -j ACCEPT
    # Exempt monitoring/benchmark ports (8080, 9100, 3000, 9090) both ways.
    iptables -A INPUT -m comment --comment "TANK: packetloss" \
    -p tcp -m multiport --dports 8080,9100,3000,9090 \
    -m state --state NEW,ESTABLISHED \
    -j ACCEPT
    iptables -A OUTPUT -m comment --comment "TANK: packetloss" \
    -p tcp -m multiport --sports 8080,9100,3000,9090 \
    -m state --state ESTABLISHED \
    -j ACCEPT
    # Exempt traffic originating from privileged ports (e.g. SSH on 22).
    # NOTE(review): both directions match --sports 1:1024; the OUTPUT rule
    # arguably should use --dports -- confirm intent before changing.
    iptables -A INPUT -m comment --comment "TANK: packetloss" \
    -m multiport -p tcp --sports 1:1024 \
    -j ACCEPT
    iptables -A OUTPUT -m comment --comment "TANK: packetloss" \
    -m multiport -p tcp --sports 1:1024 \
    -j ACCEPT
    # Randomly drop the remaining TCP traffic on unprivileged ports with the
    # given probability (uses the global loss_percentage, not add()'s own $1).
    iptables -A INPUT -m comment --comment "TANK: packetloss" \
    -m multiport -p tcp --dports 1025:65535 \
    -m statistic --mode random --probability ${loss_percentage} \
    -j DROP
    iptables -A OUTPUT -m comment --comment "TANK: packetloss" \
    -m multiport -p tcp --dports 1025:65535 \
    -m statistic --mode random --probability ${loss_percentage} \
    -j DROP
}
35 |
# Remove every "TANK: packetloss" rule from the INPUT and OUTPUT chains.
#
# The rule numbers are collected first and deleted highest-first: iptables
# renumbers the remaining rules after each deletion, so deleting in ascending
# order would remove the wrong rules. The here-strings are quoted so the
# multi-line lists keep one rule number per line (unquoted expansion would
# collapse them onto a single line).
function delete(){
    _current_packetloss_rules_input=$(iptables -L INPUT --line-numbers | grep "TANK: packetloss"|awk '{print $1}'|sort -rn)
    _current_packetloss_rules_output=$(iptables -L OUTPUT --line-numbers | grep "TANK: packetloss"|awk '{print $1}'|sort -rn)
    while read -r input_rule_num;
    do
        # Skip the empty line produced when no matching rules exist.
        [ -n "${input_rule_num}" ] && iptables -D INPUT "${input_rule_num}"
    done <<< "${_current_packetloss_rules_input}"
    while read -r output_rule_num;
    do
        [ -n "${output_rule_num}" ] && iptables -D OUTPUT "${output_rule_num}"
    done <<< "${_current_packetloss_rules_output}"
}
48 |
# Dispatch on the first argument ("add <probability>" or "delete").
# The subcommand parameter is quoted to avoid word splitting/globbing.
function main(){
    _sub_cmd=$1
    _sub_params=$2
    case ${_sub_cmd} in
        add)
            add "${_sub_params}"
            ;;
        delete)
            delete
            ;;
        *)
            echo "subcommand not found"
    esac
}
63 |
64 | main ${@}
--------------------------------------------------------------------------------
/tank/resources/testcase_schema.yml:
--------------------------------------------------------------------------------
1 | type: object
2 | additionalProperties: False
3 | required:
4 | - binding
5 | - instances
6 |
7 | definitions:
8 | count:
9 | type: integer
10 |     minimum: 1 # count must be greater than 0
11 |
12 | packetloss:
13 | type: integer
14 | minimum: 0
15 | maximum: 99
16 |
17 | type:
18 | type: string
19 | enum: ["micro", "small", "standard", "large", "xlarge", "xxlarge", "huge"]
20 |
21 | regions:
22 | type: object
23 |
24 | properties:
25 | Europe: {$ref: "#/definitions/minimal-configuration"}
26 | Asia: {$ref: "#/definitions/minimal-configuration"}
27 | NorthAmerica: {$ref: "#/definitions/minimal-configuration"}
28 | random: {$ref: "#/definitions/minimal-configuration"}
29 |
30 | additionalProperties: False
31 |
32 | params:
33 | type: object
34 |
35 | properties:
36 | count: {$ref: "#/definitions/count"}
37 | type: {$ref: "#/definitions/type"}
38 | packetloss: {$ref: "#/definitions/packetloss"}
39 |
40 | required:
41 | - count
42 |
43 | additionalProperties: False
44 |
45 | minimal-configuration:
46 | oneOf:
47 | - $ref: "#/definitions/count"
48 | - $ref: "#/definitions/params"
49 |
50 | with-regions-configuration:
51 | type: object
52 |
53 | properties:
54 | count: {$ref: "#/definitions/count"}
55 | regions: {$ref: "#/definitions/regions"}
56 | type: {$ref: "#/definitions/type"}
57 | packetloss: {$ref: "#/definitions/packetloss"}
58 |
59 | required:
60 | - regions
61 |
62 | additionalProperties: False
63 |
64 | properties:
65 | binding:
66 | type: string
67 |
68 | instances:
69 | type: object
70 |
71 | properties:
72 | type: {$ref: "#/definitions/type"}
73 | packetloss: {$ref: "#/definitions/packetloss"}
74 |
75 | propertyNames:
76 | pattern: "^[A-Za-z_]{1,28}$"
77 |
78 | additionalProperties:
79 | oneOf:
80 | - $ref: "#/definitions/count"
81 | - $ref: "#/definitions/params"
82 | - $ref: "#/definitions/with-regions-configuration"
83 |
84 | ansible:
85 | type: object
86 | additionalProperties:
87 | oneOf:
88 | - type: integer
89 | - type: string
90 |
--------------------------------------------------------------------------------
/tank/terraform_installer.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import stat
4 | import sys
5 | import zipfile
6 | from urllib.request import urlopen
7 |
8 | import sh
9 |
10 |
11 | _logger = logging.getLogger(__name__)
12 |
13 |
14 | class BaseInstaller(object):
15 | """Base installer.
16 |
17 | 1. Download ZIP archive.
18 | 2. Unzip it.
19 | 3. Remove ZIP file.
20 | 4. Move file to storage_path directory.
21 | 5. Make file executable
22 | 6. Add variable to $PATH
23 | """
24 |
25 | version: str
26 | url: str
27 | archive_name: str
28 | file_name: str # name of file in archive
29 |
30 | def __init__(self, storage_path: str):
31 | """Build archive full path."""
32 | self._storage_path = storage_path
33 | if not os.path.exists(storage_path):
34 | os.makedirs(storage_path)
35 |
36 | self._archive_full_path = os.path.join(storage_path, self.archive_name)
37 | self._file_full_path = os.path.join(storage_path, self.file_name)
38 |
39 | def _is_installed(self) -> bool:
40 | """Returns True if tool is installed else False."""
41 | try:
42 | sh.Command(self._file_full_path)
43 | return True
44 | except sh.CommandNotFound:
45 | return False
46 |
47 | def _download_archive(self):
48 | """Download archive from provided url."""
49 | _logger.debug('Downloading archive...')
50 | response = urlopen(self.url)
51 |
52 | with open(self._archive_full_path, 'wb') as archive_file:
53 | chunk_size = 1024 * 1024 # 1 MB
54 | chunk = response.read(chunk_size)
55 |
56 | while chunk:
57 | archive_file.write(chunk)
58 | chunk = response.read(chunk_size)
59 |
60 | _logger.debug('Archive {name} has been successfully downloaded.'.format(name=self.archive_name))
61 |
62 | def _unpack_archive(self):
63 | """Unpack archive with provided name."""
64 | with zipfile.ZipFile(self._archive_full_path, 'r') as zip_ref:
65 | zip_ref.extractall(self._storage_path)
66 |
67 | _logger.debug('Archive has been unpacked.')
68 |
69 | def _remove_archive(self):
70 | """Remove archive after unpacking."""
71 | os.remove(self._archive_full_path)
72 | _logger.debug('Archive has been removed.')
73 |
74 | def _make_executable(self):
75 | """Makes file executable."""
76 | file_stat = os.stat(self._file_full_path)
77 | os.chmod(self._file_full_path, file_stat.st_mode | stat.S_IEXEC)
78 |
79 | def _add_variables(self):
80 | """Add variables to $PATH."""
81 | path_variable = os.environ.get('PATH', '')
82 | paths = path_variable.split(os.pathsep)
83 |
84 | if self._storage_path not in paths:
85 | if path_variable:
86 | os.environ['PATH'] = os.pathsep.join([self._storage_path, path_variable])
87 | else:
88 | os.environ['PATH'] = self._storage_path
89 |
90 | _logger.debug('Variable has been added to $PATH.')
91 |
92 | def install(self):
93 | """Installation logic is here."""
94 | if not self._is_installed():
95 | _logger.debug('Installing {name}...'.format(name=self.file_name))
96 | self._download_archive()
97 | self._unpack_archive()
98 | self._remove_archive()
99 | self._make_executable()
100 | else:
101 | _logger.debug('{name} is already installed.'.format(name=self.file_name))
102 |
103 | self._add_variables()
104 |
105 |
class TerraformInstaller(BaseInstaller):
    """Terraform installer."""

    version = '0.11.13'
    file_name = 'terraform'
    archive_name = 'terraform_{v}_{platform}_amd64.zip'.format(v=version, platform=sys.platform.lower())
    # HashiCorp release layout: .../terraform/<version>/<archive-name>.
    # The archive file name must be substituted into the URL, otherwise the
    # download link is broken.
    url = 'https://releases.hashicorp.com/terraform/{v}/{filename}'.format(v=version, filename=archive_name)
113 |
114 |
class TerraformInventoryInstaller(BaseInstaller):
    """Terraform inventory installer."""

    version = 'v0.8'
    file_name = 'terraform-inventory'
    archive_name = 'terraform-inventory_{v}_{platform}_amd64.zip'.format(v=version, platform=sys.platform.lower())
    # GitHub release asset layout: .../releases/download/<tag>/<asset-name>.
    # The archive file name must be substituted into the URL, otherwise the
    # download link is broken.
    url = (
        'https://github.com/adammck/terraform-inventory/releases/download/{v}/{filename}'
    ).format(v=version, filename=archive_name)
124 |
125 |
if __name__ == '__main__':
    # Install both tools into ~/.tank/bin and put that directory on $PATH.
    bin_directory = os.path.join(os.path.expanduser('~'), '.tank', 'bin')
    for installer_class in (TerraformInstaller, TerraformInventoryInstaller):
        installer_class(bin_directory).install()
130 |
--------------------------------------------------------------------------------
/tank/version.py:
--------------------------------------------------------------------------------
1 |
2 | # Copyright (c) Django Software Foundation and individual contributors.
3 | # All rights reserved.
4 | #
5 | # Redistribution and use in source and binary forms, with or without
6 | # modification, are permitted provided that the following conditions are met:
7 | #
8 | # 1. Redistributions of source code must retain the above copyright notice,
9 | # this list of conditions and the following disclaimer.
10 | #
11 | # 2. Redistributions in binary form must reproduce the above copyright
12 | # notice, this list of conditions and the following disclaimer in the
13 | # documentation and/or other materials provided with the distribution.
14 | #
15 | # 3. Neither the name of Django nor the names of its contributors may be
16 | # used to endorse or promote products derived from this software without
17 | # specific prior written permission.
18 | #
19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 | # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 | # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 | # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 | # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 | # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 | # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 | # POSSIBILITY OF SUCH DAMAGE.
30 | #
31 |
32 | # The following code was copied from the Django project, and only lightly
33 | # modified. Please adhere to the above copyright and license for the code
34 | # in this file.
35 |
36 | # Note: Nothing is covered here because this file is imported before nose and
37 | # coverage take over.. and so its a false positive that nothing is covered.
38 |
39 | import datetime # pragma: nocover
40 | import os # pragma: nocover
41 | import subprocess # pragma: nocover
42 |
43 |
VERSION = (1, 0, 2, 'final', 0)


def get_version():  # pragma: nocover
    """Build a PEP 386-compliant version string from VERSION."""
    release = VERSION

    assert len(release) == 5
    assert release[3] in ('alpha', 'beta', 'rc', 'final')

    # The version string is main + sub:
    #   main = X.Y.Z (all three numeric parts are always emitted)
    #   sub  = .devN for pre-alpha snapshots, or {a|b|c}N for alpha/beta/rc
    main_part = '.'.join(str(piece) for piece in release[:3])

    suffix = ''
    if release[3] == 'alpha' and release[4] == 0:
        # Pre-alpha: derive a .devN suffix from the latest git commit time.
        changeset = _get_git_changeset()
        if changeset:
            suffix = '.dev%s' % changeset

    elif release[3] != 'final':
        suffix = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}[release[3]] + str(release[4])

    return main_part + suffix
76 |
77 |
def _get_git_changeset():  # pragma: nocover
    """Returns a numeric identifier of the latest git changeset.

    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
    This value isn't guaranteed to be unique, but collisions are very
    unlikely, so it's sufficient for generating the development version
    numbers.

    Returns None when the timestamp cannot be determined (e.g. not a git
    checkout, or git is not installed).
    """
    # Repository root: two levels up from this file (<repo>/tank/version.py).
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    # %ct = committer date as a unix timestamp; -1 limits output to one commit.
    git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               shell=True, cwd=repo_dir,
                               universal_newlines=True)
    timestamp = git_log.communicate()[0]
    try:
        timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
    except ValueError:  # pragma: nocover
        # Empty or non-numeric output (no repo / git missing): no changeset.
        return None  # pragma: nocover
    return timestamp.strftime('%Y%m%d%H%M%S')
97 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """
2 | PyTest Fixtures.
3 | """
4 |
5 | import pytest
6 | from cement import fs
7 |
@pytest.fixture(scope="function")
def tmp(request):
    """
    Create a `tmp` object that generates a unique temporary directory and file
    for each test function that requires it.

    The temporary location is removed again when the test finishes.
    """
    t = fs.Tmp()
    yield t
    t.remove()
17 |
--------------------------------------------------------------------------------
/tests/test_main.py:
--------------------------------------------------------------------------------
1 |
2 | from tank.main import MixbytesTankTest
3 |
def test_tank(tmp):
    """Smoke test: the app runs without subcommands and exits cleanly."""
    # The previous body printed app.run()'s result and then raised a bare
    # Exception -- leftover debugging code that made the test always fail.
    with MixbytesTankTest() as app:
        app.run()
        assert app.exit_code == 0
9 |
def test_cluster(tmp):
    """Smoke test: the 'cluster' subcommand runs without raising."""
    argv = ['cluster']
    with MixbytesTankTest(argv=argv) as app:
        app.run()
14 |
--------------------------------------------------------------------------------
/tests/test_tank.py:
--------------------------------------------------------------------------------
1 |
2 | from pytest import raises
3 | from tank.main import MixbytesTankTest
4 |
def test_tank():
    """Tank without any subcommands or arguments must exit with code 0."""
    with MixbytesTankTest() as app:
        app.run()
        assert app.exit_code == 0
10 |
11 |
def test_tank_debug():
    """The --debug flag must switch the app into debug mode."""
    argv = ['--debug']
    with MixbytesTankTest(argv=argv) as app:
        app.run()
        assert app.debug is True
18 |
19 |
def test_cluster():
    """Exercise the 'cluster' controller with and without --debug."""
    # cluster without extra arguments
    argv = ['cluster']
    with MixbytesTankTest(argv=argv) as app:
        app.run()
        data, output = app.last_rendered
        # str.find() returns -1 (truthy) on a miss, so the old
        # `assert output.find(...)` could never fail; assert membership.
        assert 'Manipulating of cluster' in output

    # cluster with --debug
    argv = ['cluster', '--debug']
    with MixbytesTankTest(argv=argv) as app:
        app.run()
        data, output = app.last_rendered
        # NOTE(review): the previous vacuous check looked for 'Foo => not-bar',
        # which appears to be cement template scaffolding rather than real
        # cluster output -- confirm the expected substring before asserting it.
37 |
--------------------------------------------------------------------------------
/tests/test_testcase.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import tempfile
3 |
4 | import pytest
5 |
6 | from tank.core.exc import TankTestCaseError
7 | from tank.core import testcase as tc
8 | from tank.core.regions import RegionsConfig
9 | from tank.core.utils import yaml_dump
10 | from tank.main import MixbytesTank
11 |
12 |
# Reference testcase used by the validation tests below; each test deep-copies
# it in setup() and mutates its own copy.
content = {
    'binding': 'polkadot',
    'instances': {
        # instance-wide defaults
        'type': 'standard',
        'packetloss': 10,

        # full "params" form
        'boot': {
            'count': 1,
            'type': 'large',
            'packetloss': 5,
        },

        # bare-count shorthand
        'producer': 1,

        'test': {
            'count': 1,
        },

        # per-region form; region counts (1+1+1+3) must equal 'count' (6)
        'name': {
            'type': 'small',
            'packetloss': 3,
            'count': 6,
            'regions': {
                'Europe': {
                    'count': 1,
                    'type': 'large',
                    'packetloss': 15,
                },
                'Asia': {
                    'count': 1,
                    'type': 'standard',
                },
                'NorthAmerica': 1,
                'random': {
                    'type': 'standard',
                    'count': 3,
                },
            }
        }
    },
    'ansible': {
        'forks': 50,
    },
}
57 |
58 |
class TestTestcaseValidation:
    """Validation scenarios for tc.TestCaseValidator."""

    def setup(self):
        # each test mutates the fixture, so start from a deep copy
        self._content = copy.deepcopy(content)

    def _test(self, raises=None):
        """Validate the current fixture; expect `raises` if given."""
        validator = tc.TestCaseValidator(self._content, 'filename')
        if raises is None:
            validator.validate()
        else:
            with pytest.raises(raises):
                validator.validate()

    def test_invalid_keys(self):
        self._content['invalid'] = 0
        self._test(raises=TankTestCaseError)

    def test_required_keys(self):
        del self._content['instances']
        self._test(raises=TankTestCaseError)

    def test_reserved_names_check(self):
        self._content['instances']['monitoring'] = 1
        self._test(raises=TankTestCaseError)

    def test_counts_equality_check(self):
        self._test()  # sanity: fixture is valid as-is

        # make the role-level count disagree with the per-region sum
        self._content['instances']['name']['count'] = 1
        self._test(raises=TankTestCaseError)

    def test_invalid_configuration(self):
        self._content['instances']['new'] = 'string'
        self._test(raises=TankTestCaseError)

    def test_invalid_count(self):
        self._content['instances']['new'] = -100
        self._test(raises=TankTestCaseError)

    def test_invalid_packetloss(self):
        self._content['instances']['name']['packetloss'] = 120
        self._test(raises=TankTestCaseError)

    def test_invalid_type(self):
        self._content['instances']['type'] = 'a'
        self._test(raises=TankTestCaseError)

    def test_invalid_region(self):
        self._content['instances']['name']['regions']['a'] = 1
        self._test(raises=TankTestCaseError)

    def test_valid_testcase(self):
        self._test()
112 |
113 |
class TestInstancesCanonizer:
    """Behaviour checks for tc.InstancesCanonizer."""

    @staticmethod
    def _canonize_and_compare(source: dict, expected: dict):
        """Run the canonizer over `source` and compare with `expected`."""
        assert tc.InstancesCanonizer(source).canonize() == expected

    def test_integer_config_canonization(self):
        # a bare integer role expands to a 'default' region entry
        source = {'role': 10}

        defaults = tc.InstancesCanonizer._GENERAL_OPTIONS
        expected = {
            'role': {
                'default': {
                    'type': defaults['type'],
                    'packetloss': defaults['packetloss'] / 100,
                    'count': source['role'],
                },
            },
        }

        self._canonize_and_compare(source, expected)

    def test_minimal_object_config_canonization(self):
        # an object with only a count picks up every general default
        source = {
            'role': {
                'count': 10,
            }
        }

        defaults = tc.InstancesCanonizer._GENERAL_OPTIONS
        expected = {
            'role': {
                'default': {
                    'type': defaults['type'],
                    'packetloss': defaults['packetloss'] / 100,
                    'count': source['role']['count'],
                },
            },
        }

        self._canonize_and_compare(source, expected)

    def test_generally_applicable_options_applying(self):
        # top-level type/packetloss override the built-in defaults
        source = {
            'type': 'large',
            'packetloss': 50,

            'role': 10,
        }

        expected = {
            'role': {
                'default': {
                    'type': source['type'],
                    'packetloss': source['packetloss'] / 100,
                    'count': source['role'],
                },
            },
        }

        self._canonize_and_compare(source, expected)

    def test_full_object_config_canonization(self):
        # explicit regions: per-region options win, missing ones fall
        # back to the role-level options
        source = {
            'role': {
                'type': 'small',
                'packetloss': 3,
                'count': 2,

                'regions': {
                    'Europe': {
                        'count': 1,
                        'type': 'large',
                        'packetloss': 15,
                    },
                    'Asia': 1,
                },
            },
        }

        europe = source['role']['regions']['Europe']
        expected = {
            'role': {
                'Europe': {
                    'count': europe['count'],
                    'type': europe['type'],
                    'packetloss': europe['packetloss'] / 100,
                },
                'Asia': {
                    'count': source['role']['regions']['Asia'],
                    'type': source['role']['type'],
                    'packetloss': source['role']['packetloss'] / 100,
                },
            },
        }

        self._canonize_and_compare(source, expected)
211 |
212 |
class TestRegionsConverter:
    """Behaviour checks for tc.RegionsConverter."""

    def setup_class(self):
        # build a real app context once per class: the provider's region
        # mapping, the configured provider, and the converter under test
        with MixbytesTank() as app:
            self._regions_config = RegionsConfig(app).config
            self._provider = app.provider
            self._converter = tc.RegionsConverter(app)

    def _find_configuration(self, target: dict, container: list) -> bool:
        """True iff some entry of `container` matches `target` on all group parameters."""
        params = tc.RegionsConverter._GROUP_PARAMETERS
        return any(
            all(candidate[p] == target[p] for p in params)
            for candidate in container
        )

    def _test(self, source: dict, expected: dict):
        """Convert `source` and check every produced configuration is expected."""
        actual = self._converter.convert(source)
        assert len(actual) == len(expected)

        for role, group in actual.items():
            assert role in expected
            for configuration in group:
                assert self._find_configuration(configuration, expected[role])

    def test_convert_regions(self):
        # named regions map straight onto the provider's region codes
        source = {
            'role': {
                'Europe': {
                    'count': 1,
                    'type': 'small',
                    'packetloss': 0,
                },
                'Asia': {
                    'count': 1,
                    'type': 'small',
                    'packetloss': 0,
                },
            }
        }

        provider_regions = self._regions_config[self._provider]
        expected = {
            'role': [
                {
                    'region': provider_regions['Europe'],
                    **source['role']['Europe'],
                },
                {
                    'region': provider_regions['Asia'],
                    **source['role']['Asia'],
                },
            ]
        }

        self._test(source, expected)

    def test_merging_configurations(self):
        # 'random' instances are spread over concrete regions and merged
        # with any explicitly configured region that matches
        source = {
            'role': {
                'random': {
                    'count': 3,
                    'type': 'small',
                    'packetloss': 0,
                },
                'Asia': {
                    'count': 1,
                    'type': 'small',
                    'packetloss': 0,
                },
            }
        }

        provider_regions = self._regions_config[self._provider]
        random_cfg = source['role']['random']
        expected = {
            'role': [
                {
                    'region': provider_regions['NorthAmerica'],
                    'count': 1,
                    'type': random_cfg['type'],
                    'packetloss': random_cfg['packetloss'],
                },
                {
                    'region': provider_regions['Asia'],
                    'count': 2,
                    'type': random_cfg['type'],
                    'packetloss': random_cfg['packetloss'],
                },
                {
                    'region': provider_regions['Europe'],
                    'count': 1,
                    'type': random_cfg['type'],
                    'packetloss': random_cfg['packetloss'],
                },
            ]
        }

        self._test(source, expected)
309 |
310 |
class TestTestcaseClass:
    """End-to-end checks for tc.TestCase loaded from a YAML file."""

    def setup_class(self):
        # dump the shared fixture to a temporary YAML file and load it
        # through TestCase — construction itself exercises parsing
        tmp = tempfile.NamedTemporaryFile()
        yaml_dump(tmp.name, data=content)

        with MixbytesTank() as app:
            self._testcase = tc.TestCase(tmp.name, app)

    def test_total_instances(self):
        # fixture totals: 1 boot + 1 producer + 1 test + 6 'name' = 9
        assert self._testcase.total_instances == 9

    def test_binding(self):
        assert self._testcase.binding == content['binding']
326 |
--------------------------------------------------------------------------------
/web3_foundation_grants_badge_black.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mixbytes/tank/b5595df8c97a101e074696a93898ccbfa210261d/web3_foundation_grants_badge_black.png
--------------------------------------------------------------------------------