├── yandextank
├── __init__.py
├── common
│ ├── __init__.py
│ ├── tests
│ │ ├── __init__.py
│ │ ├── test_monitoring.py
│ │ └── test_interfaces.py
│ ├── const.py
│ └── exceptions.py
├── contrib
│ ├── __init__.py
│ └── netort
│ │ ├── __init__.py
│ │ ├── netort
│ │ ├── __init__.py
│ │ ├── data_manager
│ │ │ ├── common
│ │ │ │ ├── __init__.py
│ │ │ │ ├── tests
│ │ │ │ │ └── test_util.py
│ │ │ │ ├── condition.py
│ │ │ │ └── util.py
│ │ │ ├── metrics
│ │ │ │ ├── __init__.py
│ │ │ │ ├── tests
│ │ │ │ │ ├── metric_data_output_histogram_1.csv
│ │ │ │ │ ├── metric_data_output_distributions_2.csv
│ │ │ │ │ ├── metric_data_input_event_1.csv
│ │ │ │ │ ├── metric_data_input_metric_2.csv
│ │ │ │ │ ├── metric_data_output_quantile_2.csv
│ │ │ │ │ └── test_aggregate.py
│ │ │ │ ├── event.py
│ │ │ │ └── metric.py
│ │ │ ├── clients
│ │ │ │ ├── __init__.py
│ │ │ │ └── tests
│ │ │ │ │ └── test_luna.py
│ │ │ ├── __init__.py
│ │ │ └── tests
│ │ │ │ └── test_router.py
│ │ ├── process.py
│ │ └── data_processing.py
│ │ └── tests
│ │ ├── conftest.py
│ │ └── test_openers.py
├── plugins
│ ├── __init__.py
│ ├── Console
│ │ ├── __init__.py
│ │ ├── config
│ │ │ └── schema.yaml
│ │ └── tests
│ │ │ └── test_spark.py
│ ├── JMeter
│ │ ├── __init__.py
│ │ └── config
│ │ │ ├── jmeter_var_template.xml
│ │ │ ├── schema.yaml
│ │ │ └── jmeter_writer.xml
│ ├── Phantom
│ │ ├── __init__.py
│ │ ├── tests
│ │ │ ├── expected_df.dat
│ │ │ ├── test_log_analyzer.py
│ │ │ └── test_reader.py
│ │ ├── _schema.yaml
│ │ ├── config
│ │ │ ├── phantom_benchmark_additional.tpl
│ │ │ ├── phantom.conf.tpl
│ │ │ └── phantom_benchmark_main.tpl
│ │ └── log_analyzer.py
│ ├── Platform
│ │ └── __init__.py
│ ├── RCAssert
│ │ ├── __init__.py
│ │ ├── config
│ │ │ └── schema.yaml
│ │ └── plugin.py
│ ├── Telegraf
│ │ ├── __init__.py
│ │ ├── tests
│ │ │ ├── target_hint_no_hosts.yaml
│ │ │ ├── target_hint.yaml
│ │ │ ├── target_hint.xml
│ │ │ ├── old_mon.xml
│ │ │ ├── telegraf_mon.xml
│ │ │ ├── telegraf_mon.yaml
│ │ │ ├── telegraf_global_inputs.yaml
│ │ │ ├── test_plugin.py
│ │ │ └── test_config_parser.py
│ │ ├── agent
│ │ │ └── __init__.py
│ │ ├── config
│ │ │ ├── monitoring_default_config.xml
│ │ │ └── schema.yaml
│ │ ├── decoder.py
│ │ └── config_parser.py
│ ├── Bfg
│ │ ├── __init__.py
│ │ ├── example
│ │ │ ├── ultimate_gun.py
│ │ │ └── scenario_gun.py
│ │ ├── widgets.py
│ │ └── reader.py
│ ├── Pandora
│ │ ├── __init__.py
│ │ ├── config
│ │ │ ├── pandora_pool_default.json
│ │ │ └── schema.yaml
│ │ ├── reader.py
│ │ └── tests
│ │ │ └── test_pandora_plugin.py
│ ├── ResourceCheck
│ │ ├── __init__.py
│ │ ├── config
│ │ │ └── schema.yaml
│ │ └── plugin.py
│ ├── ShellExec
│ │ ├── __init__.py
│ │ ├── tests
│ │ │ └── test_shellexec_plugin.py
│ │ ├── config
│ │ │ └── schema.yaml
│ │ └── plugin.py
│ ├── DataUploader
│ │ ├── __init__.py
│ │ ├── config
│ │ │ └── postloader_schema.yaml
│ │ └── tests
│ │ │ ├── test_uploader_plugin.py
│ │ │ ├── test_postloader
│ │ │ ├── test_empty
│ │ │ │ └── validated_conf.yaml
│ │ │ ├── test_disabled
│ │ │ │ └── validated_conf.yaml
│ │ │ └── test_full
│ │ │ │ └── validated_conf.yaml
│ │ │ └── test_postloader.py
│ ├── JsonReport
│ │ ├── __init__.py
│ │ ├── config
│ │ │ └── schema.yaml
│ │ └── plugin.py
│ ├── OfflineReport
│ │ ├── __init__.py
│ │ └── config
│ │ │ └── schema.yaml
│ ├── ShootExec
│ │ ├── __init__.py
│ │ ├── config
│ │ │ └── schema.yaml
│ │ └── tests
│ │ │ └── test_reader.py
│ ├── YCMonitoring
│ │ ├── __init__.py
│ │ ├── tests
│ │ │ ├── COUNTER.json
│ │ │ ├── RATE.json
│ │ │ ├── IGAUGE.json
│ │ │ ├── DGAUGE.json
│ │ │ └── test_sensor.py
│ │ ├── config
│ │ │ └── schema.yaml
│ │ └── plugin.py
│ ├── OpenTSDBUploader
│ │ ├── __init__.py
│ │ ├── client
│ │ │ └── __init__.py
│ │ ├── config
│ │ │ └── schema.yaml
│ │ ├── tests
│ │ │ └── test_opentsdb_decoder.py
│ │ └── plugin.py
│ ├── InfluxUploader
│ │ ├── __init__.py
│ │ ├── config
│ │ │ └── schema.yaml
│ │ ├── tests
│ │ │ └── test_influxdb_decoder.py
│ │ └── plugin.py
│ └── Autostop
│ │ ├── __init__.py
│ │ └── config
│ │ └── schema.yaml
├── validator
│ ├── __init__.py
│ └── tests
│ │ └── test_schema.yaml
├── config_converter
│ └── __init__.py
├── version.py
├── stepper
│ ├── tests
│ │ ├── test-caseline.txt
│ │ ├── protobuf-expected.stpd
│ │ ├── test-protobuf-autocases.txt
│ │ ├── loop1.stpd
│ │ ├── test-unicode.txt
│ │ ├── instances1.stpd
│ │ ├── test-uripost.txt
│ │ ├── test-ammo.txt
│ │ └── caseline-expected.stpd
│ ├── __init__.py
│ ├── module_exceptions.py
│ ├── util.py
│ ├── format.py
│ └── mark.py
├── core
│ ├── tests
│ │ ├── phantom_mock.sh
│ │ ├── test_monitoring.xml
│ │ ├── test_multi_cfg.yaml
│ │ └── test_lock.py
│ ├── __init__.py
│ ├── config
│ │ ├── plugins_schema.yaml
│ │ ├── 00-base.ini
│ │ ├── 00-base.yaml
│ │ └── schema.yaml
│ └── expvar.py
├── ammo_validator
│ ├── validators
│ │ ├── __init__.py
│ │ ├── uri_inline.py
│ │ └── uri.py
│ ├── __init__.py
│ └── tests
│ │ ├── test-uri-2
│ │ ├── test-uri-bad
│ │ ├── test-phantom-binary
│ │ ├── test-uri
│ │ ├── test-uripost-bad-2
│ │ ├── test-uripost-bad-1
│ │ ├── test-uripost
│ │ ├── test-json-grpc
│ │ ├── test-phantom
│ │ ├── conftest.py
│ │ ├── test-json-http
│ │ └── test-pandora-inline.yaml
├── aggregator
│ ├── __init__.py
│ ├── tests
│ │ ├── phout2927
│ │ ├── test_test.py
│ │ ├── conftest.py
│ │ ├── test_chopper.py
│ │ ├── test_pipeline.py
│ │ └── test_aggregator.py
│ ├── config
│ │ └── phout.json
│ └── chopper.py
└── api
│ └── config
│ ├── 00-base.ini
│ └── 00-base.yaml
├── MANIFEST.in
├── setup.cfg
├── phantom_mock.sh
├── mocks
├── README.md
└── shootexec-shooter.py
├── .piglet-meta.json
├── docs
├── README.md
├── pic
│ ├── tank-bfg.png
│ ├── tank-lifecycle.png
│ ├── tank-stepper.png
│ ├── overload-screen.png
│ ├── tank-architecture.png
│ └── monitoring_backward_compatibility_grapf.png
├── YandexTankConfigMemo.odt
├── requirements.txt
├── docs_gen.sh
├── example_cfgs
│ └── tutorial_load1.yaml
├── index.rst
└── intro.rst
├── logos
├── screen.png
├── tank16.jpeg
├── tank192.jpg
├── tank200.jpg
└── tank64.jpg
├── pytest.ini
├── stdeb.cfg
├── .style.yapf
├── .travis.yml
├── docker
├── files
│ ├── inputrc
│ └── bashrc
├── Dockerfile.mobile
├── Dockerfile.jmeter
└── Dockerfile
├── Dockerfile-test
├── .flake8
├── contrib
└── gatling
│ └── gatling.conf
├── LICENSE.md
├── data
├── yandex-tank.completion
├── PKGBUILD
└── yandex-tank.spec
├── .readthedocs.yaml
├── AUTHORS.md
└── README.md
/yandextank/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/yandextank/common/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/yandextank/contrib/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/yandextank/plugins/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/yandextank/validator/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-exclude tests *
--------------------------------------------------------------------------------
/yandextank/common/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/yandextank/contrib/netort/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [aliases]
2 | test=pytest
3 |
--------------------------------------------------------------------------------
/yandextank/config_converter/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/yandextank/version.py:
--------------------------------------------------------------------------------
1 | VERSION = '2.0.14'
2 |
--------------------------------------------------------------------------------
/phantom_mock.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | echo "foo"
--------------------------------------------------------------------------------
/mocks/README.md:
--------------------------------------------------------------------------------
1 | Mocks to test & develop various tank components
2 |
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/common/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/yandextank/stepper/tests/test-caseline.txt:
--------------------------------------------------------------------------------
1 | test1 test1
2 | test2
3 |
--------------------------------------------------------------------------------
/yandextank/core/tests/phantom_mock.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | echo "Foo"
--------------------------------------------------------------------------------
/yandextank/plugins/Console/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import * # noqa
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/JMeter/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import * # noqa
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/Phantom/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import * # noqa
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/Platform/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import * # noqa
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/RCAssert/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import * # noqa
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import * # noqa
2 |
--------------------------------------------------------------------------------
/.piglet-meta.json:
--------------------------------------------------------------------------------
1 | {
2 | "project":"tank",
3 | "repository":"arcadia"
4 | }
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/yandextank/ammo_validator/validators/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/Bfg/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import Plugin # noqa:F401
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/Pandora/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import Plugin # noqa
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/ResourceCheck/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import * # noqa
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/ShellExec/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import * # noqa
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/DataUploader/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import Plugin # noqa
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/JsonReport/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import Plugin # noqa
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/OfflineReport/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import Plugin # noqa
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/ShootExec/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import Plugin # noqa
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/YCMonitoring/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import Plugin # noqa
2 |
--------------------------------------------------------------------------------
/logos/screen.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/logos/screen.png
--------------------------------------------------------------------------------
/logos/tank16.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/logos/tank16.jpeg
--------------------------------------------------------------------------------
/logos/tank192.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/logos/tank192.jpg
--------------------------------------------------------------------------------
/logos/tank200.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/logos/tank200.jpg
--------------------------------------------------------------------------------
/logos/tank64.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/logos/tank64.jpg
--------------------------------------------------------------------------------
/yandextank/plugins/OpenTSDBUploader/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import Plugin # noqa
2 |
--------------------------------------------------------------------------------
/yandextank/plugins/InfluxUploader/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import Plugin # noqa:F401
2 |
--------------------------------------------------------------------------------
/docs/pic/tank-bfg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/docs/pic/tank-bfg.png
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/tests/target_hint_no_hosts.yaml:
--------------------------------------------------------------------------------
1 | metrics:
2 |   cpu:
3 |   system:
4 |
--------------------------------------------------------------------------------
/yandextank/plugins/OpenTSDBUploader/client/__init__.py:
--------------------------------------------------------------------------------
1 | from .client import OpenTSDBClient # noqa
2 |
--------------------------------------------------------------------------------
/docs/pic/tank-lifecycle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/docs/pic/tank-lifecycle.png
--------------------------------------------------------------------------------
/docs/pic/tank-stepper.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/docs/pic/tank-stepper.png
--------------------------------------------------------------------------------
/yandextank/common/const.py:
--------------------------------------------------------------------------------
1 | class RetCode:
2 |     CONTINUE = -1
3 |     SUCCESS = 0
4 |     ERROR = 1
5 |
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/agent/__init__.py:
--------------------------------------------------------------------------------
1 | '''
2 | Agent to be installed at remote server
3 | '''
4 |
--------------------------------------------------------------------------------
/docs/YandexTankConfigMemo.odt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/docs/YandexTankConfigMemo.odt
--------------------------------------------------------------------------------
/docs/pic/overload-screen.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/docs/pic/overload-screen.png
--------------------------------------------------------------------------------
/docs/pic/tank-architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/docs/pic/tank-architecture.png
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | norecursedirs = build dist .eggs .tox .env tmp .env3
3 | addopts = --doctest-glob=*_doctest.txt
4 |
--------------------------------------------------------------------------------
/yandextank/ammo_validator/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from .validator import validate
3 |
4 | __all__ = ['validate']
5 |
--------------------------------------------------------------------------------
/yandextank/plugins/Autostop/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import Plugin # noqa
2 | from .criterions import AbstractCriterion # noqa
3 |
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/tests/target_hint.yaml:
--------------------------------------------------------------------------------
1 | hosts:
2 | "[target]":
3 | metrics:
4 | cpu:
5 | system:
6 |
--------------------------------------------------------------------------------
/yandextank/aggregator/__init__.py:
--------------------------------------------------------------------------------
1 | from .chopper import TimeChopper # noqa
2 | from .tank_aggregator import TankAggregator # noqa
3 |
--------------------------------------------------------------------------------
/yandextank/core/__init__.py:
--------------------------------------------------------------------------------
1 | '''
2 | Package contains all tank tool core code
3 | '''
4 |
5 | from .tankcore import TankCore # noqa
6 |
--------------------------------------------------------------------------------
/yandextank/ammo_validator/tests/test-uri-2:
--------------------------------------------------------------------------------
1 | /apiSome?discount=true&filter=name=Элеганс
2 | /someApi?discount=true&filter=name=français tag
3 |
--------------------------------------------------------------------------------
/yandextank/ammo_validator/tests/test-uri-bad:
--------------------------------------------------------------------------------
1 | [Cookie: value1]
2 | [Cookie: value2
3 | /someApi?discount=true&filter=name=français tag1 tag2
4 |
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from .metric import Metric # noqa
2 | from .event import Event # noqa
3 |
--------------------------------------------------------------------------------
/yandextank/plugins/RCAssert/config/schema.yaml:
--------------------------------------------------------------------------------
1 | pass:
2 |   type: string
3 |   default: ''
4 | fail_code:
5 |   type: integer
6 |   default: 10
--------------------------------------------------------------------------------
/yandextank/stepper/tests/protobuf-expected.stpd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/yandextank/stepper/tests/protobuf-expected.stpd
--------------------------------------------------------------------------------
/yandextank/plugins/Phantom/tests/expected_df.dat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/yandextank/plugins/Phantom/tests/expected_df.dat
--------------------------------------------------------------------------------
/docs/pic/monitoring_backward_compatibility_grapf.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/docs/pic/monitoring_backward_compatibility_grapf.png
--------------------------------------------------------------------------------
/yandextank/ammo_validator/tests/test-phantom-binary:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/yandextank/ammo_validator/tests/test-phantom-binary
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/metrics/tests/metric_data_output_histogram_1.csv:
--------------------------------------------------------------------------------
1 | ts,category,cnt
2 | 1574164720,200,4
3 | 1574164721,200,3
4 |
--------------------------------------------------------------------------------
/yandextank/stepper/tests/test-protobuf-autocases.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yandex/yandex-tank/HEAD/yandextank/stepper/tests/test-protobuf-autocases.txt
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | # Defining the exact version will make sure things don't break
2 | sphinx==5.3.0
3 | sphinx_rtd_theme==1.1.1
4 | readthedocs-sphinx-search==0.1.1
5 |
--------------------------------------------------------------------------------
/stdeb.cfg:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | Package = yandex-tank
3 | Depends = phantom (=0.14.0~pre65load2nmu), phantom-ssl(=0.14.0~pre65load2nmu)
4 | Conflicts = yandex-load-tank-base
5 |
--------------------------------------------------------------------------------
/yandextank/stepper/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | from .main import Stepper, StepperWrapper # noqa
3 | from .info import StepperInfo # noqa
4 | from .format import StpdReader # noqa
5 |
--------------------------------------------------------------------------------
/yandextank/ammo_validator/tests/test-uri:
--------------------------------------------------------------------------------
1 | [Http-Header: value1]
2 | [Cookie: value2]
3 | /apiSome?discount=true&filter=name=Элеганс
4 | /someApi?discount=true&filter=name=français tag
5 |
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/config/monitoring_default_config.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/metrics/tests/metric_data_output_distributions_2.csv:
--------------------------------------------------------------------------------
1 | second,cnt,l,r,ts
2 | 569,1,29000,30000,1574164720
3 | 570,3,30000,31000,1574164720
4 | 570,3,30000,31000,1574164721
5 |
--------------------------------------------------------------------------------
/.style.yapf:
--------------------------------------------------------------------------------
1 | [style]
2 | based_on_style = pep8
3 | COALESCE_BRACKETS = True
4 | COLUMN_LIMIT = 80
5 | DEDENT_CLOSING_BRACKETS = False
6 | SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = True
7 | SPLIT_BEFORE_FIRST_ARGUMENT = True
--------------------------------------------------------------------------------
/yandextank/aggregator/tests/phout2927:
--------------------------------------------------------------------------------
1 | 1502376593.698 "Technology 797 208 12 521 56 670 31 315 0 404
2 | 1502376594.699 "/v1/tech/ru-RU/latest/maps/jsapi", 750 206 11 452 81 602 24 315 0 404
3 | 1502376597.698 #3 669 146 9 410 104 581 18 315 0 404
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 |   - "3.7"
4 | install:
5 |   - "pip install --upgrade setuptools"
6 |   - "pip install flake8"
7 |   - "pip install ."
8 | script:
9 |   - "flake8 --config .flake8 ."
10 |   - "python setup.py test"
11 |
--------------------------------------------------------------------------------
/yandextank/plugins/JsonReport/config/schema.yaml:
--------------------------------------------------------------------------------
1 | monitoring_log:
2 |   description: file name for monitoring log
3 |   type: string
4 |   default: monitoring.log
5 | test_data_log:
6 |   description: file name for test data log
7 |   type: string
8 |   default: test_data.log
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/metrics/tests/metric_data_input_event_1.csv:
--------------------------------------------------------------------------------
1 | ,ts,value
2 | 0,1574164720249248,200
3 | 1,1574164720450572,200
4 | 2,1574164720649228,200
5 | 3,1574164720849529,200
6 | 4,1574164721049295,200
7 | 5,1574164721249847,200
8 | 6,1574164721451305,200
9 |
--------------------------------------------------------------------------------
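
The expected histogram output shown earlier (metric_data_output_histogram_1.csv) lines up with the raw events in this fixture: truncate the microsecond timestamps to whole seconds and count events per (second, value) pair. A minimal pandas sketch using only the values listed above; the 'second' column and the grouping step are illustrative, not the library's aggregation code:

import pandas as pd

# Raw events from metric_data_input_event_1.csv: 7 events, all with value 200.
events = pd.DataFrame({
    'ts': [1574164720249248, 1574164720450572, 1574164720649228, 1574164720849529,
           1574164721049295, 1574164721249847, 1574164721451305],
    'value': [200] * 7,
})
events['second'] = events['ts'] // 1_000_000  # microseconds -> whole seconds
counts = events.groupby(['second', 'value']).size()
print(counts)
# 4 events of value 200 in second 1574164720 and 3 in second 1574164721,
# matching the (ts, category, cnt) rows of metric_data_output_histogram_1.csv.
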
/docker/files/inputrc:
--------------------------------------------------------------------------------
1 | "\e[A": history-search-backward
2 | "\e[B": history-search-forward
3 | "\e[1;5C": shell-forward-word
4 | "\e[1;5D": shell-backward-word
5 | set completion-ignore-case On
6 | set completion-query-items 300
7 | set show-all-if-ambiguous On
8 | set skip-completed-text On
9 |
10 |
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/metrics/tests/metric_data_input_metric_2.csv:
--------------------------------------------------------------------------------
1 | ,ts,value
2 | 0,1574164720249248,30021
3 | 1,1574164720450572,30764
4 | 2,1574164720649228,29917
5 | 3,1574164720849529,30307
6 | 4,1574164721049295,30081
7 | 5,1574164721249847,30466
8 | 6,1574164721451305,30990
9 |
--------------------------------------------------------------------------------
/yandextank/stepper/tests/loop1.stpd:
--------------------------------------------------------------------------------
1 | 18 0
2 | GET / HTTP/1.1
3 |
4 |
5 | 21 1000
6 | GET /foo HTTP/1.1
7 |
8 |
9 | 18 2000
10 | GET / HTTP/1.1
11 |
12 |
13 | 21 3000
14 | GET /foo HTTP/1.1
15 |
16 |
17 | 18 4000
18 | GET / HTTP/1.1
19 |
20 |
21 | 21 5000
22 | GET /foo HTTP/1.1
23 |
24 |
25 |
--------------------------------------------------------------------------------
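
The .stpd fixtures (this one and instances1.stpd further down) appear to use a chunked ammo layout: a header line "<payload_size> <offset_ms>", followed by exactly that many bytes of request data and a blank separator line ("GET / HTTP/1.1\r\n\r\n" is 18 bytes, "GET /foo HTTP/1.1\r\n\r\n" is 21, and the offsets here step by 1000 ms). A minimal reader sketch under that assumption; it is an illustration of the layout, not the project's StpdReader:

def read_stpd(path):
    """Yield (offset_ms, payload_bytes) chunks from a .stpd file."""
    with open(path, 'rb') as stpd:
        while True:
            header = stpd.readline()
            if not header:
                return              # end of file
            if not header.strip():
                continue            # blank separator between chunks
            fields = header.split()
            size, offset_ms = int(fields[0]), int(fields[1])
            yield offset_ms, stpd.read(size)


for offset_ms, payload in read_stpd('yandextank/stepper/tests/loop1.stpd'):
    print(offset_ms, payload.splitlines()[0])
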
/yandextank/stepper/tests/test-unicode.txt:
--------------------------------------------------------------------------------
1 | /apiCars?image=true&dealer_org_type=4&dealer_org_type=1&dealer_org_type=2&sort_offers=fresh_relevance_1-desc&custom_state_key=CLEARED&custom_state_key=NOT_CLEARED&rid=47&in_stock=false&geo_radius=100&offer_grouping=false&page_num_offers=1&with_discount=true&catalog_filter=complectation_name=Элеганс
2 |
--------------------------------------------------------------------------------
/yandextank/validator/tests/test_schema.yaml:
--------------------------------------------------------------------------------
1 | kill_old:
2 |   type: list
3 |   default: [foo, bar]
4 |   elements:
5 |     type: string
6 |     allowed: [foo, bar]
7 |
8 | default_target:
9 |   default: localhost
10 |   any of:
11 |     - type: list
12 |       elements:
13 |         type: string
14 |     - allowed: auto
15 |       type: string
/yandextank/contrib/netort/netort/data_manager/clients/__init__.py:
--------------------------------------------------------------------------------
1 | from .local import LocalStorageClient
2 | from .luna import LunaClient
3 | from .lunapark_volta import LunaparkVoltaClient
4 |
5 | available_clients = {
6 |     'luna': LunaClient,
7 |     'local_storage': LocalStorageClient,
8 |     'lunapark_volta': LunaparkVoltaClient,
9 | }
10 |
--------------------------------------------------------------------------------
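
The available_clients dict above is a plain registry from backend names to client classes, so a data-session config that names its backend as a string ('luna', 'local_storage', 'lunapark_volta') can be resolved with an ordinary dict lookup. The helper below and its error message are illustrative, not netort's code:

from yandextank.contrib.netort.netort.data_manager.clients import available_clients


def resolve_client(backend_type):
    """Map a backend type string from a config to its client class."""
    try:
        return available_clients[backend_type]
    except KeyError:
        raise ValueError(
            "unknown backend type %r, expected one of %s"
            % (backend_type, sorted(available_clients))
        )


client_cls = resolve_client('local_storage')  # -> LocalStorageClient
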
/yandextank/ammo_validator/tests/test-uripost-bad-2:
--------------------------------------------------------------------------------
1 | [host: qwertyuiop.test.tst.yandex.net]
2 | [Content-Type: application/json]
3 | [Connection: Keep-Alive]
4 | 70 /v1/payment/check
5 | {"card": {"bin": "bin"}, "order_id": "order_id", "payment": {"type": "card"}}
6 | 77 /v1/payment/check
7 | {"card": {"bin": "bin"}, "order_id": "order_id", "payment": {"type": "card"}}
8 |
--------------------------------------------------------------------------------
/Dockerfile-test:
--------------------------------------------------------------------------------
1 | FROM load/yandex-tank-pip:testing
2 | WORKDIR /yandextank
3 | RUN apt-get update && \
4 |     apt-get install -y python3-pip
5 | RUN pip3 install --upgrade setuptools
6 | RUN pip3 install --upgrade pip
7 | RUN pip3 install pytest
8 | CMD pip3 install . && pytest -s
9 | # docker run -v /path/to/yandextank:/yandextank --name my_container my_image
10 |
--------------------------------------------------------------------------------
/yandextank/ammo_validator/tests/test-uripost-bad-1:
--------------------------------------------------------------------------------
1 | [host: qwertyuiop.test.tst.yandex.net]
2 | [Content-Type: application/json]
3 | [Connection: Keep-Alive]
4 | 777 /v1/payment/check
5 | {"card": {"bin": "bin"}, "order_id": "order_id", "payment": {"type": "card"}}
6 | 77 /v1/payment/check
7 | {"card": {"bin": "bin"}, "order_id": "order_id", "payment": {"type": "card"}}
8 |
--------------------------------------------------------------------------------
/yandextank/core/config/plugins_schema.yaml:
--------------------------------------------------------------------------------
1 | allow_unknown: true
2 | schema:
3 |   enabled:
4 |     description: enable/disable the execution of the plugin
5 |     type: boolean
6 |     required: true
7 |   package:
8 |     description: plugin\'s python package
9 |     empty: false
10 |     regex: '[^/]+'
11 |     required: true
12 |     type: string
13 | type: dict
14 |
--------------------------------------------------------------------------------
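
The schema files in this dump (plugins_schema.yaml above, the per-plugin config/schema.yaml files, Phantom's _schema.yaml) use Cerberus-style rules (type, required, regex, allow_unknown, anyof, nullable). A minimal sketch of how such a rule set validates a plugin config section, assuming the cerberus package; documentation-only keys such as description and examples are omitted because plain Cerberus rejects unknown rules, and the project's own validator lives in yandextank/validator:

from cerberus import Validator

# plugins_schema.yaml as a Python dict (what yaml.safe_load would return),
# with the documentation-only 'description' keys stripped.
plugin_rules = {
    'type': 'dict',
    'allow_unknown': True,
    'schema': {
        'enabled': {'type': 'boolean', 'required': True},
        'package': {'type': 'string', 'required': True, 'empty': False, 'regex': '[^/]+'},
    },
}

# 'console' is just an example section name for this sketch.
validator = Validator({'console': plugin_rules})
print(validator.validate({'console': {'enabled': True, 'package': 'yandextank.plugins.Console'}}))  # True
print(validator.validate({'console': {'enabled': True}}))  # False
print(validator.errors)  # reports 'package' as a required field
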
/yandextank/ammo_validator/tests/test-uripost:
--------------------------------------------------------------------------------
1 | [host: qwertyuiop.test.tst.yandex.net]
2 | [Content-Type: application/json]
3 | [Connection: Keep-Alive]
4 | 77 /v1/payment/check
5 | {"card": {"bin": "bin"}, "order_id": "order_id", "payment": {"type": "card"}}
6 | [Cookie: somecookie]
7 | 77 /v1/payment/check tag
8 | {"card": {"bin": "bin"}, "order_id": "order_id", "payment": {"type": "card"}}
9 |
--------------------------------------------------------------------------------
/yandextank/plugins/JMeter/config/jmeter_var_template.xml:
--------------------------------------------------------------------------------
1 |
2 | %s
3 | %s
4 | Auto from 'jmeter' section of Yandex.Tank
5 | =
6 |
7 |
--------------------------------------------------------------------------------
/yandextank/plugins/ResourceCheck/config/schema.yaml:
--------------------------------------------------------------------------------
1 | interval:
2 |   description: Frequency of checking free resources
3 |   type: string
4 |   default: 10s
5 | disk_limit:
6 |   description: Run if there is more disk space than (MB)
7 |   type: integer
8 |   default: 2048
9 | mem_limit:
10 |   description: Run if there is more free memory than (MB)
11 |   type: integer
12 |   default: 512
13 |
--------------------------------------------------------------------------------
/yandextank/plugins/ShootExec/config/schema.yaml:
--------------------------------------------------------------------------------
1 | cmd:
2 |   description: command that produces test results and stats in Phantom format
3 |   type: string
4 |   required: true
5 | output_path:
6 |   description: path to test results
7 |   type: string
8 |   required: true
9 | stats_path:
10 |   description: path to tests stats
11 |   type: string
12 |   default: null
13 |   nullable: true
14 |
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/tests/target_hint.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/tests/old_mon.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
/docs/docs_gen.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | printf "================\nConfig reference\n================\n" > config_reference.rst
3 | tank-docs-gen -o config_reference.rst --title "Core" -a "../yandextank/core/config/schema.yaml"
4 | for p in `find ../yandextank/plugins/ \( -name "schema.py" -o -name "schema.yaml" \)`
5 | do
6 |     tank-docs-gen -o config_reference.rst --title `echo $p | awk '{split($0,a,"\/"); print a[4]}'` -a $p
7 | done
8 |
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/__init__.py:
--------------------------------------------------------------------------------
1 | """Manages your test data
2 |
3 | * create the DataSession
4 | * specify metrics you want to save
5 | * specify the backends
6 | * this module will collect your data and save them to specified backends
7 | """
8 |
9 | # TODO: import only specific things that we really need to export
10 | from .manager import * # noqa
11 | from .common.interfaces import MetricData # noqa
12 |
--------------------------------------------------------------------------------
/yandextank/aggregator/tests/test_test.py:
--------------------------------------------------------------------------------
1 | from conftest import random_split
2 | import pandas as pd
3 | import numpy as np
4 |
5 |
6 | def test_random_split(data):
7 |     dataframes = list(random_split(data))
8 |     assert len(dataframes) > 1
9 |     concatenated = pd.concat(dataframes)
10 |     assert len(concatenated) == len(data), "We did not lose anything"
11 |     assert np.allclose(concatenated.values, data.values), "We did not corrupt the data"
12 |
--------------------------------------------------------------------------------
/docs/example_cfgs/tutorial_load1.yaml:
--------------------------------------------------------------------------------
1 | phantom:
2 |   address: 203.0.113.1:80 # [Target's address]:[target's port]
3 |   uris:
4 |     - /
5 |   load_profile:
6 |     load_type: rps # schedule load by defining requests per second
7 |     schedule: line(1, 10, 10m) # starting from 1rps growing linearly to 10rps during 10 minutes
8 | console:
9 |   enabled: true # enable console output
10 | telegraf:
11 |   enabled: false # let's disable telegraf monitoring for the first time
--------------------------------------------------------------------------------
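
As a quick sanity check of the schedule in this tutorial config: line(1, 10, 10m) ramps linearly from 1 rps to 10 rps over 10 minutes, so the total number of planned requests is roughly the area under that line (discretization aside):

# line(1, 10, 10m): linear ramp from 1 rps to 10 rps over 600 seconds.
start_rps, end_rps, duration_s = 1, 10, 10 * 60
total_requests = (start_rps + end_rps) / 2 * duration_s  # trapezoid area
print(total_requests)  # 3300.0 planned requests
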
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 |     .env,
4 |     .env3,
5 |     .git,
6 |     .cache,
7 |     .eggs,
8 |     .idea,
9 |     .Python,
10 |     docs/,
11 |     tmp/,
12 |     env/,
13 |     build/,
14 |     *.build/,
15 |     develop-eggs/,
16 |     dist/,
17 |     downloads/,
18 |     eggs/,
19 |     .eggs/,
20 |     lib/,
21 |     lib64/,
22 |     parts/,
23 |     sdist/,
24 |     var/,
25 |     *.egg-info/,
26 |     .installed.cfg,
27 |     *.egg
28 | ignore = E501 W503 W605
29 |
--------------------------------------------------------------------------------
/yandextank/stepper/module_exceptions.py:
--------------------------------------------------------------------------------
1 | class StepperConfigurationError(Exception):
2 |     '''
3 |     Raised when error in stepper configuration found.
4 |     '''
5 |
6 |
7 | class AmmoFileError(Exception):
8 |     '''
9 |     Raised when failed to read ammo file properly.
10 |     '''
11 |
12 |
13 | class StpdFileError(Exception):
14 |     '''
15 |     Raised when failed to read stpd file properly.
16 |     '''
17 |
18 |
19 | class DiskLimitError(Exception):
20 |     pass
21 |
--------------------------------------------------------------------------------
/yandextank/plugins/DataUploader/config/postloader_schema.yaml:
--------------------------------------------------------------------------------
1 | api_address:
2 |   type: string
3 |   required: true
4 | target_host:
5 |   type: string
6 |   default: undefined
7 | target_port:
8 |   anyof:
9 |     - type: string
10 |     - type: integer
11 |   default: 80
12 | operator:
13 |   type: string
14 |   nullable: true
15 |   default: null
16 | task:
17 |   type: string
18 | job_name:
19 |   type: string
20 |   default: ''
21 | job_dsc:
22 |   type: string
23 |   default: ''
24 | token_file:
25 |   type: string
/yandextank/common/exceptions.py:
--------------------------------------------------------------------------------
1 | class PluginImplementationError(RuntimeError):
2 |     """
3 |     Error in plugin implementation
4 |     """
5 |
6 |     pass
7 |
8 |
9 | class PluginNotPrepared(Exception):
10 |     """
11 |     Can't find plugin's info in core.job
12 |     """
13 |
14 |     def __init__(self, msg=None):
15 |         self.message = "%s\n%s" % (self.__doc__, msg)
16 |
17 |
18 | class GeneratorNotFound(Exception):
19 |     """
20 |     Can't find generator in config
21 |     """
22 |
23 |     pass
24 |
--------------------------------------------------------------------------------
/yandextank/common/tests/test_monitoring.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from yandextank.common.monitoring import monitoring_data
3 |
4 |
5 | @pytest.mark.parametrize(
6 |     'metrics, result',
7 |     [
8 |         (
9 |             {1: {'sens1': 1, 'sens2': 2}},
10 |             {'timestamp': 1, 'data': {'test': {'comment': '', 'metrics': {'custom:sens1': 1, 'custom:sens2': 2}}}},
11 |         )
12 |     ],
13 | )
14 | def test_monitoring_data(metrics, result):
15 |     assert monitoring_data('test', metrics, '') == result
16 |
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/metrics/tests/metric_data_output_quantile_2.csv:
--------------------------------------------------------------------------------
1 | second,cnt,average,stddev,min,q0,q10,q25,q50,q75,q80,q85,q90,q95,q98,q99,q100,max,ts,sum
2 | 1574164720,4,30252.250000,378.926004,29917.0,29917.0,29948.2,29995.0,30164.0,30421.25,30489.8,30558.35,30626.9,30695.45,30736.58,30750.29,30764.0,30764.0,1574164720,121009.0
3 | 1574164721,3,30512.333333,456.267831,30081.0,30081.0,30158.0,30273.5,30466.0,30728.00,30780.4,30832.80,30885.2,30937.60,30969.04,30979.52,30990.0,30990.0,1574164721,91537.0
4 |
--------------------------------------------------------------------------------
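
The expected aggregates in this file line up with the raw samples in metric_data_input_metric_2.csv above: for second 1574164720 the four values 30021, 30764, 29917 and 30307 give the listed sum, average, sample standard deviation and linearly interpolated percentiles. A pandas/numpy sketch that reproduces a few of those columns; the grouping code is illustrative, not the library's aggregation:

import numpy as np
import pandas as pd

# Raw samples from metric_data_input_metric_2.csv (ts in microseconds).
raw = pd.DataFrame({
    'ts': [1574164720249248, 1574164720450572, 1574164720649228, 1574164720849529,
           1574164721049295, 1574164721249847, 1574164721451305],
    'value': [30021, 30764, 29917, 30307, 30081, 30466, 30990],
})
raw['second'] = raw['ts'] // 1_000_000
per_second = raw.groupby('second')['value']

print(per_second.sum())    # 121009 and 91537, matching the "sum" column
print(per_second.mean())   # 30252.25 and 30512.33..., matching "average"
print(per_second.std())    # 378.926... and 456.267... (sample std), matching "stddev"
print(np.percentile([29917, 30021, 30307, 30764], [10, 25, 50]))
# [29948.2 29995.  30164. ], matching q10, q25 and q50 for second 1574164720
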
/contrib/gatling/gatling.conf:
--------------------------------------------------------------------------------
1 | # default gatling config: https://github.com/gatling/gatling/blob/main/gatling-core/src/main/resources/gatling-defaults.conf
2 |
3 | gatling {
4 |   http {
5 |     warmUpUrl = "" # The URL to use to warm-up the HTTP stack (blank means disabled). https://gatling.io by default
6 |   }
7 |
8 |   data {
9 |     file {
10 |       bufferSize = 512 # FileDataWriter's internal data buffer size, in bytes. Decreased to encourage Gatling to flush simulation.log
11 |     }
12 |   }
13 | }
14 |
--------------------------------------------------------------------------------
/yandextank/ammo_validator/tests/test-json-grpc:
--------------------------------------------------------------------------------
1 | {"tag": "/Add", "call": "api.Adder.Add", "metadata": {"Authorization": "Bearer $YC_TOKEN"}, "payload": {"x": 21, "y": 12}}
2 | {"tag": "/Add", "call": "api.Adder.Add", "metadata": {"Authorization": "Bearer $YC_TOKEN"}, "payload": {"x": 22, "y": 13}}
3 | {"tag": "/Add", "call": "api.Adder.Add", "metadata": {"Authorization": "Bearer $YC_TOKEN"}, "payload": {"x": 23, "y": 14}}
4 |
5 | {"tag": "bad requests", "payload": {"x": 23, "y": 14}}
6 | {"tag": "bad requests", "call": "api.Adder.Add" , }
7 | {}
8 |
--------------------------------------------------------------------------------
/yandextank/plugins/OfflineReport/config/schema.yaml:
--------------------------------------------------------------------------------
1 | offline_data_log:
2 |   description: file name for offline data log
3 |   type: string
4 |   default: offline_data.log
5 | offline_json_report:
6 |   description: file name for offline json report
7 |   type: string
8 |   default: offline_report.json
9 | offline_text_report:
10 |   description: file name for offline text report
11 |   type: string
12 |   default: offline_report.txt
13 | print_report:
14 |   description: print offline text report at the end
15 |   type: boolean
16 |   default: false
17 |
17 |
--------------------------------------------------------------------------------
/yandextank/aggregator/config/phout.json:
--------------------------------------------------------------------------------
1 | {
2 |   "interval_real": ["total", "max", "min", "hist", "q", "len"],
3 |   "connect_time": ["total", "max", "min", "len"],
4 |   "send_time": ["total", "max", "min", "len"],
5 |   "latency": ["total", "max", "min", "len"],
6 |   "receive_time": ["total", "max", "min", "len"],
7 |   "interval_event": ["total", "max", "min", "len"],
8 |   "size_out": ["total", "max", "min", "len"],
9 |   "size_in": ["total", "max", "min", "len"],
10 |   "net_code": ["count"],
11 |   "proto_code": ["count"]
12 | }
13 |
13 |
--------------------------------------------------------------------------------
/yandextank/plugins/ShellExec/tests/test_shellexec_plugin.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from unittest.mock import MagicMock
3 | from yandextank.plugins.ShellExec import Plugin
4 |
5 |
6 | def test_plugin_execute():
7 |     plugin = Plugin(MagicMock(), {}, 'shellexec')
8 |     assert plugin.execute('echo foo') == 0
9 |
10 |
11 | def test_plugin_execute_raises():
12 |     plugin = Plugin(MagicMock(), {}, 'shellexec')
13 |     with pytest.raises(RuntimeError) as error:
14 |         plugin.execute('echo "foo')
15 |     assert 'Subprocess returned 2' in str(error.value)
16 |
--------------------------------------------------------------------------------
/yandextank/plugins/Autostop/config/schema.yaml:
--------------------------------------------------------------------------------
1 | autostop:
2 |   description: list of autostop constraints
3 |   type: list
4 |   schema:
5 |     type: string
6 |     description: autostop constraint
7 |     examples: {'http(4xx,50%,5)': 'stop when rate of 4xx http codes is 50% or more during 5 seconds'}
8 |   default: []
9 |   examples: {'[quantile(50,100,20), http(4xx,50%,5)]': 'stop when either quantile 50% or 4xx http codes exceeds specified levels'}
10 | report_file:
11 |   description: path to file to store autostop report
12 |   type: string
13 |   default: autostop_report.txt
--------------------------------------------------------------------------------
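
The autostop entries are plain strings of the form name(arg1,arg2,...), as the examples in this schema show ('http(4xx,50%,5)', 'quantile(50,100,20)'). A minimal parse of that surface syntax; the actual criterion classes live in the Autostop plugin and are not shown in this dump:

import re


def parse_criterion(spec):
    """Split an autostop spec like 'http(4xx,50%,5)' into (name, args)."""
    match = re.fullmatch(r'\s*(\w+)\s*\((.*)\)\s*', spec)
    if match is None:
        raise ValueError('not an autostop criterion: %r' % spec)
    name, args = match.groups()
    return name, [arg.strip() for arg in args.split(',')]


print(parse_criterion('http(4xx,50%,5)'))      # ('http', ['4xx', '50%', '5'])
print(parse_criterion('quantile(50,100,20)'))  # ('quantile', ['50', '100', '20'])
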
/yandextank/contrib/netort/netort/data_manager/metrics/event.py:
--------------------------------------------------------------------------------
1 | from ..common.interfaces import AbstractMetric, TypeEvents, TypeHistogram
2 | import numpy as np
3 |
4 |
5 | class Event(AbstractMetric):
6 |     def __init__(self, **kw):
7 |         super(Event, self).__init__(**kw)
8 |         self.dtypes = {
9 |             'ts': np.int64,
10 |             'value': str,
11 |         }
12 |         self.columns = ['ts', 'value']
13 |
14 |     @property
15 |     def type(self):
16 |         return TypeEvents
17 |
18 |     @property
19 |     def aggregate_types(self):
20 |         return [TypeHistogram]
21 |
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/metrics/metric.py:
--------------------------------------------------------------------------------
1 | from ..common.interfaces import AbstractMetric, TypeTimeSeries, TypeQuantiles, TypeDistribution
2 | import numpy as np
3 |
4 |
5 | class Metric(AbstractMetric):
6 |     def __init__(self, **kw):
7 |         super(Metric, self).__init__(**kw)
8 |         self.dtypes = {'ts': np.int64, 'value': np.float64}
9 |         self.columns = ['ts', 'value']
10 |
11 |     @property
12 |     def type(self):
13 |         return TypeTimeSeries
14 |
15 |     @property
16 |     def aggregate_types(self):
17 |         return [TypeQuantiles, TypeDistribution]
18 |
--------------------------------------------------------------------------------
/yandextank/core/tests/test_monitoring.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | for c in `pgrep -f analyzer-segmentshandler`; do awk '{print $2*4096}' /proc/$c/statm; done | awk '{a=a+$1} END {print a}'
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Copyright 2012 YANDEX LLC
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
--------------------------------------------------------------------------------
/yandextank/plugins/Bfg/example/ultimate_gun.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | log = logging.getLogger(__name__)
4 |
5 |
6 | class LoadTest(object):
7 |     def __init__(self, gun):
8 |         self.gun = gun
9 |
10 |     def case1(self, missile):
11 |         with self.gun.measure("case1"):
12 |             log.info("Shoot case 1: %s", missile)
13 |
14 |     def case2(self, missile):
15 |         with self.gun.measure("case2"):
16 |             log.info("Shoot case 2: %s", missile)
17 |
18 |     def setup(self):
19 |         log.info("Setting up LoadTest")
20 |
21 |     def teardown(self):
22 |         log.info("Tearing down LoadTest")
23 |
--------------------------------------------------------------------------------
/yandextank/plugins/InfluxUploader/config/schema.yaml:
--------------------------------------------------------------------------------
1 | tank_tag:
2 |   default: unknown
3 |   type: string
4 | address:
5 |   default: localhost
6 |   type: string
7 | port:
8 |   default: 8086
9 |   type: integer
10 | database:
11 |   default: mydb
12 |   type: string
13 | username:
14 |   default: root
15 |   type: string
16 | password:
17 |   default: root
18 |   type: string
19 | chunk_size:
20 |   default: 500000
21 |   type: integer
22 | labeled:
23 |   default: false
24 |   type: boolean
25 | histograms:
26 |   default: false
27 |   type: boolean
28 | prefix_measurement:
29 |   default: ""
30 |   type: string
31 | custom_tags:
32 |   default: {}
33 |   type: dict
--------------------------------------------------------------------------------
/yandextank/aggregator/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import pandas as pd
4 | import pytest
5 |
6 | from yandextank.common.util import get_test_path
7 |
8 | MAX_TS = 1000
9 |
10 |
11 | def random_split(df):
12 |     i = 0
13 |     while i < max(df.index):
14 |         step = np.random.randint(100, 200)
15 |         if i + step < max(df.index):
16 |             yield df.loc[i : i + step - 1]
17 |         else:
18 |             yield df.loc[i:]
19 |         i += step
20 |
21 |
22 | @pytest.fixture
23 | def data():
24 |     df = pd.read_csv(os.path.join(get_test_path(), 'yandextank/aggregator/tests/data.csv'), delimiter=',', index_col=0)
25 |     return df
26 |
--------------------------------------------------------------------------------
/data/yandex-tank.completion:
--------------------------------------------------------------------------------
1 | have yandex-tank &&
2 | _yandex_tank()
3 | {
4 |     local cur prev opts
5 |     COMPREPLY=()
6 |     cur="${COMP_WORDS[COMP_CWORD]}"
7 |     prev="${COMP_WORDS[COMP_CWORD-1]}"
8 |
9 |     if [[ ${cur} == -* ]] ; then
10 |         opts=`yandex-tank --bash-switches-list`
11 |         COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
12 |         return 0
13 |     fi
14 |
15 |     if [[ ${prev} == -o ]] ; then
16 |         opts=`yandex-tank --bash-options-prev="${prev}" --bash-options-cur="${cur}"`
17 |         COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
18 |         return 0
19 |     fi
20 |     COMPREPLY=()
21 | } &&
22 | complete -o default -F _yandex_tank yandex-tank
23 |
--------------------------------------------------------------------------------
/yandextank/plugins/OpenTSDBUploader/config/schema.yaml:
--------------------------------------------------------------------------------
1 | tank_tag:
2 |   default: unknown
3 |   type: string
4 | address:
5 |   default: localhost
6 |   type: string
7 | port:
8 |   default: 4242
9 |   type: integer
10 | username:
11 |   default: root
12 |   type: string
13 | password:
14 |   default: root
15 |   type: string
16 | chunk_size:
17 |   default: 4096
18 |   type: integer
19 | labeled:
20 |   default: false
21 |   type: boolean
22 | histograms:
23 |   default: false
24 |   type: boolean
25 | prefix_metric:
26 |   default: ""
27 |   type: string
28 | custom_tags:
29 |   default: {}
30 |   type: dict
31 | ssl:
32 |   default: true
33 |   type: boolean
34 | verify_ssl:
35 |   default: true
36 |   type: boolean
37 |
--------------------------------------------------------------------------------
/yandextank/stepper/tests/instances1.stpd:
--------------------------------------------------------------------------------
1 | 18 0
2 | GET / HTTP/1.1
3 |
4 |
5 | 18 500
6 | GET / HTTP/1.1
7 |
8 |
9 | 18 1000
10 | GET / HTTP/1.1
11 |
12 |
13 | 18 1500
14 | GET / HTTP/1.1
15 |
16 |
17 | 18 2000
18 | GET / HTTP/1.1
19 |
20 |
21 | 18 2500
22 | GET / HTTP/1.1
23 |
24 |
25 | 18 3000
26 | GET / HTTP/1.1
27 |
28 |
29 | 18 3500
30 | GET / HTTP/1.1
31 |
32 |
33 | 18 4000
34 | GET / HTTP/1.1
35 |
36 |
37 | 18 4500
38 | GET / HTTP/1.1
39 |
40 |
41 | 18 5000
42 | GET / HTTP/1.1
43 |
44 |
45 | 18 0
46 | GET / HTTP/1.1
47 |
48 |
49 | 18 0
50 | GET / HTTP/1.1
51 |
52 |
53 | 18 0
54 | GET / HTTP/1.1
55 |
56 |
57 | 18 0
58 | GET / HTTP/1.1
59 |
60 |
61 |
--------------------------------------------------------------------------------
/docker/files/bashrc:
--------------------------------------------------------------------------------
1 | # If not running interactively, don't do anything
2 | case $- in
3 |     *i*) ;;
4 |       *) return;;
5 | esac
6 |
7 | # don't put duplicate lines or lines starting with space in the history.
8 | # See bash(1) for more options
9 | HISTCONTROL=ignoredups
10 |
11 | # append to the history file, don't overwrite it
12 | shopt -s histappend
13 |
14 | # for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
15 | HISTSIZE=8000
16 | HISTFILESIZE=8000
17 |
18 | export EDITOR=vi
19 | export PAGER=less
20 | export MANPAGER='less -X'
21 |
22 | PS1='\[\e[1;31m\]$(echo "["${?/0/}"]" | sed "s/\\[\\]//")$(echo "\[\e[32m\][tank]\[\e[37m\]")\u@\h: \[\e[00m\]\w \$ '
23 |
24 | echo "Yandex.Tank Docker image"
25 |
--------------------------------------------------------------------------------
/yandextank/plugins/DataUploader/tests/test_uploader_plugin.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 |
4 | from yandextank.plugins.DataUploader.plugin import BackendTypes
5 |
6 |
7 | class TestBackendTypes(object):
8 |
9 |     @pytest.mark.parametrize(
10 |         'api_address, section_name, expected_type',
11 |         [
12 |             ('lunapark.foo-bar.ru', 'uploader', BackendTypes.LUNAPARK),
13 |             ('lunapark.test.foo-bar.ru', 'overload', BackendTypes.LUNAPARK),
14 |             ('overload.yandex.net', 'uploade', BackendTypes.OVERLOAD),
15 |             ('localhost', 'lunapark', BackendTypes.LUNAPARK),
16 |         ],
17 |     )
18 |     def test_identify(self, api_address, section_name, expected_type):
19 |         assert BackendTypes.identify_backend(api_address, section_name) == expected_type
20 |
--------------------------------------------------------------------------------
/yandextank/plugins/ShellExec/config/schema.yaml:
--------------------------------------------------------------------------------
1 | catch_out:
2 |   description: show commands stdout
3 |   type: boolean
4 |   default: False
5 | prepare:
6 |   description: shell command to execute on prepare stage
7 |   type: string
8 |   default: ''
9 | start:
10 |   description: shell command to execute on start
11 |   type: string
12 |   default: ''
13 | end:
14 |   description: shell command to execute after test end
15 |   type: string
16 |   default: ''
17 | poll:
18 |   description: shell command to execute every second while test is running
19 |   type: string
20 |   default: ''
21 | post_process:
22 |   description: shell command to execute on post process stage
23 |   type: string
24 |   default: ''
25 | shell:
26 |   description: shell binary to use
27 |   type: string
28 |   default: '/bin/bash'
29 |
--------------------------------------------------------------------------------
/data/PKGBUILD:
--------------------------------------------------------------------------------
1 | # Maintainer: Konstantin Shalygin (k0ste@opentech.ru)
2 |
3 | pkgname='yandex-tank'
4 | pkgver='1.7.10'
5 | pkgrel='1'
6 | pkgdesc='Performance measurement tool'
7 | arch=('any')
8 | url='https://github.com/yandex/yandex-tank'
9 | license=('GPL')
10 | depends=('python3' 'python3-psutil' 'python3-ipaddr' 'phantom-engine-git')
11 | source=("http://ppa.launchpad.net/yandex-load/main/ubuntu/pool/main/y/yandextank/yandextank_${pkgver}.tar.gz")
12 | sha256sums=("5c2d9d948e1583183a623f430bf0b5327baca4fcbc9d7893f230b364a7aedc70")
13 |
14 | build() {
15 |     cd "$srcdir/$pkgname"
16 |     python2 setup.py build
17 | }
18 |
19 | package() {
20 |     pushd "$srcdir/$pkgname"
21 |     python2 setup.py install -O1 --root="$pkgdir"
22 |     install -Dm644 "COPYING" "$pkgdir/usr/share/doc/$pkgname/COPYING"
23 |     popd
24 | }
25 |
--------------------------------------------------------------------------------
/yandextank/plugins/Pandora/config/pandora_pool_default.json:
--------------------------------------------------------------------------------
1 | {
2 | "Name": "Pool#0",
3 | "Gun": {
4 | "GunType": "http",
5 | "Parameters": {
6 | "Target": "localhost:3000"
7 | }
8 | },
9 | "AmmoProvider": {
10 | "AmmoType": "jsonline/http",
11 | "AmmoSource": "./ammo.jsonline",
12 | "Passes": 1
13 | },
14 | "ResultListener": {
15 | "ListenerType": "log/phout",
16 | "Destination": "./phout.log"
17 | },
18 | "UserLimiter": {
19 | "LimiterType": "periodic",
20 | "Parameters": {
21 | "BatchSize": 3,
22 | "MaxCount": 18,
23 | "Period": 1
24 | }
25 | },
26 | "StartupLimiter": {
27 | "LimiterType": "periodic",
28 | "Parameters": {
29 | "BatchSize": 2,
30 | "MaxCount": 6,
31 | "Period": 2
32 | }
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/yandextank/core/config/00-base.ini:
--------------------------------------------------------------------------------
1 | ### base config with Yandex-specific tool settings ###
2 | [tank]
3 | plugin_rcheck=yandextank.plugins.ResourceCheck
4 | plugin_ShellExec=yandextank.plugins.ShellExec
5 | plugin_phantom=yandextank.plugins.Phantom
6 | plugin_aggregate=yandextank.plugins.Aggregator
7 | plugin_autostop=yandextank.plugins.Autostop
8 | plugin_telegraf=yandextank.plugins.Telegraf
9 | plugin_console=yandextank.plugins.Console
10 | plugin_rcassert=yandextank.plugins.RCAssert
11 | plugin_jsonreport=yandextank.plugins.JsonReport
12 | plugin_offlinereport=yandextank.plugins.OfflineReport
13 | artifacts_base_dir=logs
14 |
15 | [bfg]
16 | ammo_type=caseline
17 |
18 | [overload]
19 | api_address=https://overload.yandex.net/
20 |
21 | [telegraf]
22 | disguise_hostnames=1
23 |
24 | [monitoring]
25 | disguise_hostnames=1
26 |
--------------------------------------------------------------------------------
/yandextank/plugins/YCMonitoring/tests/COUNTER.json:
--------------------------------------------------------------------------------
1 | {
2 | "metrics": [
3 | {
4 | "name": "pod.network.sent_bytes_count",
5 | "labels": {
6 | "cluster_id": "load-mock-kuber",
7 | "pod": "kube-proxy-bb6kp",
8 | "service": "managed-kubernetes",
9 | "namespace": "kube-system"
10 | },
11 | "type": "COUNTER",
12 | "timeseries": {
13 | "timestamps": [
14 | 1713801680000,
15 | 1713801690000,
16 | 1713801700000
17 | ],
18 | "int64Values": [
19 | 581884083,
20 | 582131973,
21 | 582131973
22 | ]
23 | }
24 | }
25 | ]
26 | }
--------------------------------------------------------------------------------
/yandextank/plugins/Phantom/_schema.yaml:
--------------------------------------------------------------------------------
1 | additional_libs:
2 | type: string
3 | default: ""
4 | address:
5 | type: string
6 | required: true
7 | affinity:
8 | type: string
9 | default: ""
10 | buffered_seconds:
11 | type: integer
12 | default: 2
13 | enum_ammo:
14 | type: boolean
15 | default: false
16 | header_http:
17 | type: string
18 | headers:
19 | type: string
20 | phantom_modules_path:
21 | type: string
22 | default: /usr/lib/phantom
23 | phantom_path:
24 | type: string
25 | default: phantom
26 | phout_file:
27 | type: string
28 | default: ""
29 | rps_schedule:
30 | type: string
31 | threads:
32 | type: integer
33 | default: null
34 | nullable: true
35 | timeout:
36 | type: string
37 | default: 11s
38 | uris:
39 | type: string
40 | writelog:
41 | type: string
42 | default: none
43 |
44 |
45 |
--------------------------------------------------------------------------------
/yandextank/stepper/tests/test-uripost.txt:
--------------------------------------------------------------------------------
1 | [host: qwertyuiop.test.tst.yandex.net]
2 | [Content-Type: application/json]
3 | [My--Awesome--Header: 3:serv:CN9vEOeIxPN6EJ2IeyDKgJuRpAQ:HrSX7P7MgItUFi3SbEJF5jQqPMyeiFWiM9PO7eVfilz44dVl7tftz_YovVfeH-RdrVH2JVBverB1KVa_Y6fjD0CS_3PVO4Ie2D3_DsU0asViYcOM_kQcC2HtnKqL1hhlPg44Nhuc-TBeoyidfGw-uhebmMB2ImdlK0LwbA-r2bHzehH7ZyQZMyDM-QCBcU0fhbPIo7aRGmMWXtn8JBdHVBplCfsEQbWWhv-MJ9CGZv4hH3wh3XgVn0n5_BADL0HRKy0qrY63eN2y-8U_Jy50Y8Q4pA]
4 | [Connection: Keep-Alive]
5 | 347 /v1/payment/check
6 | {"card": {"bin": "bin"}, "order_id": "order_id", "payment": {"method": "payment_method", "type": "card"}, "platform": "android", "request_id": "request_id", "service_id": "service_id", "user_agent": "user_agent", "transaction": {"amount": "500.100", "currency": "RUB"}, "user": {"id": "user_id", "ip": "127.0.0.1", "passport_uid": "passport_uid"}}
7 |
--------------------------------------------------------------------------------
/yandextank/api/config/00-base.ini:
--------------------------------------------------------------------------------
1 | ### base config with Yandex-specific tool settings ###
2 | [tank]
3 | plugin_rcheck=yandextank.plugins.ResourceCheck
4 | plugin_ShellExec=yandextank.plugins.ShellExec
5 | plugin_phantom=yandextank.plugins.Phantom
6 | plugin_aggregate=yandextank.plugins.Aggregator
7 | plugin_autostop=yandextank.plugins.Autostop
8 | plugin_telegraf=yandextank.plugins.Telegraf
9 | plugin_console=yandextank.plugins.Console
10 | plugin_tips=yandextank.plugins.TipsAndTricks
11 | plugin_rcassert=yandextank.plugins.RCAssert
12 | plugin_jsonreport=yandextank.plugins.JsonReport
13 | artifacts_base_dir=logs
14 |
15 | [console]
16 | short_only=1
17 |
18 | [bfg]
19 | ammo_type=caseline
20 |
21 | [overload]
22 | api_address=https://overload.yandex.net/
23 |
24 | [telegraf]
25 | disguise_hostnames=1
26 |
27 | [monitoring]
28 | disguise_hostnames=1
29 |
--------------------------------------------------------------------------------
/yandextank/aggregator/tests/test_chopper.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 |
3 | from conftest import MAX_TS, random_split
4 | from yandextank.aggregator.chopper import TimeChopper
5 |
6 |
7 | class TestChopper(object):
8 | def test_one_chunk(self, data):
9 | chopper = TimeChopper([iter([data])])
10 | result = list(chopper)
11 | assert len(result) == MAX_TS
12 |         concatenated = pd.concat(r[1] for r in result)
13 |         assert len(data) == len(concatenated), "We did not lose anything"
14 |
15 | def test_multiple_chunks(self, data):
16 | chunks = random_split(data)
17 | chopper = TimeChopper([iter(chunks)])
18 | result = list(chopper)
19 | assert len(result) == MAX_TS
20 |         concatenated = pd.concat(r[1] for r in result)
21 |         assert len(data) == len(concatenated), "We did not lose anything"
22 |
--------------------------------------------------------------------------------
/yandextank/stepper/tests/test-ammo.txt:
--------------------------------------------------------------------------------
1 | 601 case1
2 | POST /case1?sleep=10 HTTP/1.1
3 | Host: localhost
4 | Accept: */*
5 | Connection: keep-alive
6 | Content-Length: 490
7 |
8 | F5SnJeqM23XfFG6mrTFmIMF7EBf9wUm9enPDtqRoIemHjWzfIYHyOU8XGOw7banXy9IJ9Of0j0prI6gmLAWtpW6mbxfk0ZR5QxqNyrR1JZ2auzfQQhfco0AH3eASObrSGOsbqaKaAcemhTzSRAY3M53Wwy4IevGdxOzALsDa91KsiBFX9R8hZy8I40VG80LiE6PGBMIrS5I1UqYQ7UeTrcFwZLe7yZEmVOtnpvU8v0qxbfJYxsQXS1mQA3SY5eDdD9l5IAcnTNQbTsWTYN5SdZjmAIRskGRl6iPo2bmihFnmXTZvRPHfLPtJGfpJ2O4ZYGLDPSiTZUK8fSGJQZO864ZHfcOJsLBVXaNfe1XEfUauCtjjqogZ2JWu43sy8Q3j5zXti3Pfd62q6ycoqjJ7SVSwTGYTCSpwZoWDbul0NGd9URRWAc4Bzi3bX9U944HF2pUUh9BWagE4rQqTUfzvLUXjoSS9IWtej4ydAnW9az
9 | 88 case2
10 | GET /case2?sleep=100 HTTP/1.1
11 | Host: localhost
12 | Accept: */*
13 | Connection: keep-alive
14 |
15 | 88 case3
16 | GET /case3?sleep=150 HTTP/1.1
17 | Host: localhost
18 | Accept: */*
19 | Connection: keep-alive
20 |
21 |
--------------------------------------------------------------------------------
/yandextank/ammo_validator/tests/test-phantom:
--------------------------------------------------------------------------------
1 | 601 case1
2 | POST /case1?sleep=10 HTTP/1.1
3 | Host: localhost
4 | Accept: */*
5 | Connection: keep-alive
6 | Content-Length: 490
7 |
8 | F5SnJeqM23XfFG6mrTFmIMF7EBf9wUm9enPDtqRoIemHjWzfIYHyOU8XGOw7banXy9IJ9Of0j0prI6gmLAWtpW6mbxfk0ZR5QxqNyrR1JZ2auzfQQhfco0AH3eASObrSGOsbqaKaAcemhTzSRAY3M53Wwy4IevGdxOzALsDa91KsiBFX9R8hZy8I40VG80LiE6PGBMIrS5I1UqYQ7UeTrcFwZLe7yZEmVOtnpvU8v0qxbfJYxsQXS1mQA3SY5eDdD9l5IAcnTNQbTsWTYN5SdZjmAIRskGRl6iPo2bmihFnmXTZvRPHfLPtJGfpJ2O4ZYGLDPSiTZUK8fSGJQZO864ZHfcOJsLBVXaNfe1XEfUauCtjjqogZ2JWu43sy8Q3j5zXti3Pfd62q6ycoqjJ7SVSwTGYTCSpwZoWDbul0NGd9URRWAc4Bzi3bX9U944HF2pUUh9BWagE4rQqTUfzvLUXjoSS9IWtej4ydAnW9az
9 | 88 case2
10 | GET /case2?sleep=100 HTTP/1.1
11 | Host: localhost
12 | Accept: */*
13 | Connection: keep-alive
14 |
15 | 88 case3
16 | GET /case3?sleep=150 HTTP/1.1
17 | Host: localhost
18 | Accept: */*
19 | Connection: keep-alive
20 |
21 |
--------------------------------------------------------------------------------
/yandextank/plugins/DataUploader/tests/test_postloader/test_empty/validated_conf.yaml:
--------------------------------------------------------------------------------
1 | core:
2 | lock_dir: .
3 | phantom:
4 | enabled: false
5 | bfg:
6 | package: yandextank.plugins.Bfg
7 | enabled: true
8 | address: http://lunapark.test.yandex-team.ru
9 | load_profile:
10 | load_type: rps
11 | schedule: const(2, 30s)
12 | instances: 1
13 | header_http: '1.1'
14 | ammofile: simple_ammo.txt
15 | ammo_type: line
16 | loop: 1000
17 | headers: |
18 | [Host: lunapark.test.yandex-team.ru]
19 | [Connection: close]
20 | gun_type: http
21 | gun_config:
22 | base_address: http://lunapark.test.yandex-team.ru
23 | telegraf:
24 | enabled: true
25 | config: monitoring.xml
26 | disguise_hostnames: false
27 | autostop:
28 | package: yandextank.plugins.Autostop
29 | enabled: false
30 | autostop:
31 | - total_time(70ms, 3%, 10s)
32 | - http(4xx,25%,10)
33 | console:
34 | short_only: true
--------------------------------------------------------------------------------
/yandextank/ammo_validator/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from unittest.mock import patch
3 |
4 | from yandextank.contrib.netort.netort.resource import (
5 | FileOpener,
6 | HttpOpener,
7 | ResourceManager,
8 | S3Opener,
9 | )
10 |
11 |
12 | @pytest.fixture
13 | def patch_file_opener():
14 | with patch.object(FileOpener, 'open') as p:
15 | yield p
16 |
17 |
18 | @pytest.fixture
19 | def patch_http_opener():
20 | with patch.object(HttpOpener, 'open'):
21 | with patch.object(HttpOpener, 'get_request_info') as p:
22 | yield p
23 |
24 |
25 | @pytest.fixture
26 | def patch_s3_opener():
27 | with patch.object(S3Opener, 'open') as p:
28 | yield p
29 |
30 |
31 | @pytest.fixture
32 | def patch_resource_manager(patch_s3_opener, patch_http_opener, patch_file_opener):
33 | with patch.object(ResourceManager, 'load_config_safe') as p:
34 |         p.return_value = {}  # make the patched load_config_safe return an empty config
35 | yield p
36 |
--------------------------------------------------------------------------------
/yandextank/contrib/netort/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from unittest.mock import patch
3 |
4 | from yandextank.contrib.netort.netort.resource import (
5 | FileOpener,
6 | HttpOpener,
7 | ResourceManager,
8 | S3Opener,
9 | )
10 |
11 |
12 | @pytest.fixture
13 | def patch_file_opener():
14 | with patch.object(FileOpener, 'open') as p:
15 | yield p
16 |
17 |
18 | @pytest.fixture
19 | def patch_http_opener():
20 | with patch.object(HttpOpener, 'open'):
21 | with patch.object(HttpOpener, 'get_request_info') as p:
22 | yield p
23 |
24 |
25 | @pytest.fixture
26 | def patch_s3_opener():
27 | with patch.object(S3Opener, 'open') as p:
28 | yield p
29 |
30 |
31 | @pytest.fixture
32 | def patch_resource_manager(patch_s3_opener, patch_http_opener, patch_file_opener):
33 | with patch.object(ResourceManager, 'load_config_safe') as p:
34 |         p.return_value = {}  # make the patched load_config_safe return an empty config
35 | yield p
36 |
--------------------------------------------------------------------------------
/yandextank/plugins/Pandora/config/schema.yaml:
--------------------------------------------------------------------------------
1 | affinity:
2 | description: Use to set CPU affinity
3 | type: string
4 | nullable: true
5 | default: ''
6 | pandora_cmd:
7 | type: string
8 | default: pandora
9 | description: Pandora executable path or link to it
10 | buffered_seconds:
11 | type: integer
12 | default: 2
13 | config_content:
14 | type: dict
15 | default: {}
16 | description: Pandora config contents
17 | config_file:
18 | type: string
19 | default: ''
20 | description: Pandora config file path
21 | expvar:
22 | type: boolean
23 | default: false
24 | resources:
25 | default: []
26 | type: list
27 | description: additional resources you need to download before test
28 | resource:
29 | type: dict
30 | description: dict with attributes for additional resources
31 | report_file:
32 | type: string
33 | nullable: true
34 | default: null
35 | description: Pandora phout path (normally will be taken from pandora config)
36 |
--------------------------------------------------------------------------------
/yandextank/plugins/Phantom/config/phantom_benchmark_additional.tpl:
--------------------------------------------------------------------------------
1 | io_t benchmark_io$sequence_no = io_benchmark_t {
2 | method_t stream_method = $method_stream {
3 | loggers = {
4 | brief_logger
5 | $comment_answ benchmark_logger
6 | }
7 |
8 | ${source_log_prefix}source_t source_log = ${source_log_prefix}source_log_t {
9 | filename = "$stpd"
10 | }
11 |
12 | $ssl_transport
13 |
14 | $comment_proto proto_t http_proto$sequence_no = proto_http_t {
15 | $reply_limits
16 | $comment_proto }
17 |
18 | $comment_proto proto_t none_proto = proto_none_t { }
19 |
20 | $proto
21 |
22 | $method_options
23 |
24 | address = $ip
25 | port = $port
26 | $bind
27 | timeout = $timeout
28 | source = source_log
29 | }
30 | method = stream_method
31 |
32 | times = simple_times
33 |
34 | instances = $instances
35 | #human_readable_report = false
36 | scheduler = main_scheduler
37 | }
38 |
--------------------------------------------------------------------------------
/yandextank/plugins/DataUploader/tests/test_postloader/test_disabled/validated_conf.yaml:
--------------------------------------------------------------------------------
1 | core:
2 | lock_dir: .
3 | phantom:
4 | enabled: false
5 | bfg:
6 | package: yandextank.plugins.Bfg
7 | enabled: true
8 | address: http://lunapark.test.yandex-team.ru
9 | load_profile:
10 | load_type: rps
11 | schedule: const(2, 30s)
12 | instances: 1
13 | header_http: '1.1'
14 | ammofile: simple_ammo.txt
15 | ammo_type: line
16 | loop: 1000
17 | headers: |
18 | [Host: lunapark.test.yandex-team.ru]
19 | [Connection: close]
20 | gun_type: http
21 | gun_config:
22 | base_address: http://lunapark.test.yandex-team.ru
23 | uploader:
24 | package: yandextank.plugins.DataUploader
25 | enabled: false
26 | telegraf:
27 | enabled: true
28 | config: monitoring.xml
29 | disguise_hostnames: false
30 | autostop:
31 | package: yandextank.plugins.Autostop
32 | enabled: false
33 | autostop:
34 | - total_time(70ms, 3%, 10s)
35 | - http(4xx,25%,10)
36 | console:
37 | short_only: true
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/clients/tests/test_luna.py:
--------------------------------------------------------------------------------
1 | from yandextank.contrib.netort.netort.data_manager.clients import LunaClient
2 | from yandextank.contrib.netort.netort.data_manager.common.interfaces import TypeEvents
3 | import pandas as pd
4 | import pytest
5 |
6 |
7 | class TestLunaClient(object):
8 |
9 | def setup_method(self):
10 | self.luna_client = LunaClient(meta={'api_address': 'localhost'}, job=None)
11 | self.df1 = pd.read_csv('netort/data_manager/tests/df1MetricData.csv')
12 | self.df2 = pd.read_csv('netort/data_manager/tests/df2MetricData.csv')
13 | self.events = TypeEvents()
14 |
15 | @pytest.mark.xfail
16 | def test_two(self):
17 | self.luna_client.pending_queue.put([self.events, self.df1])
18 | assert 5 == 5
19 |
20 |     def teardown_method(self):
21 | # self.luna_client.register_worker.stop()
22 | # self.luna_client.register_worker.join()
23 | # self.luna_client.worker.stop()
24 | # self.luna_client.worker.join()
25 | pass
26 |
--------------------------------------------------------------------------------
/yandextank/core/tests/test_multi_cfg.yaml:
--------------------------------------------------------------------------------
1 | version: 1.8.36
2 | core:
3 | operator: fomars
4 | lock_dir: "./"
5 | telegraf:
6 | package: yandextank.plugins.Telegraf
7 | enabled: True
8 | config: test_monitoring.xml
9 | disguise_hostnames: True
10 | phantom:
11 | package: yandextank.plugins.Phantom
12 | enabled: True
13 | address: lunapark.test.yandex-team.ru
14 | phantom_path: "./phantom_mock.sh"
15 | header_http: "1.1"
16 | uris: "/"
17 | loop: 100
18 | load_profile: {load_type: rps, schedule: "line(1, 10, 1m)"}
19 | connection_test: false
20 | multi:
21 | - address: localhost
22 | load_profile: {load_type: instances, schedule: "const(10, 1m)"}
23 | connection_test: false
24 | lunapark:
25 | package: yandextank.plugins.DataUploader
26 | enabled: True
27 | api_address: "https://lunapark.test.yandex-team.ru/"
28 | copy_config_to: test_config_copy.yaml
29 | ignore_target_lock: true
30 | task: LOAD-204
31 | aggregator:
32 | package: yandextank.plugins.Aggregator
33 | enabled: True
34 | verbose_histogram: True
35 |
--------------------------------------------------------------------------------
/yandextank/ammo_validator/tests/test-json-http:
--------------------------------------------------------------------------------
1 | {"host": "example.com", "method": "GET", "uri": "/api/url1", "tag": "url1", "headers": {"User-agent": "Tank", "Connection": "close"}}
2 | {"host": "example.com", "method": "POST", "uri": "/api/url2", "tag": "url2", "headers": {"User-agent": "Tank", "Connection": "close"}, "body": "body_data"}
3 | {"host": "example.com", "method": "POST", "uri": "/api/url2", "tag": "url2", "headers": {"User-agent": "Tank", "Connection": "close"}, "body": "{\"data\": \"some_data\"}"}
4 |
5 | {"tag": "bad requests", "host": "example.com", "method": "GET", "uri": "/api/url1" }
6 | {"tag": "bad requests", "host": "example.com", "method": "POST" "body": "body_data"}
7 | {"tag": "bad requests", "host": "example.com", "uri": "/api/url2", "body": "body_data"}
8 | {"tag": "bad requests", "method": "POST", "uri": "/api/url2", "body": "body_data"}
9 | {"tag": "bad requests", "host": "example.com", "method": "GET", "uri": "/api/url1", "body": "body_data" , }
10 | {}
11 |
--------------------------------------------------------------------------------
/yandextank/api/config/00-base.yaml:
--------------------------------------------------------------------------------
1 | aggregator:
2 | enabled: true
3 | package: yandextank.plugins.Aggregator
4 | autostop:
5 | enabled: true
6 | package: yandextank.plugins.Autostop
7 | bfg:
8 | enabled: false
9 | package: yandextank.plugins.Bfg
10 | console:
11 | enabled: true
12 | package: yandextank.plugins.Console
13 | short_only: true
14 | jmeter:
15 | enabled: false
16 | package: yandextank.plugins.JMeter
17 | jsonreport:
18 | enabled: true
19 | package: yandextank.plugins.JsonReport
20 | offlinereport:
21 | enabled: true
22 | package: yandextank.plugins.OfflineReport
23 | phantom:
24 | enabled: true
25 | package: yandextank.plugins.Phantom
26 | rcheck:
27 | enabled: true
28 | package: yandextank.plugins.ResourceCheck
29 | shellexec:
30 | enabled: true
31 | package: yandextank.plugins.ShellExec
32 | telegraf:
33 | enabled: true
34 | package: yandextank.plugins.Telegraf
35 | tips:
36 | enabled: true
37 | package: yandextank.plugins.TipsAndTricks
38 | rcassert:
39 | enabled: true
40 | package: yandextank.plugins.RCAssert
41 | overload:
42 | enabled: false
43 | package: yandextank.plugins.DataUploader
44 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | Welcome to Yandex.Tank's documentation!
2 | =======================================
3 |
4 | :Author: `Alexey Lavrenuke `_
5 | :Version: |release|
6 | :Date: |today|
7 | :Homepage: `Yandex.Tank Homepage on Github `_
8 | :Download: `Launchpad PPA `_ `Pypi `_
9 | :Documentation: `PDF Documentation `_
10 | :License: `GNU LGPLv3 `_
11 | :Issue tracker: `GitHub Issues `_
12 |
13 | Contents:
14 |
15 | .. toctree::
16 | :maxdepth: 3
17 |
18 | intro
19 | install
20 | generator_tuning
21 | tutorial
22 | configuration
23 | core_and_modules
24 | ammo_generators
25 | config_reference
26 |
27 |
28 | Indices and tables
29 | ==================
30 |
31 | * :ref:`genindex`
32 | * :ref:`modindex`
33 | * :ref:`search`
34 |
35 | .. image::
36 | http://mc.yandex.ru/watch/23073253
37 | :align: right
38 |
--------------------------------------------------------------------------------
/yandextank/plugins/YCMonitoring/config/schema.yaml:
--------------------------------------------------------------------------------
1 | panels:
2 | required: true
3 | type: dict
4 | valueschema:
5 | type: dict
6 | schema:
7 | group_name:
8 | type: string
9 | description: Optional grouping name for backend to group panels on same page
10 | queries:
11 | required: true
12 | type: list
13 | schema:
14 | type: string
15 | folder_id:
16 | type: string
17 | api_host:
18 | type: string
19 | default: monitoring.api.cloud.yandex.net:443
20 | token:
21 | type: string
22 | default: LOADTESTING_YC_TOKEN
23 | description: path to file with Monitoring API token, or LOADTESTING_YC_TOKEN to retrieve token from env variable
24 | timeout:
25 | type: string
26 | default: 5s
27 | request_timeout:
28 | type: string
29 | default: 10s
30 | poll_interval:
31 | type: string
32 | default: 60s
33 | ignore_labels:
34 | type: ['string', 'list']
35 | default: ['service', 'resource_type', 'device', 'interface_number', 'source_metric', 'subcluster_name', 'shard', 'dc']
36 | priority_labels:
37 | type: ['string', 'list']
38 | default: ['cpu_name', 'label']
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # Read the Docs configuration file for Sphinx projects
2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
3 |
4 | # Required
5 | version: 2
6 |
7 | # Set the OS, Python version and other tools you might need
8 | build:
9 | os: ubuntu-22.04
10 | tools:
11 | python: "3.12"
12 | # You can also specify other tool versions:
13 | # nodejs: "20"
14 | # rust: "1.70"
15 | # golang: "1.20"
16 |
17 | # Build documentation in the "docs/" directory with Sphinx
18 | sphinx:
19 | configuration: docs/conf.py
20 | # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs
21 | # builder: "dirhtml"
22 | # Fail on all warnings to avoid broken references
23 | # fail_on_warning: true
24 |
25 | # Optionally build your docs in additional formats such as PDF and ePub
26 | # formats:
27 | # - pdf
28 | # - epub
29 |
30 | # Optional but recommended, declare the Python requirements required
31 | # to build your documentation
32 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
33 | python:
34 | install:
35 | - requirements: docs/requirements.txt
36 |
--------------------------------------------------------------------------------
/yandextank/plugins/Console/config/schema.yaml:
--------------------------------------------------------------------------------
1 | info_panel_width:
2 | type: integer
3 | default: 33
4 | description: width of right panel
5 | short_only:
6 | type: boolean
7 | default: false
8 | description: do not draw full console screen, write short info for each second
9 | disable_all_colors:
10 | type: boolean
11 | default: false
12 | description: disable colors in full output
13 | disable_colors:
14 | type: string
15 | default: ''
16 | sizes_max_spark:
17 | type: integer
18 | default: 120
19 | description: max length of sparkline for request/response sizes, 0 to disable
20 | times_max_spark:
21 | type: integer
22 | default: 120
23 | description: max length of sparkline for fractions of request time, 0 to disable
24 | cases_max_spark:
25 | type: integer
26 | default: 120
27 | description: length of sparkline for each case, 0 to disable
28 | cases_sort_by:
29 | type: string
30 | default: 'count'
31 | description: field for cases data sort
32 | allowed:
33 | - 'count'
34 | - 'net_err'
35 | - 'http_err'
36 | max_case_len:
37 | type: integer
38 | default: 32
39 | description: max length of case name, longer names will be cut in console output
40 |
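Putting a few of these options together, a "console" section of a tank config could look like the sketch below (values are illustrative; the enabled/package keys follow the 00-base.yaml pattern used elsewhere in this repo):

    console:
      enabled: true
      package: yandextank.plugins.Console
      info_panel_width: 40
      short_only: false
      cases_sort_by: http_err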
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/config/schema.yaml:
--------------------------------------------------------------------------------
1 | config:
2 | type: ['string', 'dict']
3 | default: auto
4 | description: config or path to monitoring config file.
5 | values_description:
6 | auto: collect default metrics from default_target host
7 | <path/to/config/file>: path to telegraf configuration file - yaml or xml
8 | none: disable monitoring
9 | config_contents:
10 | type: string
11 | description: used to repeat tests from Overload, not for manual editing
12 | default_target:
13 | type: string
14 | default: localhost
15 | description: 'host to collect default metrics from (if "config: auto" specified)'
16 | disguise_hostnames:
17 | type: boolean
18 | default: true
19 | description: Disguise real host names - use this if you upload results to Overload and don't want others to see your hostnames
20 | ssh_timeout:
21 | type: string
22 | default: 5s
23 | description: timeout of ssh connection to target(s)
24 | examples:
25 | 10s: 10 seconds
26 | 2m: 2 minutes
27 | ssh_key_path:
28 | type: string
29 | description: path to ssh key file or folder with keys
30 | kill_old:
31 | type: boolean
32 | default: false
33 | description: kill old hanging agents on target(s)
--------------------------------------------------------------------------------
/mocks/shootexec-shooter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import sys
4 | import time
5 | import random
6 |
7 |
8 | def main():
9 | stdout = open(sys.argv[1], "w")
10 | stderr = open(sys.argv[2], "w")
11 | waitfor = time.time() + 60 * 2
12 | fake_rps = 1
13 |
14 | while time.time() < waitfor:
15 | # shooting results
16 | output = [
17 | time.time(),
18 | random.choice(["tag1", "tag2", "tag3"]),
19 | int(500 * random.random()),
20 | 10,
21 | 10,
22 | int(400 * random.random()),
23 | 10,
24 | 0,
25 | int(1024 * random.random()),
26 | int(1024 * random.random()),
27 | 0,
28 | random.choice([200, 404, 503]),
29 | ]
30 | stdout.write("\t".join([str(x) for x in output]) + "\n")
31 | stdout.flush()
32 |
33 | # shooter stats
34 | stats = [time.time(), fake_rps, 1]
35 | stderr.write("\t".join([str(x) for x in stats]) + "\n")
36 | stderr.flush()
37 | fake_rps += 100
38 | time.sleep(0.3)
39 |
40 | sys.exit(0)
41 |
42 |
43 | if __name__ == '__main__':
44 | main()
45 |
--------------------------------------------------------------------------------
/yandextank/plugins/Phantom/config/phantom.conf.tpl:
--------------------------------------------------------------------------------
1 | setup_t module_setup = setup_module_t {
2 | dir = "$phantom_modules_path"
3 | list = {
4 | io_monitor
5 | io_benchmark
6 | io_benchmark_method_stream
7 | io_benchmark_method_stream_ipv4
8 | io_benchmark_method_stream_ipv6
9 | io_benchmark_method_stream_source_log
10 | io_benchmark_method_stream_proto_none
11 | io_benchmark_method_stream_proto_http
12 |
13 | $additional_libs
14 | }
15 | }
16 |
17 | scheduler_t main_scheduler = scheduler_simple_t {
18 | threads = $threads
19 | event_buf_size = 20
20 | timeout_prec = 1
21 | }
22 |
23 | logger_t phantom_logger = logger_file_t {
24 | filename = "$phantom_log"
25 | level = info
26 | scheduler = main_scheduler
27 | }
28 |
29 | logger = phantom_logger
30 |
31 | $benchmarks_block
32 |
33 | setup_t stat_setup = setup_stat_t {
34 | list = { default }
35 | }
36 |
37 |
38 | io_t monitor_io = io_monitor_t {
39 | list = { main_scheduler benchmark_io $stat_benchmarks }
40 | stat_id = default
41 |
42 | period = 1s
43 | clear = true
44 |
45 | scheduler = main_scheduler
46 | filename = "$stat_log"
47 | }
48 |
--------------------------------------------------------------------------------
/yandextank/plugins/YCMonitoring/tests/RATE.json:
--------------------------------------------------------------------------------
1 | {
2 | "metrics": [
3 | {
4 | "name": "disk.read_latency",
5 | "labels": {
6 | "disk": "dqtcr5464dbkv1s1rtrt",
7 | "instance": "instance_example",
8 | "bin": "2",
9 | "service": "compute"
10 | },
11 | "type": "RATE",
12 | "timeseries": {
13 | "timestamps": [
14 | 1714226079000
15 | ],
16 | "doubleValues": [
17 | 0
18 | ]
19 | }
20 | },
21 | {
22 | "name": "disk.read_latency",
23 | "labels": {
24 | "disk": "dqtcr5464dbkv1s1rtrt",
25 | "instance": "instance_example",
26 | "bin": "5",
27 | "service": "compute"
28 | },
29 | "type": "RATE",
30 | "timeseries": {
31 | "timestamps": [
32 | 1714226079000
33 | ],
34 | "doubleValues": [
35 | 0
36 | ]
37 | }
38 | }
39 | ]
40 | }
--------------------------------------------------------------------------------
/yandextank/ammo_validator/tests/test-pandora-inline.yaml:
--------------------------------------------------------------------------------
1 | console: {enabled: false, package: yandextank.plugins.Console}
2 | telegraf: {enabled: false, package: yandextank.plugins.Telegraf}
3 | phantom: {enabled: false, package: yandextank.plugins.Phantom}
4 | pandora:
5 | package: yandextank.plugins.Pandora
6 | enabled: true
7 | config_content:
8 | pools:
9 | - id: HTTP pool
10 | gun:
11 | type: http
12 | target: localhost:443
13 | ssl: true
14 | ammo:
15 | headers:
16 | - '[Host: localhost]'
17 | - '[Connection: close]'
18 | type: uri
19 | uris:
20 | - /test
21 | result:
22 | type: phout
23 | destination: phout.log
24 | rps:
25 | - {duration: 2s, type: step, from: 1, to: 1000, step: 2}
26 | startup:
27 | type: once
28 | times: 100
29 | log:
30 | level: error
31 | monitoring:
32 | expvar:
33 | enabled: true
34 | port: 1234
35 | autostop:
36 | enabled: true
37 | package: yandextank.plugins.Autostop
38 | autostop:
39 | - instances(50%,10s)
40 | rcassert:
41 | enabled: true
42 | package: yandextank.plugins.RCAssert
43 | pass: '24'
44 |
--------------------------------------------------------------------------------
/yandextank/plugins/YCMonitoring/tests/IGAUGE.json:
--------------------------------------------------------------------------------
1 | {
2 | "metrics": [
3 | {
4 | "name": "instances_count",
5 | "labels": {
6 | "resource_type": "instance_group",
7 | "resource_id": "test1-instance-group",
8 | "service": "compute"
9 | },
10 | "type": "IGAUGE",
11 | "timeseries": {
12 | "timestamps": [
13 | 1714227414000,
14 | 1714227435000
15 | ],
16 | "int64Values": [
17 | 1,
18 | 1
19 | ]
20 | }
21 | },
22 | {
23 | "name": "instances_count",
24 | "labels": {
25 | "resource_type": "instance_group",
26 | "resource_id": "test2-instance-group",
27 | "service": "compute"
28 | },
29 | "type": "IGAUGE",
30 | "timeseries": {
31 | "timestamps": [
32 | 1714227414000,
33 | 1714227435000
34 | ],
35 | "int64Values": [
36 | 1,
37 | 1
38 | ]
39 | }
40 | }
41 | ]
42 | }
--------------------------------------------------------------------------------
/yandextank/contrib/netort/tests/test_openers.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from yandextank.contrib.netort.netort.resource import (
4 | FileOpener,
5 | HttpOpener,
6 | open_file,
7 | ResourceManager,
8 | ResourceManagerConfig,
9 | S3Opener,
10 | )
11 |
12 |
13 | @pytest.mark.parametrize(
14 | 'filename, expected_opener',
15 | [
16 | ('/home/user/ammo.file', FileOpener),
17 | ('https://some-proxy-for-ammo/12345678', HttpOpener),
18 | ('s3://test-data/request.ammo', S3Opener),
19 | ],
20 | )
21 | def test_get_correct_opener(filename, expected_opener, patch_resource_manager):
22 | rm = ResourceManager(ResourceManagerConfig())
23 | opener = rm.get_opener(filename)
24 | assert isinstance(opener, expected_opener)
25 |
26 |
27 | @pytest.mark.parametrize(
28 | 'filename, opener_args',
29 | [
30 | ('/home/user/ammo.file', []),
31 | ('https://some-proxy-for-ammo/12345678', [True]),
32 | ('s3://test-data/request.ammo', [True]),
33 | ],
34 | )
35 | def test_open_file_with_opener(filename, opener_args, patch_resource_manager):
36 | rm = ResourceManager(ResourceManagerConfig())
37 | opener = rm.get_opener(filename)
38 | with open_file(opener, use_cache=True):
39 | pass
40 | opener.open.assert_called_once_with(*opener_args)
41 |
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/tests/telegraf_mon.xml:
--------------------------------------------------------------------------------
1 | <Monitoring>
2 |     <Host address="somehost.yandex.tld" interval="1" username="netort">
3 |         <CPU fielddrop='["time_*", "usage_guest_nice"]'/>
4 |         <Kernel fielddrop='["active", "inactive", "total", "used_per*", "avail*"]'/>
5 |         <Net fielddrop='["icmp*", "ip*", "udplite*", "tcp*", "udp*", "drop*", "err*"]' interfaces='["eth0","eth1","lo"]'/>
6 |         <System fielddrop='["n_users", "n_cpus", "uptime*"]'/>
7 |         <Memory fielddrop='["active", "inactive", "total", "used_per*", "avail*"]'/>
8 |         <Disk devices='["vda1","sda1","sda2","sda3","ahalai-mahalai"]'/>
9 |         <Netstat/>
10 |         <Custom diff="1" measure="call" label="test">curl -s 'http://localhost:6100/stat' | python3 -c 'import sys, json; j = json.load(sys.stdin); print("\n".join(repr(c["values"]["accept"]) for c in j["charts"] if c["name"] == "localqueue_wait_time"))'</Custom>
11 |     </Host>
12 |     <Host address="localhost" telegraf="/usr/bin/telegraf">
13 |         <CPU percpu="true"/>
14 |         <Net fielddrop='["icmp*", "ip*", "udplite*", "tcp*", "udp*", "drop*", "err*"]' interfaces='["eth0","eth1","docker0","lo"]'/>
15 |         <Netresponse address="ya.ru:80" protocol="tcp" timeout="1s"/>
16 |     </Host>
17 | </Monitoring>
18 |
--------------------------------------------------------------------------------
/yandextank/plugins/DataUploader/tests/test_postloader/test_full/validated_conf.yaml:
--------------------------------------------------------------------------------
1 | core:
2 | lock_dir: .
3 | phantom:
4 | enabled: false
5 | bfg:
6 | package: yandextank.plugins.Bfg
7 | enabled: true
8 | address: http://lunapark.test.yandex-team.ru
9 | load_profile:
10 | load_type: rps
11 | schedule: const(2, 30s)
12 | instances: 1
13 | header_http: '1.1'
14 | ammofile: simple_ammo.txt
15 | ammo_type: line
16 | loop: 1000
17 | headers: |
18 | [Host: lunapark.test.yandex-team.ru]
19 | [Connection: close]
20 | gun_type: http
21 | gun_config:
22 | base_address: http://lunapark.test.yandex-team.ru
23 | uploader:
24 | package: yandextank.plugins.DataUploader
25 | enabled: true
26 | api_address: https://lunapark.yandex-team.ru/
27 | api_attempts: 2
28 | api_timeout: 5
29 | job_name: Hello kitty
30 | job_dsc: hell of a kitty
31 | lock_targets: foo.bar
32 | # log_other_requests: true
33 | maintenance_timeout: 5
34 | network_attempts: 2
35 | operator: fomars
36 | task: LOAD-204
37 | jobno_file: jobno.txt
38 | telegraf:
39 | enabled: true
40 | config: monitoring.xml
41 | disguise_hostnames: false
42 | autostop:
43 | package: yandextank.plugins.Autostop
44 | enabled: false
45 | autostop:
46 | - total_time(70ms, 3%, 10s)
47 | - http(4xx,25%,10)
48 | console:
49 | short_only: true
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/common/tests/test_util.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 | import yaml
4 | from yandextank.contrib.netort.netort.data_manager.common.util import expandvars, YamlEnvSubstConfigLoader
5 | from unittest.mock import patch
6 |
7 |
8 | @pytest.mark.parametrize(
9 | 'in_, default, expected',
10 | [
11 | ('STR_${ENV}', None, 'STR_${ENV}'),
12 | ('STR_${ENV}', '', 'STR_'),
13 | ('STR_${ENV3}_${ENV2}', 'aa', 'STR_aa_env2_value'),
14 | ('STR_${SOME_OTHER}_$ENV2', '', 'STR_some other value_$ENV2'),
15 | ],
16 | )
17 | def test_expandvars(in_, default, expected):
18 | with patch.dict(os.environ, ENV1='env1_value', ENV2='env2_value', SOME_OTHER='some other value'):
19 | assert expandvars(in_, default) == expected
20 |
21 |
22 | def test_YamlEnvSubstConfigLoader():
23 | config = '''
24 | key:
25 | subkey: ${ENV1}
26 | subkey2: token ${ENV2}
27 | subkey3: not replace
28 | key_reuse: token ${ENV1}
29 | '''
30 | with patch.dict(os.environ, ENV1='env1_value', ENV2='env2_value'):
31 | d: dict = yaml.load(config, Loader=YamlEnvSubstConfigLoader)
32 | assert d['key']['subkey'] == 'env1_value'
33 | assert d['key']['subkey2'] == 'token env2_value'
34 | assert d['key']['subkey3'] == 'not replace'
35 | assert d['key_reuse'] == 'token env1_value'
36 |
--------------------------------------------------------------------------------
/yandextank/plugins/Bfg/example/scenario_gun.py:
--------------------------------------------------------------------------------
1 | def __init__():
2 | """
3 | This is a module initialization function.
4 | """
5 |
6 |
7 | """
8 | Yandex.Tank will call these scenarios
9 | passing 3 parameters to them:
10 |
11 | missile: missile from ammo file
12 | marker: marker from ammo file
13 | measure: measuring context
14 | """
15 |
16 |
17 | def scenario_1(missile, marker, measure):
18 | with measure("scenario_1_step_1") as m:
19 | # make step 1 and set result codes
20 | m["proto_code"] = 200
21 | m["net_code"] = 0
22 | with measure("scenario_1_step_2") as m:
23 | # make step 2 and set result codes
24 | m["proto_code"] = 200
25 | m["net_code"] = 0
26 |
27 |
28 | def scenario_2(missile, marker, measure):
29 | with measure("scenario_2_step_1") as m:
30 | # make step 1 and set result codes
31 | m["proto_code"] = 200
32 | m["net_code"] = 0
33 | with measure("scenario_2_step_2") as m:
34 | # make step 2 and set result codes
35 | m["proto_code"] = 200
36 | m["net_code"] = 0
37 |
38 |
39 | """
40 | SCENARIOS module variable is used by Tank to choose the scenario to
41 | shoot with. For each missile Tank will look up missile marker in this dict.
42 | """
43 | SCENARIOS = {
44 | "scenario_1": scenario_1,
45 |     "scenario_2": scenario_2,
46 | }
47 |
--------------------------------------------------------------------------------
/yandextank/plugins/Phantom/log_analyzer.py:
--------------------------------------------------------------------------------
1 | import re
2 | from collections import defaultdict
3 | import logging
4 |
5 | LOGGER = logging.getLogger(__file__)
6 |
7 |
8 | class LogFormatError(Exception): ...
9 |
10 |
11 | LINE_FORMAT = r'(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d.\d\d\d [+-]\d\d\d\d) \[([^\]]+)\] \[([^\]]*)\] (.*)'
12 |
13 |
14 | class LogLine:
15 | def __init__(self, line: str):
16 |         match = re.match(LINE_FORMAT, line)
17 |         if match is None:
18 |             raise LogFormatError(f"Phantom log line doesn't match format: {LINE_FORMAT}")
19 |         self.time_stamp, self.level, self.name, message = match.groups()
20 | self.message = message.strip()
21 |
22 |
23 | class LogAnalyzer:
24 | def __init__(self, path):
25 | self.path = path
26 |
27 | def get_most_recent_errors(self, limit=10):
28 | counter = defaultdict(int)
29 | with open(self.path) as f:
30 | for line in f:
31 | try:
32 | parsed = LogLine(line)
33 | except LogFormatError:
34 |                     LOGGER.warning('line %s is not recognized as a Phantom log line', line)
35 | continue
36 | if parsed.level not in ['error', 'fatal']:
37 | continue
38 | counter[parsed.message] += 1
39 | return [err for (err, count) in sorted(counter.items(), key=lambda item: -item[1])[:limit]]
40 |
--------------------------------------------------------------------------------
/yandextank/plugins/ShootExec/tests/test_reader.py:
--------------------------------------------------------------------------------
1 | from threading import Event
2 | import os
3 |
4 | import pandas as pd
5 | from yandextank.common.util import get_test_path
6 | from yandextank.common.util import FileMultiReader
7 | from yandextank.plugins.ShootExec.plugin import _ShootExecReader, PhantomReader, DataPoller
8 |
9 |
10 | class TestPhantomReader(object):
11 | def setup_class(self):
12 | stop = Event()
13 | self.multireader = FileMultiReader(
14 | os.path.join(get_test_path(), 'yandextank/plugins/ShootExec/tests/phout.dat'), stop
15 | )
16 | stop.set()
17 |
18 | def teardown_class(self):
19 | self.multireader.close()
20 |
21 | def test_read_all(self):
22 | phantom_reader = PhantomReader(self.multireader.get_file(), cache_size=1024)
23 | reader = _ShootExecReader(phantom_reader, DataPoller(poll_period=0.1, max_wait=2))
24 | df = pd.DataFrame()
25 | sdf = pd.DataFrame()
26 | for chunk in reader:
27 | df = pd.concat([df, pd.DataFrame.from_records(chunk)])
28 | for stat_items in reader.stats_reader:
29 | sdf = pd.concat([sdf, pd.DataFrame.from_records(stat_items)])
30 | assert len(df) == 200
31 | assert df['interval_real'].mean() == 11000714.0
32 | assert len(sdf['ts'].unique()) == 12
33 | assert sdf['ts'].min() == 1482159938
34 | assert sdf['ts'].max() == 1482159949
35 |
--------------------------------------------------------------------------------
/yandextank/plugins/Phantom/config/phantom_benchmark_main.tpl:
--------------------------------------------------------------------------------
1 | io_t benchmark_io = io_benchmark_t {
2 | method_t stream_method = $method_stream {
3 | logger_t benchmark_logger = logger_default_t {
4 | filename = "$answ_log"
5 | $comment_answ level = $answ_log_level
6 | scheduler = main_scheduler
7 | }
8 |
9 | logger_t brief_logger = logger_brief_t {
10 | filename = "$phout"
11 | time_format = unix
12 | scheduler = main_scheduler
13 | }
14 |
15 | loggers = {
16 | brief_logger
17 | $comment_answ benchmark_logger
18 | }
19 |
20 | ${source_log_prefix}source_t source_log = ${source_log_prefix}source_log_t {
21 | filename = "$stpd"
22 | }
23 |
24 | $ssl_transport
25 |
26 | $comment_proto proto_t http_proto0 = proto_http_t {
27 | $reply_limits
28 | $comment_proto }
29 |
30 | $comment_proto proto_t none_proto = proto_none_t { }
31 |
32 | $proto
33 |
34 | $method_options
35 |
36 | address = $ip
37 | port = $port
38 | $bind
39 | timeout = $timeout
40 | source = source_log
41 | }
42 | method = stream_method
43 |
44 | times_t simple_times = times_simple_t {
45 | max = $timeout
46 | min = 1
47 | steps = 20
48 | }
49 | times = simple_times
50 |
51 | instances = $instances
52 | #human_readable_report = false
53 | scheduler = main_scheduler
54 | }
55 |
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/tests/telegraf_mon.yaml:
--------------------------------------------------------------------------------
1 | hosts:
2 | somehost.yandex.tld:
3 | interval: 1
4 | username: netort
5 |
6 | metrics:
7 | cpu:
8 | fielddrop: '["time_*", "usage_guest_nice"]'
9 | kernel:
10 | fielddrop: '["active", "inactive", "total", "used_per*", "avail*"]'
11 | net:
12 | fielddrop: '["icmp*", "ip*", "udplite*", "tcp*", "udp*", "drop*", "err*"]'
13 | interfaces: '["eth0","eth1","lo"]'
14 | system:
15 | fielddrop: '["n_users", "n_cpus", "uptime*"]'
16 | memory:
17 | fielddrop:
18 | - active
19 | - inactive
20 | - total
21 | - used_per*
22 | - avail*
23 | disk:
24 | devices: ["vda1","sda1","sda2","sda3","ahalai-mahalai"]
25 | netstat:
26 | custom:
27 | diff: 1
28 | measure: call
29 | label: test
30 | cmd: curl -s 'http://localhost:6100/stat' | python3 -c 'import sys, json; j = json.load(sys.stdin); print("\n".join(repr(c["values"]["accept"]) for c in j["charts"] if c["name"] == "localqueue_wait_time"))'
31 |
32 | localhost:
33 | telegraf: "/usr/bin/telegraf"
34 | metrics:
35 | cpu:
36 | percpu: True
37 | net:
38 | fielddrop: '["icmp*", "ip*", "udplite*", "tcp*", "udp*", "drop*", "err*"]'
39 | interfaces: '["eth0","eth1","docker0","lo"]'
40 | netresponse:
41 | address: ya.ru:80
42 | protocol: tcp
43 | timeout: 1s
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/tests/telegraf_global_inputs.yaml:
--------------------------------------------------------------------------------
1 | hosts:
2 | somehost.yandex.tld:
3 | interval: 1
4 | username: netort
5 | metrics:
6 | # check overriding with local metrics
7 | memory:
8 | fielddrop:
9 | - active
10 | - inactive
11 | - total
12 | - used_per*
13 | - avail*
14 | localhost:
15 | telegraf: "/usr/bin/telegraf"
16 | metrics:
17 | cpu:
18 | percpu: True
19 | net:
20 | fielddrop: '["icmp*", "ip*", "udplite*", "tcp*", "udp*", "drop*", "err*"]'
21 | interfaces: '["eth0","eth1","docker0","lo"]'
22 | netresponse:
23 | address: ya.ru:80
24 | protocol: tcp
25 | timeout: 1s
26 | metrics:
27 | cpu:
28 | fielddrop: '["time_*", "usage_guest_nice"]'
29 | kernel:
30 | fielddrop: '["active", "inactive", "total", "used_per*", "avail*"]'
31 | net:
32 | fielddrop: '["icmp*", "ip*", "udplite*", "tcp*", "udp*", "drop*", "err*"]'
33 | interfaces: '["eth0","eth1","lo"]'
34 | system:
35 | fielddrop: '["n_users", "n_cpus", "uptime*"]'
36 | memory:
37 | fielddrop:
38 | - total
39 | disk:
40 | devices: ["vda1","sda1","sda2","sda3","ahalai-mahalai"]
41 | netstat:
42 | custom:
43 | diff: 1
44 | measure: call
45 | label: test
46 | cmd: curl -s 'http://localhost:6100/stat' | python3 -c 'import sys, json; j = json.load(sys.stdin); print("\n".join(repr(c["values"]["accept"]) for c in j["charts"] if c["name"] == "localqueue_wait_time"))'
47 |
--------------------------------------------------------------------------------
/yandextank/core/config/00-base.yaml:
--------------------------------------------------------------------------------
1 | # WARNING: order of plugins matters
2 | rcheck:
3 | enabled: true
4 | package: yandextank.plugins.ResourceCheck
5 | bfg:
6 | enabled: false
7 | package: yandextank.plugins.Bfg
8 | console:
9 | enabled: true
10 | package: yandextank.plugins.Console
11 | influx:
12 | enabled: false
13 | package: yandextank.plugins.InfluxUploader
14 | jmeter:
15 | enabled: false
16 | package: yandextank.plugins.JMeter
17 | json_report:
18 | enabled: true
19 | package: yandextank.plugins.JsonReport
20 | offline_report:
21 | enabled: false
22 | package: yandextank.plugins.OfflineReport
23 | opentsdb:
24 | enabled: false
25 | package: yandextank.plugins.OpenTSDBUploader
26 | overload:
27 | enabled: false
28 | package: yandextank.plugins.DataUploader
29 | gatling:
30 | enabled: false
31 | package: yandextank.plugins.Gatling
32 | pandora:
33 | enabled: false
34 | package: yandextank.plugins.Pandora
35 | phantom:
36 | enabled: false
37 | package: yandextank.plugins.Phantom
38 | platform:
39 | enabled: false
40 | package: yandextank.plugins.Platform
41 | shellexec:
42 | enabled: true
43 | package: yandextank.plugins.ShellExec
44 | shootexec:
45 | enabled: false
46 | package: yandextank.plugins.ShootExec
47 | telegraf:
48 | enabled: true
49 | package: yandextank.plugins.Telegraf
50 | autostop:
51 | enabled: true
52 | package: yandextank.plugins.Autostop
53 | rcassert:
54 | enabled: true
55 | package: yandextank.plugins.RCAssert
56 | uploader:
57 | enabled: false
58 | package: yandextank.plugins.DataUploader
59 |
--------------------------------------------------------------------------------
/yandextank/common/tests/test_interfaces.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import re
3 | from yandextank.common.interfaces import TankInfo, AbstractCriterion
4 |
5 |
6 | class TestStatus(object):
7 |
8 | @pytest.mark.parametrize(
9 | 'updates, result',
10 | [
11 | ([(['plugin', 'key1'], 'foo'), (['plugin', 'key2'], 42)], {'plugin': {'key1': 'foo', 'key2': 42}}),
12 | (
13 | [(['plugin1', 'key1'], 'foo'), (['plugin1', 'key2'], 42), (['plugin2', 'key1'], 'bar')],
14 | {'plugin1': {'key1': 'foo', 'key2': 42}, 'plugin2': {'key1': 'bar'}},
15 | ),
16 | ],
17 | )
18 | def test_update(self, updates, result):
19 | info = TankInfo(dict())
20 | for args in updates:
21 | info.update(*args)
22 | assert info.get_info_dict() == result
23 |
24 |
25 | @pytest.mark.parametrize(
26 | 'codes_mask, codes_dict, expected_matched_cnt',
27 | [
28 | ('', {'200': 500}, 0), # mask is empty
29 | ('1', {'110': 500}, 0), # mask is prefix
30 | ('1', {'21': 500}, 0), # mask is suffix
31 | ('0.', {'0': 500}, 0), # mask is too long
32 | ('2.', {'200': 500}, 0), # mask is too short
33 | ('2..', {'200': 500}, 500),
34 | ('2..', {'200': 500, '201': 100}, 600),
35 | ('..9', {'999': 500}, 500),
36 | ],
37 | )
38 | def test_match(codes_mask, codes_dict, expected_matched_cnt):
39 | assert (
40 | AbstractCriterion.count_matched_codes(codes_regex=re.compile(codes_mask), codes_dict=codes_dict)
41 | == expected_matched_cnt
42 | )
43 |
--------------------------------------------------------------------------------
/yandextank/plugins/JMeter/config/schema.yaml:
--------------------------------------------------------------------------------
1 | affinity:
2 | description: Use to set CPU affinity
3 | type: string
4 | nullable: true
5 | default: ''
6 | args:
7 | description: additional commandline arguments for JMeter.
8 | type: string
9 | default: ''
10 | buffer_size:
11 | description: jmeter buffer size
12 | type: integer
13 | nullable: true
14 | default: null
15 | buffered_seconds:
16 | description: Aggregator delay - to be sure that everything was read from the jmeter results file.
17 | type: integer
18 | default: 3
19 | exclude_markers:
20 | type: list
21 | schema:
22 | type: string
23 | empty: false
24 | default: []
25 | ext_log:
26 | description: additional log, jmeter xml format. Saved in test dir as jmeter_ext_XXXX.jtl
27 | type: string
28 | allowed:
29 | - none
30 | - errors
31 | - all
32 | default: none
33 | extended_log:
34 | description: additional log, jmeter xml format. Saved in test dir as jmeter_ext_XXXX.jtl
35 | type: string
36 | allowed:
37 | - none
38 | - errors
39 | - all
40 | default: none
41 | jmeter_path:
42 | description: Path to JMeter
43 | type: string
44 | default: jmeter
45 | jmeter_ver:
46 | description: Which JMeter version tank should expect. Affects the way connection time is logged.
47 | type: float
48 | default: 3.0
49 | jmx:
50 | description: Testplan for execution.
51 | type: string
52 | shutdown_timeout:
53 | description: timeout for automatic test shutdown
54 | type: integer
55 | default: 10
56 | variables:
57 | description: variables for jmx testplan
58 | type: dict
59 | default: {}
60 |
--------------------------------------------------------------------------------
/docker/Dockerfile.mobile:
--------------------------------------------------------------------------------
1 | # Android performance testing environment with yandex-tank.
2 | # version 0.0.1
3 |
4 | FROM direvius/yandex-tank
5 |
6 | MAINTAINER Alexey Lavrenuke
7 |
8 | ENV DEBIAN_FRONTEND noninteractive
9 |
10 | RUN add-apt-repository ppa:webupd8team/java && apt update && \
11 | echo "oracle-java8-installer shared/accepted-oracle-license-v1-1 select true" | debconf-set-selections && \
12 | apt -y install oracle-java8-installer
13 |
14 | RUN wget https://dl.google.com/android/android-sdk_r24.4.1-linux.tgz && \
15 | tar -xvzf android-sdk_r24.4.1-linux.tgz && \
16 | mv android-sdk-linux /usr/local/android-sdk
17 |
18 |
19 | ENV ANDROID_HOME /usr/local/android-sdk
20 | ENV PATH $PATH:$ANDROID_HOME/tools
21 | ENV PATH $PATH:$ANDROID_HOME/platform-tools
22 | ENV JAVA_HOME /usr/lib/jvm/java-8-oracle
23 |
24 |
25 | ARG MAVEN_VERSION=3.3.9
26 | ARG USER_HOME_DIR="/root"
27 |
28 | RUN mkdir -p /usr/share/maven /usr/share/maven/ref \
29 | && curl -fsSL http://apache.osuosl.org/maven/maven-3/$MAVEN_VERSION/binaries/apache-maven-$MAVEN_VERSION-bin.tar.gz \
30 | | tar -xzC /usr/share/maven --strip-components=1 \
31 | && ln -s /usr/share/maven/bin/mvn /usr/bin/mvn
32 |
33 | ENV MAVEN_HOME /usr/share/maven
34 | ENV MAVEN_CONFIG "$USER_HOME_DIR/.m2"
35 |
36 | VOLUME "$USER_HOME_DIR/.m2"
37 |
38 | # some street magic
39 | RUN echo "y" | android update sdk --no-ui --force --filter platform-tools
40 |
41 | RUN pip3 install uiautomator Appium-Python-Client
42 |
43 | RUN curl -sL https://deb.nodesource.com/setup_6.x | bash - && \
44 | apt install -y nodejs && npm install -g appium
45 |
--------------------------------------------------------------------------------
/AUTHORS.md:
--------------------------------------------------------------------------------
1 | The following authors have created the source code of "Yandex.Tank" published and distributed by YANDEX LLC as the owner:
2 |
3 | Alexey Lavrenuke
4 | Andrey Pohilko
5 | Timur Torubarov
6 | Oles Pisarenko
7 | Nurlan Nugumanov
8 | Mikhail Epikhin
9 | Dmitriy Kuznetsov
10 | Andrey Sekretenko
11 | Andrew Grigorev
12 | Gregory Komissarov
13 | Mikhail Dyomin
14 | Ilya Krylov
15 | Alexey Tishkov
16 | Andrey Osipov
17 | zaratustra
18 | Alexander Shorin
19 | Alexey Kirpichnikov
20 | Andrew Kulakov
21 | Andrew Osipov
22 | Anton
23 | Fadi Hadzh
24 | Gleb E Goncharov
25 | Igor Shishkin
26 | Kirill SIbirev
27 | Konstantin Shalygin
28 | Leonid Evdokimov
29 | Max Taldykin
30 | Oleg Alistratov
31 | Oleg Klimin
32 | Sergey Melnik
33 | Vasily Chekalkin
34 | Victor Ashik
35 | Vladimir Evgrafov
36 | Yuriy Syrovetskiy
37 | kshcherban
38 | Oleg Klimin
39 | Alexander Artemenko
40 | Maxim Bublis
41 |
--------------------------------------------------------------------------------
/yandextank/plugins/RCAssert/plugin.py:
--------------------------------------------------------------------------------
1 | '''Tank exit code check plugin'''
2 |
3 | from yandextank.common.interfaces import AbstractPlugin
4 |
5 |
6 | class Plugin(AbstractPlugin):
7 | SECTION = 'rcassert'
8 |
9 | def __init__(self, core, cfg, name):
10 | AbstractPlugin.__init__(self, core, cfg, name)
11 | self.ok_codes = []
12 | self.fail_code = 10
13 |
14 | @staticmethod
15 | def get_key():
16 | return __file__
17 |
18 | def get_available_options(self):
19 | return ["pass", "fail_code"]
20 |
21 | def configure(self):
22 | codes = self.get_option("pass", '').split(' ')
23 | for code in codes:
24 | if code:
25 | self.ok_codes.append(int(code))
26 | if bool(self.ok_codes) and (0 not in self.ok_codes):
27 | self.log.warning('RCAssert pass is missing zero exit code (0).')
28 | self.fail_code = int(self.get_option("fail_code", self.fail_code))
29 |
30 | def post_process(self, retcode):
31 | if not self.ok_codes:
32 | return retcode
33 |
34 | for code in self.ok_codes:
35 | self.log.debug("Comparing %s with %s codes", code, retcode)
36 | if code == int(retcode):
37 | self.log.info("Exit code %s was changed to 0 by RCAssert plugin", code)
38 | return 0
39 |
40 | self.log.info("Changing exit code to %s because RCAssert pass list was unsatisfied", self.fail_code)
41 | if self.fail_code > 0:
42 | self.errors.append(
43 | f'Changed exit code from {retcode} to {self.fail_code} because RCAssert pass list was unsatisfied'
44 | )
45 | return self.fail_code
46 |
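Given the options above ("pass" is a space-separated list of acceptable exit codes, "fail_code" is what the tank exits with when none of them match), a corresponding tank config section might look like this sketch; the enabled/package keys follow the 00-base.yaml pattern used elsewhere in this repo and the values are illustrative:

    rcassert:
      enabled: true
      package: yandextank.plugins.RCAssert
      pass: '0 21'      # exit codes accepted as success; keep 0 in the list
      fail_code: 10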
--------------------------------------------------------------------------------
/yandextank/plugins/DataUploader/tests/test_postloader.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import os
3 |
4 | from yandextank.common.util import get_test_path
5 | from yandextank.plugins.DataUploader.cli import from_tank_config, get_logger
6 |
7 |
8 | @pytest.mark.parametrize(
9 | 'test_dir, expected',
10 | [
11 | (os.path.join(get_test_path(), 'yandextank/plugins/DataUploader/tests/test_postloader/test_empty'), (None, {})),
12 | (
13 | os.path.join(get_test_path(), 'yandextank/plugins/DataUploader/tests/test_postloader/test_full'),
14 | (
15 | 'uploader',
16 | {
17 | 'api_address': 'https://lunapark.yandex-team.ru/',
18 | 'api_attempts': 2,
19 | 'api_timeout': 5,
20 | 'enabled': True,
21 | 'job_dsc': 'hell of a kitty',
22 | 'job_name': 'Hello kitty',
23 | 'jobno_file': 'jobno.txt',
24 | 'lock_targets': 'foo.bar',
25 | 'maintenance_timeout': 5,
26 | 'network_attempts': 2,
27 | 'operator': 'fomars',
28 | 'package': 'yandextank.plugins.DataUploader',
29 | 'task': 'LOAD-204',
30 | },
31 | ),
32 | ),
33 | (
34 | os.path.join(get_test_path(), 'yandextank/plugins/DataUploader/tests/test_postloader/test_disabled'),
35 | ('uploader', {'enabled': False, 'package': 'yandextank.plugins.DataUploader'}),
36 | ),
37 | ],
38 | )
39 | def test_from_tank_config(test_dir, expected):
40 | get_logger()
41 | assert from_tank_config(test_dir) == expected
42 |
--------------------------------------------------------------------------------
/yandextank/core/tests/test_lock.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import os
3 | import pytest
4 |
5 | from yandextank.core.tankcore import Lock, LockError
6 |
7 | TEST_DIR = './test_lock'
8 |
9 |
10 | def setup_module(module):
11 | if not os.path.exists(TEST_DIR):
12 | os.makedirs(TEST_DIR)
13 |
14 |
15 | def teardown_module(module):
16 | # clear all .lock files
17 | for fname in glob.glob(os.path.join(TEST_DIR, Lock.LOCK_FILE_WILDCARD)):
18 | os.remove(fname)
19 |
20 |
21 | def setup_function(fn):
22 | # clear all .lock files
23 | for fname in glob.glob(os.path.join(TEST_DIR, Lock.LOCK_FILE_WILDCARD)):
24 | os.remove(fname)
25 |
26 |
27 | def test_acquire():
28 | Lock('123', 'tests/123').acquire(TEST_DIR)
29 | with pytest.raises(LockError):
30 | Lock('124', 'test/124').acquire(TEST_DIR)
31 |
32 |
33 | def test_load():
34 | lock = Lock('123', 'tests/123').acquire(TEST_DIR)
35 | lock_loaded = Lock.load(lock.lock_file)
36 | assert lock_loaded.info == lock.info
37 |
38 |
39 | def test_ignore():
40 | Lock('123', 'tests/123').acquire(TEST_DIR)
41 | Lock('124', 'tests/124').acquire(TEST_DIR, ignore=True)
42 | assert len(glob.glob(os.path.join(TEST_DIR, Lock.LOCK_FILE_WILDCARD))) == 2
43 |
44 |
45 | def test_release():
46 | lock = Lock('123', 'tests/123').acquire(TEST_DIR)
47 | assert len(glob.glob(os.path.join(TEST_DIR, Lock.LOCK_FILE_WILDCARD))) == 1
48 | lock.release()
49 | assert len(glob.glob(os.path.join(TEST_DIR, Lock.LOCK_FILE_WILDCARD))) == 0
50 |
51 |
52 | def test_running_ids():
53 | Lock('123', 'tests/123').acquire(TEST_DIR)
54 | Lock('124', 'tests/124').acquire(TEST_DIR, ignore=True)
55 | assert set(Lock.running_ids(TEST_DIR)) == {'123', '124'}
56 |
--------------------------------------------------------------------------------
/yandextank/plugins/Bfg/widgets.py:
--------------------------------------------------------------------------------
1 | import datetime
2 |
3 | from ...common.interfaces import AbstractInfoWidget
4 |
5 |
6 | class BfgInfoWidget(AbstractInfoWidget):
7 | '''Console widget'''
8 |
9 | def __init__(self):
10 | AbstractInfoWidget.__init__(self)
11 | self.active_threads = 0
12 | self.instances = 0
13 | self.planned = 0
14 | self.RPS = 0
15 | self.selfload = 0
16 | self.time_lag = 0
17 | self.planned_rps_duration = 0
18 |
19 | def get_index(self):
20 | return 0
21 |
22 | def on_aggregated_data(self, data, stat):
23 | self.instances = stat["metrics"]["instances"]
24 |
25 | self.RPS = data["overall"]["interval_real"]["len"]
26 | self.selfload = 0 # TODO
27 | self.time_lag = 0 # TODO
28 |
29 | def render(self, screen):
30 | res = ''
31 |
32 | res += "Active instances: "
33 | res += str(self.instances)
34 |
35 | res += "\nPlanned requests: %s for %s\nActual responses: " % (
36 | self.planned,
37 | datetime.timedelta(seconds=self.planned_rps_duration),
38 | )
39 | if self.planned != self.RPS:
40 | res += screen.markup.YELLOW + str(self.RPS) + screen.markup.RESET
41 | else:
42 | res += str(self.RPS)
43 |
44 | res += "\n Accuracy: "
45 | if self.selfload < 80:
46 | res += screen.markup.RED + ('%.2f' % self.selfload) + screen.markup.RESET
47 | elif self.selfload < 95:
48 | res += screen.markup.YELLOW + ('%.2f' % self.selfload) + screen.markup.RESET
49 | else:
50 | res += '%.2f' % self.selfload
51 |
52 | res += "%\n Time lag: "
53 | res += str(datetime.timedelta(seconds=self.time_lag))
54 |
55 | return res
56 |
--------------------------------------------------------------------------------
/yandextank/plugins/YCMonitoring/tests/DGAUGE.json:
--------------------------------------------------------------------------------
1 | {
2 | "metrics": [
3 | {
4 | "name": "io.disk0.utilization",
5 | "labels": {
6 | "node": "replica",
7 | "service": "managed-postgresql",
8 | "subcluster_name": "postgresql930",
9 | "host": "test_host.net",
10 | "resource_type": "cluster",
11 | "resource_id": "postgresql_loadtesting",
12 | "shard": "yc.loadtesting.service-folder",
13 | "dc": "nodc"
14 | },
15 | "type": "DGAUGE",
16 | "timeseries": {
17 | "timestamps": [
18 | 1714218465000,
19 | 1714218480000
20 | ],
21 | "doubleValues": [
22 | 40.86666666666667,
23 | 4.220292886953635
24 | ]
25 | }
26 | },
27 | {
28 | "name": "io.disk0.utilization",
29 | "labels": {
30 | "node": "primary",
31 | "service": "managed-postgresql",
32 | "subcluster_name": "postgresql930",
33 | "host": "test_host_2.net",
34 | "resource_type": "cluster",
35 | "resource_id": "postgresql_loadtesting",
36 | "shard": "yc.loadtesting.service-folder",
37 | "dc": "nodc"
38 | },
39 | "type": "DGAUGE",
40 | "timeseries": {
41 | "timestamps": [
42 | 1714218465000,
43 | 1714218480000
44 | ],
45 | "doubleValues": [
46 | 61.86666666666667,
47 | 5.166224436732864
48 | ]
49 | }
50 | }
51 | ]
52 | }
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/tests/test_plugin.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import os
3 |
4 | from yandextank.common.util import get_test_path
5 | from yandextank.core.tankcore import TankCore
6 | from yandextank.core.tankworker import TankInfo
7 | from yandextank.plugins.Telegraf import Plugin as TelegrafPlugin
8 |
9 |
10 | class TestTelegrafPlugin(object):
11 | def test_plugin_configuration(self):
12 | """testing telegraf plugin configuration"""
13 | cfg = {
14 | 'core': {'skip_generator_check': True},
15 | 'telegraf': {
16 | 'package': 'yandextank.plugins.Telegraf',
17 | 'enabled': True,
18 | 'ssh_key_path': '/some/path',
19 | 'config': os.path.join(get_test_path(), 'yandextank/plugins/Telegraf/tests/telegraf_mon.xml'),
20 | },
21 | }
22 | core = TankCore(cfg, threading.Event(), TankInfo({}))
23 | telegraf_plugin = core.get_plugin_of_type(TelegrafPlugin)
24 | telegraf_plugin.configure()
25 | assert telegraf_plugin.detected_conf == 'telegraf'
26 | assert telegraf_plugin.monitoring.ssh_key_path == '/some/path'
27 |
28 | def test_legacy_plugin_configuration(self):
29 | """testing legacy plugin configuration, old-style monitoring"""
30 | cfg = {
31 | 'core': {'skip_generator_check': True},
32 | 'monitoring': {
33 | 'package': 'yandextank.plugins.Telegraf',
34 | 'enabled': True,
35 | 'config': os.path.join(get_test_path(), 'yandextank/plugins/Telegraf/tests/old_mon.xml'),
36 | },
37 | }
38 | core = TankCore(cfg, threading.Event(), TankInfo({}))
39 | telegraf_plugin = core.get_plugin_of_type(TelegrafPlugin)
40 | telegraf_plugin.configure()
41 | assert telegraf_plugin.detected_conf == 'monitoring'
42 |
--------------------------------------------------------------------------------
/yandextank/stepper/util.py:
--------------------------------------------------------------------------------
1 | '''
2 | Utilities: parsers, converters, etc.
3 | '''
4 |
5 | import re
6 | import logging
7 | import math
8 | from itertools import islice
9 |
10 | from .module_exceptions import StepperConfigurationError
11 |
12 | logging.getLogger("requests").setLevel(logging.WARNING)
13 |
14 |
15 | def take(number, iter):
16 | return list(islice(iter, 0, number))
17 |
18 |
19 | def parse_duration(duration):
20 | '''
21 | Parse duration string, such as '3h2m3s' into milliseconds
22 |
23 | >>> parse_duration('3h2m3s')
24 | 10923000
25 |
26 | >>> parse_duration('0.3s')
27 | 300
28 |
29 | >>> parse_duration('5')
30 | 5000
31 | '''
32 | _re_token = re.compile("([0-9.]+)([dhms]?)")
33 |
34 | def parse_token(time, multiplier):
35 | multipliers = {
36 | 'd': 86400,
37 | 'h': 3600,
38 | 'm': 60,
39 | 's': 1,
40 | }
41 | if multiplier:
42 | if multiplier in multipliers:
43 | return int(float(time) * multipliers[multiplier] * 1000)
44 | else:
45 | raise StepperConfigurationError('Failed to parse duration: %s' % duration)
46 | else:
47 | return int(float(time) * 1000)
48 |
49 | return sum(parse_token(*token) for token in _re_token.findall(duration))
50 |
51 |
52 | def solve_quadratic(a, b, c):
53 | '''
54 | >>> solve_quadratic(1.0, 2.0, 1.0)
55 | (-1.0, -1.0)
56 | '''
57 | discRoot = math.sqrt((b * b) - 4 * a * c)
58 | root1 = (-b - discRoot) / (2 * a)
59 | root2 = (-b + discRoot) / (2 * a)
60 | return (root1, root2)
61 |
62 |
63 | def s_to_ms(f_sec):
64 | return int(f_sec * 1000.0)
65 |
66 |
67 | def proper_round(n):
68 | """
69 | rounds float to closest int
70 | :rtype: int
71 | :param n: float
72 | """
73 | return int(n) + (n / abs(n)) * int(abs(n - int(n)) >= 0.5) if n != 0 else 0
74 |
--------------------------------------------------------------------------------
/docs/intro.rst:
--------------------------------------------------------------------------------
1 | Getting started
2 | =================
3 |
4 | Welcome to Yandex.Tank documentation. Yandex.Tank is an extensible load testing utility for unix systems. It is written in Python and uses different load generator modules in different languages.
5 |
6 | Getting Help
7 | -------------
8 | `Gitter.im <https://gitter.im/yandex/yandex-tank>`_
9 |
10 | What are the Yandex.Tank components?
11 | -------------------------------------
12 | * ``Core`` - handles the basic steps of test preparation, configuration and execution, stores artifacts and controls the plugins/modules.
13 | * ``Load generators`` - modules that use and control load generators (the load generators themselves are NOT included).
14 | * ``Artifact uploaders`` - modules that upload artifacts to external storages and services.
15 | * ``Handy tools`` - monitoring tools, console online screen, autostops and so on.
16 |
17 | .. note::
18 | When using ``phantom`` as a load generator for mild load tests (less than 1000 RPS), an average laptop with 64-bit Ubuntu (10.04/.../13.10) is sufficient. The tank can easily be used in a virtual machine if the queries aren't too heavy and the load isn't too big. Otherwise it is recommended to request a physical server or a more capable virtual machine from your admin.
19 |
20 | Running Yandex.Tank
21 | -------------------
22 | 1. Install the tank on your system: :doc:`install`
23 |
24 | 2. Tune your system: :doc:`generator_tuning`
25 |
26 | 3. Run the tutorial: :doc:`tutorial`
27 |
28 | ----------
29 |
30 | 4. If you are skilled enough, feel free to use :doc:`configuration` (a minimal config is sketched below).
31 |
32 | 5. For developers: :doc:`core_and_modules`.
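
A minimal configuration might look like the sketch below (illustrative only; the exact option set for each plugin is described in :doc:`configuration`)::

    # load.yaml
    phantom:
      address: example.org:80          # target host and port
      uris:
        - /                            # simple URI "ammo"
      load_profile:
        load_type: rps
        schedule: line(1, 10, 5m)      # ramp from 1 to 10 rps over 5 minutes
    console:
      enabled: true                    # online screen in the terminal

    # run it with:
    #   yandex-tank -c load.yaml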
33 |
34 |
35 | See also
36 | --------
37 |
38 | Evgeniy Mamchits' `phantom <https://github.com/yandex-load/phantom>`_ -
39 | Phantom scalable IO Engine
40 |
41 | Alexey Lavrenuke's `pandora <https://github.com/yandex/pandora>`_ -
42 | A load generator in Go language
43 |
44 | Gregory Komissarov's
45 | `firebat `_ - test tool
46 | based on Phantom
47 |
--------------------------------------------------------------------------------
/yandextank/ammo_validator/validators/uri_inline.py:
--------------------------------------------------------------------------------
1 | from typing import Callable
2 | from ..common import AmmoType, Messages, InlineFormatValidator
3 |
4 |
5 | class UriInlineValidator(InlineFormatValidator):
6 | AMMO_TYPES = {AmmoType.URI}
7 |
8 | def __init__(self):
9 | self._msgs = Messages()
10 |
11 | def is_suitable(self, ammo_type: AmmoType) -> bool:
12 | return ammo_type == AmmoType.URI
13 |
14 | def _check_header(self, header: str):
15 | result = True
16 | if not (header.startswith('[') and header.endswith(']')):
17 | self._msgs.error(self._msg('Header line must be in square braces'))
18 | result = False
19 | if ':' not in header:
20 | self._msgs.error(self._msg('Header line does not contain ":"'))
21 | result = False
22 | return result
23 |
24 | def _check_field(self, data: dict, field_name: str, value_verifier: Callable[[str], bool]):
25 | success = 0
26 | if field_name not in data:
27 | self._msgs.error(self._msg(f'No {field_name} in ammo'))
28 | elif not isinstance(data[field_name], list):
29 | self._msgs.error(self._msg(f'{field_name} is not a list'))
30 | else:
31 | for value in data[field_name]:
32 | if not isinstance(value, str) or not value:
33 | self._msgs.error(self._msg(f'{field_name} must be non empty string'))
34 | elif value_verifier(value):
35 | success += 1
36 | return success
37 |
38 | def validate(self, data: dict) -> Messages:
39 | messages = Messages()
40 | success = self._check_field(data, 'uris', lambda uri: True)
41 | self._check_field(data, 'headers', self._check_header)
42 |
43 | messages.info(self._msg(f'{success} uris'))
44 | if not success:
45 | messages.error(self._msg('No successfully read packets in ammo'))
46 |
47 | return messages
48 |
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/metrics/tests/test_aggregate.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import os
3 |
4 | from pandas.testing import assert_frame_equal
5 |
6 | from yandextank.contrib.netort.netort.data_manager.common.interfaces import (
7 | TypeQuantiles,
8 | TypeHistogram,
9 | TypeDistribution,
10 | )
11 | import pytest
12 |
13 | PATH = 'netort/data_manager/metrics/tests/'
14 |
15 |
16 | @pytest.mark.xfail
17 | def test_processor():
18 | data = pd.read_csv(os.path.join(PATH, 'df1_buffered.csv'))
19 | aggregated = TypeQuantiles.processor(data, True)
20 | assert all([col in aggregated.columns for col in TypeQuantiles.columns])
21 |
22 |
23 | def test_histograms_processor():
24 | data = pd.read_csv(os.path.join(PATH, 'metric_data_input_event_1.csv'))
25 | data.loc[:, 'second'] = (data['ts'] / 1000000).astype(int)
26 | expected = pd.read_csv(os.path.join(PATH, 'metric_data_output_histogram_1.csv'))
27 | aggregated = TypeHistogram.processor(data)
28 | assert expected.equals(aggregated)
29 |
30 |
31 | def test_quantiles_processor():
32 | data = pd.read_csv(os.path.join(PATH, 'metric_data_input_metric_2.csv'))
33 | data.loc[:, 'second'] = (data['ts'] / 1000000).astype(int)
34 | expected = pd.read_csv(os.path.join(PATH, 'metric_data_output_quantile_2.csv'))
35 | expected = expected.round(2).set_index('second')
36 | aggregated = TypeQuantiles.processor(data).round(2)
37 | assert aggregated.equals(expected)
38 |
39 |
40 | @pytest.mark.skip('broken in arcadia')
41 | def test_distributions_processor():
42 | data = pd.read_csv(os.path.join(PATH, 'metric_data_input_metric_2.csv'))
43 | data.loc[:, 'second'] = (data['ts'] / 1000000).astype(int)
44 | aggregated = TypeDistribution.processor(data).round(2)
45 | expected = pd.read_csv(os.path.join(PATH, 'metric_data_output_distributions_2.csv')).set_index('second')
46 | assert_frame_equal(aggregated.sort_index(axis=1), expected.sort_index(axis=1), check_names=False)
47 |
--------------------------------------------------------------------------------
/yandextank/plugins/JMeter/config/jmeter_writer.xml:
--------------------------------------------------------------------------------
1 |
2 |
4 | false
5 |
6 | saveConfig
7 |
8 |
9 | true
10 | true
11 | true
12 |
13 | true
14 | false
15 | false
16 | false
17 | false
18 | false
19 | false
20 | false
21 | false
22 | false
23 | true
24 | false
25 | false
26 | false
27 | false
28 | 0
29 | true
30 | true
31 | %(save_connect)s
32 |
33 |
34 | %(jtl)s
35 | Added automatically
36 |
37 |
38 |
39 |
40 | %(udv)s
41 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/data/yandex-tank.spec:
--------------------------------------------------------------------------------
1 | Name: yandex-tank
2 | Version: 1.5.0
3 | Release: 1%{?dist}
4 | Summary: Yandex.Tank (Load Testing Tool)
5 |
6 | License: MIT
7 | URL: https://github.com/yandex-load/yandex-tank
8 | Source0: %{name}-%{version}.tar.gz
9 |
10 | BuildArch: noarch
11 | BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
12 |
13 | Requires: python-psutil
14 | Requires: python-ipaddr
15 |
16 |
17 | %description
18 | Yandex.Tank is an extendable open source load testing tool for advanced Linux users which is especially good as a part of an automated load testing suite.
19 |
20 | %prep
21 | %setup -q
22 |
23 | %build
24 |
25 | %install
26 | rm -rf %{buildroot}
27 | mkdir -p %{buildroot}
28 | mkdir -p %{buildroot}/%{_bindir}
29 | mkdir -p %{buildroot}/%{_sysconfdir}/yandex-tank/
30 | mkdir -p %{buildroot}/%{_sysconfdir}/bash_completion.d/
31 | mkdir -p %{buildroot}/%{_libdir}/yandex-tank/
32 | cp -ap 00-base.ini %{buildroot}/%{_sysconfdir}/yandex-tank/
33 | cp -ap yandex-tank.completion %{buildroot}/%{_sysconfdir}/bash_completion.d/
34 | cp -arp Tank %{buildroot}/%{_libdir}/yandex-tank/
35 | cp -ap tankcore.py %{buildroot}/%{_libdir}/yandex-tank/
36 | cp -ap tank.py %{buildroot}/%{_libdir}/yandex-tank/
37 | cp -ap *.sh %{buildroot}/%{_libdir}/yandex-tank/
38 | ln -s %{_libdir}/yandex-tank/tank.py %{buildroot}/%{_bindir}/yandex-tank
39 |
40 | %clean
41 | rm -rf %{buildroot}
42 |
43 | %files
44 | %defattr(-,root,root,-)
45 | %{_sysconfdir}/yandex-tank
46 | %{_sysconfdir}/bash_completion.d/yandex-tank.completion
47 | %{_bindir}/lunapark
48 | %{_bindir}/yandex-tank
49 | %{_libdir}/yandex-tank
50 |
51 | %changelog
52 | * Wed Mar 19 2014 Andrey Pohilko (undera) - 1.5.0
53 | - Static HTML report with Highcharts graphs: tank metrics and monitoring
54 | - Highcharts template for Graphite reports
55 | - Better hostname resolve method
56 | - Bugfixes
57 | - collect 'cached' mem by default
58 | - fix collecting disk activity on lxc containers
59 | - add steady_cumulative autostop criteria
60 | - don't fail test on agent shutdown problems
61 | - fix rcheck on remote fs
62 |
--------------------------------------------------------------------------------
/yandextank/plugins/OpenTSDBUploader/tests/test_opentsdb_decoder.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from uuid import uuid4
3 |
4 | from yandextank.plugins.OpenTSDBUploader.decoder import Decoder
5 |
6 |
7 | class TestDecoder(object):
8 | def test_metrics_cast(self):
9 | test_uuid = str(uuid4())
10 | tank_tag = 'test_tank_tag'
11 | comment = 'test comment'
12 | raw_metrics = {
13 | 'metric1': -123,
14 | 'metric2': -123.456,
15 | 'metric3': 123,
16 | 'metric4': 123.456,
17 | 'metric5': 0,
18 | 'metric6': -0.1,
19 | 'metric7': 0.1,
20 | 'metric8': 'down',
21 | }
22 | timestamp = 123456789
23 | host = '127.0.0.1'
24 | data = [{'data': {host: {'comment': comment, 'metrics': raw_metrics}}, 'timestamp': timestamp}]
25 | expected_metrics = {
26 | 'metric1': -123.0,
27 | 'metric2': -123.456,
28 | 'metric3': 123.0,
29 | 'metric4': 123.456,
30 | 'metric5': 0.0,
31 | 'metric6': -0.1,
32 | 'metric7': 0.1,
33 | 'metric8': 'down',
34 | }
35 |
36 | decoder = Decoder(tank_tag, test_uuid, {}, True, True)
37 | result_points = decoder.decode_monitoring(data)
38 |
39 | assert len(result_points) == len(expected_metrics)
40 | # check other props
41 | for r_point in result_points:
42 | assert r_point['timestamp'] == timestamp
43 | assert r_point['metric'] == 'monitoring'
44 | assert r_point['tags']['comment'] == comment
45 | assert r_point['tags']['host'] == host
46 | assert r_point['tags']['tank'] == tank_tag
47 | assert r_point['tags']['uuid'] == test_uuid
48 | if r_point['tags']['field'] not in expected_metrics:
49 | assert False
50 | if not isinstance(r_point['value'], type(expected_metrics[r_point['tags']['field']])):
51 | assert False
52 | if not r_point['value'] == expected_metrics[r_point['tags']['field']]:
53 | assert False
54 |
--------------------------------------------------------------------------------
/docker/Dockerfile.jmeter:
--------------------------------------------------------------------------------
1 | # Yandex.Tank with jmeter and some plugins
2 |
3 | ARG VERSION=latest
4 | FROM yandex/yandex-tank:"${VERSION}"
5 | ARG VERSION
6 | ARG JMETER_VERSION=3.3
7 |
8 | MAINTAINER Yandex Load Team
9 |
10 | LABEL Description="Yandex.Tank with Apache jmeter" \
11 | Vendor="Yandex" \
12 | Jmeter.version="${JMETER_VERSION}"
13 |
14 | ENV JVM="openjdk-8-jdk"
15 |
16 | RUN DEBIAN_FRONTEND=noninteractive && \
17 | apt-get update && \
18 | apt-get install -yq --no-install-recommends ${JVM} && \
19 | apt-get clean && \
20 | rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/* /tmp/* /var/tmp/*
21 |
22 | ENV JMETER_PLUGINS="jpgc-csl,jpgc-tst,jpgc-dummy,jmeter-jdbc,jpgc-functions,jpgc-casutg,bzm-http2"
23 | ENV JMETER_HOME=/usr/local/apache-jmeter-"${JMETER_VERSION}"
24 | RUN wget https://archive.apache.org/dist/jmeter/binaries/apache-jmeter-${JMETER_VERSION}.tgz --progress=dot:giga && \
25 | tar -xzf apache-jmeter-${JMETER_VERSION}.tgz -C /usr/local && \
26 | rm apache-jmeter-${JMETER_VERSION}.tgz
27 |
28 | RUN cd ${JMETER_HOME}/lib/ && \
29 | for lib in \
30 | "kg/apc/cmdrunner/2.0/cmdrunner-2.0.jar" \
31 | "org/postgresql/postgresql/42.1.4/postgresql-42.1.4.jar"; \
32 | do local_name=$(echo "$lib" | awk -F'/' '{print $NF}') ; \
33 | wget "https://search.maven.org/remotecontent?filepath=${lib}" -O "${local_name}" --progress=dot:mega ;\
34 | done && \
35 | cd ${JMETER_HOME}/lib/ext && \
36 | wget 'https://search.maven.org/remotecontent?filepath=kg/apc/jmeter-plugins-manager/0.15/jmeter-plugins-manager-0.15.jar' -O jmeter-plugins-manager-0.15.jar --progress=dot:mega && \
37 | java -cp ${JMETER_HOME}/lib/ext/jmeter-plugins-manager-0.15.jar org.jmeterplugins.repository.PluginManagerCMDInstaller && \
38 | ${JMETER_HOME}/bin/PluginsManagerCMD.sh install "${JMETER_PLUGINS}" && \
39 | mkdir -p /etc/yandex-tank && \
40 | printf "jmeter:\n jmeter_path: ${JMETER_HOME}/bin/jmeter\n jmeter_ver: ${JMETER_VERSION}\n" > /etc/yandex-tank/10-jmeter.yaml
41 | ENV PATH ${PATH}:${JMETER_HOME}/bin
42 |
43 | COPY files/jmeter-large "${JMETER_HOME}"/bin/jmeter-large
44 |
45 |
--------------------------------------------------------------------------------
/yandextank/aggregator/chopper.py:
--------------------------------------------------------------------------------
1 | """
2 | Split incoming DataFrames into chunks, cache them, union chunks with same key
3 | and pass to the underlying aggregator.
4 | """
5 |
6 | import pandas as pd
7 |
8 |
9 | class TimeChopper(object):
10 | """
11 | TimeChopper splits incoming dataframes by index. Chunks are cached and
12 | chunks for the same key from different DFs are joined. Then chunks are passed
13 | further as (timestamp, dataframe, cardinality) tuples.
14 | """
15 |
16 | def __init__(self, sources):
17 | # self.cache_size = cache_size
18 | self.sources = {i: src for i, src in enumerate(sources)}
19 | self.recent_ts = {i: 0 for i in range(len(self.sources))}
20 | self.cache = {}
21 |
22 | def __iter__(self):
23 | for _ in range(len(self.sources)):
24 | try:
25 | while True:
26 | for n, source in self.sources.items():
27 | chunk = next(source)
28 | if chunk is not None:
29 | self.recent_ts[n] = chunk.index[-1]
30 | grouped = chunk.groupby(level=0)
31 | for ts, group_data in list(grouped):
32 | if ts in self.cache:
33 | self.cache[ts] = pd.concat([self.cache[ts], group_data])
34 | else:
35 | self.cache[ts] = group_data
36 | last_ready_ts = min(self.recent_ts.values()) - 1
37 | for ts in sorted(filter(lambda x: x <= last_ready_ts, self.cache)):
38 | data = self.cache.pop(ts)
39 | yield ts, data, len(data)
40 | except StopIteration:
41 | self.sources.pop(n)
42 | self.recent_ts.pop(n)
43 | while self.cache:
44 | yield self.__get_result()
45 |
46 | def __get_result(self):
47 | ts = min(self.cache.keys())
48 | result = self.cache.pop(ts, None)
49 | cardinality = len(result) if result is not None else 0
50 | return ts, result, cardinality
51 |
--------------------------------------------------------------------------------
/yandextank/plugins/InfluxUploader/tests/test_influxdb_decoder.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from uuid import uuid4
3 | from yandextank.plugins.InfluxUploader.decoder import Decoder
4 |
5 |
6 | class TestDecoder(object):
7 | def test_metrics_cast(self):
8 | test_uuid = str(uuid4())
9 | tank_tag = 'test_tank_tag'
10 | comment = 'test comment'
11 | raw_metrics = {
12 | 'metric1': -123,
13 | 'metric2': -123.456,
14 | 'metric3': 123,
15 | 'metric4': 123.456,
16 | 'metric5': 0,
17 | 'metric6': -0.1,
18 | 'metric7': 0.1,
19 | 'metric8': 'down',
20 | }
21 | timestamp = 123456789
22 | host = '127.0.0.1'
23 | data = [{'data': {host: {'comment': comment, 'metrics': raw_metrics}}, 'timestamp': timestamp}]
24 | expected_metrics = {
25 | 'metric1': -123.0,
26 | 'metric2': -123.456,
27 | 'metric3': 123.0,
28 | 'metric4': 123.456,
29 | 'metric5': 0.0,
30 | 'metric6': -0.1,
31 | 'metric7': 0.1,
32 | 'metric8': 'down',
33 | }
34 |
35 | decoder = Decoder(tank_tag, test_uuid, {}, True, True)
36 | result_points = decoder.decode_monitoring(data)
37 |
38 | assert len(result_points) == 1
39 | r_point = result_points[0]
40 | # check other props
41 | assert r_point['time'] == timestamp
42 | assert r_point['measurement'] == 'monitoring'
43 | assert r_point['tags']['comment'] == comment
44 | assert r_point['tags']['host'] == host
45 | assert r_point['tags']['tank'] == tank_tag
46 | assert r_point['tags']['uuid'] == test_uuid
47 | # check metric cast
48 | assert len(r_point['fields']) == len(expected_metrics)
49 | for metric, value in r_point['fields'].items():
50 | if metric not in expected_metrics:
51 | assert False
52 | if not isinstance(value, type(expected_metrics[metric])):
53 | assert False
54 | if not value == expected_metrics[metric]:
55 | assert False
56 |
--------------------------------------------------------------------------------
/yandextank/core/config/schema.yaml:
--------------------------------------------------------------------------------
1 | core:
2 | type: dict
3 | allow_unknown: false
4 | schema:
5 | affinity:
6 | description: specify cpu core(s) to bind tank process to, http://linuxhowtos.org/manpages/1/taskset.htm
7 | type: string
8 | default: ''
9 | ammo_validation:
10 | description: ammo validation level. One of fail_on_error, inform, skip.
11 | type: string
12 | default: inform
13 | api_jobno:
14 | description: tankapi job id, also used as test\'s directory name - determined by tank
15 | type: string
16 | artifacts_base_dir:
17 | description: base directory to store tests\' artifacts directories
18 | type: string
19 | default: ./logs
20 | artifacts_dir:
21 | description: directory inside base directory to store test\'s artifacts, defaults to api_jobno if null
22 | type: string
23 | cmdline:
24 | type: string
25 | exitcode:
26 | type: integer
27 | flush_config_to:
28 | description: path to store config
29 | type: string
30 | ignore_lock:
31 | description: if tank is locked (*.lock file(s) present in lock_dir), shoot nevertheless
32 | type: boolean
33 | default: false
34 | uuid:
35 | type: string
36 | pid:
37 | type: integer
38 | message:
39 | type: string
40 | lock_dir:
41 | description: directory to store *.lock files
42 | type: string
43 | default: /var/lock/
44 | operator:
45 | description: your username
46 | type: string
47 | taskset_path:
48 | type: string
49 | default: taskset
50 | debug:
51 | description: enable debug logging
52 | type: boolean
53 | default: false
54 | aggregator_max_wait:
55 | description: maximum data waiting time from aggregator
56 | type: integer
57 | default: 31
58 | aggregator_max_termination_timeout:
59 | description: maximum timeout for aggregator to finish after test end in seconds
60 | type: integer
61 | default: 60
62 | skip_generator_check:
63 | description: enable tank running without load generator
64 | type: boolean
65 | default: false
66 | version:
67 | type: string
68 |
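# Illustrative example (not part of the schema): a `core` section of a tank config
# that uses some of the options validated above
#
# core:
#   artifacts_base_dir: ./logs
#   lock_dir: /var/lock/
#   ignore_lock: true
#   operator: my_username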
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/process.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import shlex
3 | import logging
4 | from six import string_types
5 |
6 |
7 | # FIXME poll_period is not used anywhere
8 | def execute(cmd, shell=False, poll_period=1.0, catch_out=False, executable=None, env=None):
9 | """Execute UNIX command and wait for its completion
10 |
11 | Args:
12 | cmd (str or list): command to execute
13 | shell (bool): invoke inside shell environment
14 | catch_out (bool): collect process' output
15 | executable: custom executable for popen
16 |
17 | Returns:
18 | returncode (int): process return code
19 | stdout (str): collected process stdout (only if catch_out set to true)
20 | stderr (str): collected process stderr (only if catch_out set to true)
21 | """
22 | # FIXME: move to module level
23 | log = logging.getLogger(__name__)
24 | log.info("Starting: %s", cmd)
25 |
26 | stdout = ""
27 | stderr = ""
28 |
29 | if not shell and isinstance(cmd, string_types):
30 | cmd = shlex.split(cmd)
31 | if not executable:
32 | executable = None
33 |
34 | if catch_out:
35 | process = subprocess.Popen(
36 | cmd,
37 | shell=shell,
38 | stderr=subprocess.PIPE,
39 | stdout=subprocess.PIPE,
40 | close_fds=True,
41 | env=env,
42 | executable=executable,
43 | )
44 | else:
45 | process = subprocess.Popen(cmd, shell=shell, close_fds=True, env=env, executable=executable)
46 |
47 | stdout, stderr = process.communicate()
48 | if stderr:
49 | log.error("There were errors:\n%s", stderr)
50 |
51 | if stdout:
52 | log.debug("Process output:\n%s", stdout)
53 | returncode = process.returncode
54 | log.info("Process cmd: %s – exit code: %s", cmd, returncode)
55 | return returncode, stdout, stderr
56 |
57 |
58 | # FIXME: remove this dumb popen wrapper
59 | def popen(cmnd):
60 | return subprocess.Popen(
61 | cmnd,
62 | bufsize=0,
63 | close_fds=True,
64 | shell=True,
65 | stdout=subprocess.PIPE,
66 | stderr=subprocess.PIPE,
67 | stdin=subprocess.PIPE,
68 | )
69 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Yandex Tank [](https://gitter.im/yandex/yandex-tank?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
2 |
3 | ### Yandex.Tank has been moved to Python 3.
4 | #### [Latest stable release for Python 2 here](https://github.com/yandex/yandex-tank/releases/tag/Python2).
5 | Yandex.Tank is an extensible open source load testing tool for advanced Linux users which is especially good as a part of an automated load testing suite.
6 |
7 | 
8 |
9 | ## Main features
10 | * different load generators supported:
11 | * Evgeniy Mamchits' [phantom](https://github.com/yandex-load/phantom) is a very fast (100 000+ RPS) shooter written in C++ (default)
12 | * [JMeter](http://jmeter.apache.org/) is an extendable and widely known one
13 | * BFG is a Python-based generator that allows you to write your load scenarios in Python
14 | * experimental Golang generator: [pandora](https://github.com/yandex/pandora)
15 | * performance analytics backend service: [Overload](http://overload.yandex.net/). Store and analyze your test results online
16 | * several ammo formats supported, such as a plain URL list or access.log (see the example below)
17 | * test autostop plugin: stop your test when the results have become obvious and save time
18 | * customizable and extendable monitoring that works over SSH
19 |
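For instance, the plain URL-list ("uri") ammo format is just a text file with optional default headers in square brackets and one request path per line, followed by an optional tag (an illustrative sketch; see the documentation for the full format description):

```
[Host: example.org]
[Connection: close]
/          index
/buy/      buy_page
```
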
20 | ## Documentation
21 | - [Installation](http://yandextank.readthedocs.org/en/latest/install.html)
22 |
23 | - Rest of [documentation](https://yandextank.readthedocs.org/en/latest/)
24 |
25 | - [Stackoverflow](https://stackoverflow.com/) – use `load-testing` + `yandex` tags
26 |
27 | ## Get help
28 | Chat with authors and other performance specialists: [](https://gitter.im/yandex/yandex-tank?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
29 |
30 | ## See also
31 | - [Overload𝛃](https://overload.yandex.net/) – performance analytics server
32 |
33 | - Evgeniy Mamchits' [phantom](https://github.com/yandex-load/phantom) – phantom scalable IO engine
34 |
35 | - [Vagrant environment](https://github.com/c3037/yandex-tank) with Yandex.Tank by Dmitry Porozhnyakov
36 |
--------------------------------------------------------------------------------
/yandextank/ammo_validator/validators/uri.py:
--------------------------------------------------------------------------------
1 | from typing import BinaryIO
2 | from ..common import AmmoType, Decision, FileFormatValidator, Features, Messages
3 |
4 |
5 | class UriValidator(FileFormatValidator):
6 | AMMO_TYPES = {AmmoType.URI}
7 |
8 | def is_suitable(self, features: Features) -> Decision:
9 | if features.is_begin_of_file_square_braced_lines():
10 | sline = features.first_non_square_braced_line().split()
11 | if len(sline) > 0 and sline[0].startswith(b'/'):
12 | return Decision({AmmoType.URI})
13 | sline = features.first_line().split()
14 | if len(sline) > 0 and sline[0].startswith(b'/'):
15 | return Decision({AmmoType.URI})
16 | return Decision(set())
17 |
18 | def _check_header(self, messages: Messages, start_offset: int, header: bytes):
19 | if b':' not in header:
20 | messages.error(self._msg('Header line does not contain ":"', file_offset=start_offset))
21 |
22 | def validate(self, stream: BinaryIO, max_scan_size: int) -> Messages:
23 | messages = Messages()
24 | count = 0
25 | success = 0
26 | start_offset = stream.tell()
27 | for line in stream:
28 | if stream.tell() > max_scan_size:
29 | break
30 |
31 | line = line.strip(b'\r\n\t ')
32 | if not line:
33 | continue
34 | count += 1
35 |
36 | if line.startswith(b'['):
37 | if not line.endswith(b']'):
38 | messages.error(self._msg('Header line does not end with "]"', file_offset=start_offset))
39 | self._check_header(messages, start_offset, line.strip(b'\r\n[]\t '))
40 | else:
41 | fields = line.split()
42 | if len(fields) >= 3:
43 | messages.warning(self._msg('Too many tags. Only one tag is allowed', file_offset=start_offset))
44 | success += 1
45 |
46 | start_offset = stream.tell()
47 |
48 | messages.info(self._msg(f'{count} non empty lines read ({success} uris)'))
49 | if not success:
50 | messages.error(self._msg('No successfully read packets in ammo'))
51 |
52 | return messages
53 |
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/tests/test_config_parser.py:
--------------------------------------------------------------------------------
1 | from yandextank.plugins.Telegraf.config_parser import parse_xml, parse_yaml, TARGET_HINT_PLACEHOLDER
2 |
3 |
4 | class TestConfigParsers(object):
5 | def test_rawxml_parse(self):
6 | """raw xml read from string"""
7 | config = """
8 |
9 |
10 |
11 |
12 |
13 | """
14 |
15 | host = parse_xml(config)[0]
16 | assert host.metrics[0].name == 'CPU'
17 | assert host.get('ssh_key_path') == '/tmp'
18 |
19 | def test_rawxml_parse_without_key_path(self):
20 | """raw xml read from string"""
21 | config = """
22 |
23 |
24 |
25 |
26 |
27 | """
28 |
29 | host = parse_xml(config)[0]
30 | assert host.metrics[0].name == 'CPU'
31 | assert host.get('ssh_key_path') is None
32 |
33 | def test_raw_yaml_parse(self):
34 | """raw yaml read from string"""
35 | config = """
36 | hosts:
37 | localhost:
38 | ssh_key_path: /tmp
39 | metrics:
40 | cpu:
41 | nstat:
42 | """
43 | agents = parse_yaml(config)
44 | assert agents[0].address == 'localhost'
45 | host = agents[0]
46 | assert host.metrics[0].name == 'cpu'
47 | assert host.metrics[1].name == 'nstat'
48 | assert host.get('ssh_key_path') == '/tmp'
49 |
50 | def test_raw_yaml_parse_agent_config_is_none(self):
51 | config = """
52 | hosts:
53 | localhost:
54 | metrics:
55 | cpu:
56 | nstat:
57 | """
58 | agents = parse_yaml(config)
59 | assert agents[0].address == 'localhost'
60 | host = agents[0]
61 | assert host.metrics[0].name == 'cpu'
62 | assert host.metrics[1].name == 'nstat'
63 | assert host.get('ssh_key_path') is None
64 |
65 | def test_raw_yaml_parse_empty_config(self):
66 | config = ''
67 | agents = parse_yaml(config)
68 | assert agents[0].address == TARGET_HINT_PLACEHOLDER
69 |
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/common/condition.py:
--------------------------------------------------------------------------------
1 | import fnmatch
2 | import typing
3 | from urllib.parse import urlparse
4 |
5 |
6 | class Condition(object):
7 | def __init__(self, callable_: typing.Callable, explanation: str):
8 | self._callable = callable_
9 | self._explanation = explanation
10 |
11 | def __call__(self, *args, **kwargs):
12 | return self._callable(*args, **kwargs)
13 |
14 | def __repr__(self) -> str:
15 | return self._explanation
16 |
17 | def __str__(self) -> str:
18 | return self.__repr__()
19 |
20 |
21 | def uri_like(
22 | *,
23 | scheme: typing.Optional[str] = None,
24 | host: typing.Optional[str] = None,
25 | path: typing.Optional[str] = None,
26 | ) -> Condition:
27 | if all(arg is None for arg in [scheme, host, path]):
28 | raise ValueError('uri_like: at least one argument must be specified')
29 |
30 | def condition(url: str, *args, **kwargs):
31 | parsed = urlparse(url)
32 | meet = True
33 | if scheme is not None:
34 | meet = meet and parsed.scheme.lower() == scheme.lower()
35 | if host is not None:
36 | meet = meet and _host_match(parsed.netloc, host)
37 | if path is not None:
38 | meet = meet and fnmatch.fnmatch(parsed.path.lower(), path.lower())
39 | return meet
40 |
41 | explanation = '/'.join(
42 | filter(
43 | None,
44 | [
45 | f'{scheme}:/' if scheme is not None else 'scheme:/',
46 | host if host is not None else '*',
47 | path,
48 | ],
49 | )
50 | )
51 | return Condition(condition, f'uri like {explanation}')
52 |
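# Illustrative usage sketch (not in the original file):
#   is_api = uri_like(scheme='https', host='*.example.com', path='/api/*')
#   is_api('https://svc.example.com/api/v1')  # -> True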
53 |
54 | def and_(*conditions) -> Condition:
55 | explanation = ' and '.join([repr(c) for c in conditions])
56 | return Condition(lambda *args, **kwargs: all([c(*args, **kwargs) for c in conditions]), explanation)
57 |
58 |
59 | def path_like(pattern: str) -> Condition:
60 | def condition(path: str, *args, **kwargs):
61 | return fnmatch.fnmatchcase(path, pattern)
62 |
63 | return Condition(condition, f'path_like "{pattern}"')
64 |
65 |
66 | def _host_match(netloc: str, pattern: str) -> bool:
67 | netloc_host = netloc.rsplit(':', 1)[0]
68 | return fnmatch.fnmatch(netloc_host, pattern)
69 |
--------------------------------------------------------------------------------
/yandextank/stepper/format.py:
--------------------------------------------------------------------------------
1 | '''
2 | Ammo formatters
3 | '''
4 |
5 | import logging
6 |
7 | from .module_exceptions import StpdFileError
8 |
9 |
10 | class Stpd(object):
11 | '''
12 | STPD ammo formatter
13 | '''
14 |
15 | def __init__(self, ammo_factory):
16 | self.af = ammo_factory
17 |
18 | def __iter__(self):
19 | for timestamp, marker, missile in self.af:
20 | yield b"%s %s %s\n%s\n" % (str(len(missile)).encode('utf8'), str(timestamp).encode('utf8'), marker, missile)
21 |
22 |
23 | class StpdReader(object):
24 | '''Read missiles from stpd file'''
25 |
26 | def __init__(self, filename):
27 | self.filename = filename
28 | self.log = logging.getLogger(__name__)
29 | self.log.info("Loading stepped missiles from '%s'" % filename)
30 |
31 | def __iter__(self):
32 | def read_chunk_header(ammo_file):
33 | chunk_header = ''
34 | while not chunk_header:
35 | line = ammo_file.readline().decode('utf8')
36 | if not line:
37 | return line # EOF
38 | chunk_header = line.strip('\r\n')
39 | return chunk_header
40 |
41 | with open(self.filename, 'rb') as ammo_file:
42 | chunk_header = read_chunk_header(ammo_file)
43 | while chunk_header != '':
44 | try:
45 | fields = chunk_header.split()
46 | chunk_size = int(fields[0])
47 | timestamp = int(fields[1])
48 | marker = fields[2] if len(fields) > 2 else ''
49 | missile = ammo_file.read(chunk_size)
50 | if len(missile) < chunk_size:
51 | raise StpdFileError(
52 | "Unexpected end of file: read %s bytes instead of %s" % (len(missile), chunk_size)
53 | )
54 | yield (timestamp, missile, marker)
55 | except (IndexError, ValueError) as e:
56 | raise StpdFileError(
57 | "Error while reading ammo file. Position: %s, header: '%s', original exception: %s"
58 | % (ammo_file.tell(), chunk_header, e)
59 | )
60 | chunk_header = read_chunk_header(ammo_file)
61 | self.log.info("Reached the end of stpd file")
62 |
--------------------------------------------------------------------------------
/yandextank/plugins/Console/tests/test_spark.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import time
3 | from yandextank.plugins.Console.screen import Sparkline
4 |
5 |
6 | class TestSparkline(object):
7 | def test_unusual_vals(self):
8 | data = [0, 1, -100, 0.1, 1000, -0.1, 50]
9 | expected = ' _ _▇ _'
10 | sparkline = Sparkline(len(data))
11 | start = int(time.time()) - len(data)
12 | for num, val in enumerate(data):
13 | sparkline.add(start + num, 'data', val)
14 | spark = ''.join(sparkline.get_sparkline('data'))
15 | assert len(spark) == len(data)
16 | assert spark == expected
17 | zero = sparkline.get_sparkline('continous', spark_len=0)
18 | assert len(zero) == 0
19 | negative = sparkline.get_sparkline('continous', spark_len=-1)
20 | assert len(negative) == 0
21 |
22 | def test_non_continuos(self):
23 | data = range(20)
24 | expected = ' _▁▂▃▄▅▆▇ ▃▄▅▆▇ _'
25 | expected_short = '▆▇ _'
26 | expected_long = ' _▁▂▃▄▅▆▇ ▃▄▅▆▇ _'
27 | spark_len = 24
28 | sparkline = Sparkline(spark_len)
29 | start = int(time.time()) - len(data)
30 | for num, val in enumerate(data):
31 | if val <= 8 or val > 12:
32 | sparkline.add(start + num, 'data', val % 9)
33 | spark = ''.join(sparkline.get_sparkline('data', spark_len=len(data)))
34 | assert spark == expected
35 | short_spark = ''.join(sparkline.get_sparkline('data', spark_len=4))
36 | assert short_spark == expected_short
37 | long_spark = ''.join(sparkline.get_sparkline('data'))
38 | assert long_spark == expected_long
39 |
40 | def test_multi_graphs(self):
41 | expected_continous = '__▁▁▂▂▃▃▄▄▅▅▆▆▇▇'
42 | expected_spotty = '_ ▁ ▂ ▃ ▄ ▅ ▆ ▇ '
43 | continous_vals = range(1, 17)
44 | sparkline = Sparkline(len(continous_vals))
45 | start = int(time.time()) - len(continous_vals)
46 | for val in continous_vals:
47 | sparkline.add(start + val, 'continous', val)
48 | if val % 2 == 1:
49 | sparkline.add(start + val, 'spotty', val)
50 | continous = ''.join(sparkline.get_sparkline('continous'))
51 | spotty = ''.join(sparkline.get_sparkline('spotty'))
52 | assert continous == expected_continous
53 | assert spotty == expected_spotty
54 |
--------------------------------------------------------------------------------
/yandextank/aggregator/tests/test_pipeline.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 |
4 | import numpy as np
5 | import pytest
6 |
7 | from queue import Queue
8 |
9 | from yandextank.common.util import get_test_path
10 | from conftest import MAX_TS, random_split
11 |
12 | from yandextank.aggregator import TankAggregator
13 | from yandextank.aggregator.aggregator import Aggregator, DataPoller
14 | from yandextank.aggregator.chopper import TimeChopper
15 | from yandextank.plugins.Phantom.reader import string_to_df
16 | from yandextank.contrib.netort.netort.data_processing import Drain
17 |
18 | AGGR_CONFIG = TankAggregator.load_config()
19 |
20 |
21 | class TestPipeline(object):
22 | def test_partially_reversed_data(self, data):
23 | results_queue = Queue()
24 | chunks = list(random_split(data))
25 | chunks[5], chunks[6] = chunks[6], chunks[5]
26 |
27 | pipeline = Aggregator(TimeChopper([DataPoller(poll_period=0.1, max_wait=31).poll(chunks)]), AGGR_CONFIG, False)
28 | drain = Drain(pipeline, results_queue)
29 | drain.run()
30 | assert results_queue.qsize() == MAX_TS
31 |
32 | def test_slow_producer(self, data):
33 | results_queue = Queue()
34 | chunks = list(random_split(data))
35 | chunks[2], chunks[3] = chunks[3], chunks[2]
36 |
37 | def producer():
38 | for chunk in chunks:
39 | if np.random.random() > 0.5:
40 | yield None
41 | yield chunk
42 |
43 | pipeline = Aggregator(
44 | TimeChopper([DataPoller(poll_period=0.1, max_wait=31).poll(producer())]), AGGR_CONFIG, False
45 | )
46 | drain = Drain(pipeline, results_queue)
47 | drain.run()
48 | assert results_queue.qsize() == MAX_TS
49 |
50 | @pytest.mark.parametrize(
51 | 'phout, expected_results',
52 | [('yandextank/aggregator/tests/phout2927', 'yandextank/aggregator/tests/phout2927res.jsonl')],
53 | )
54 | def test_invalid_ammo(self, phout, expected_results):
55 | with open(os.path.join(get_test_path(), phout)) as fp:
56 | reader = [string_to_df(line) for line in fp.readlines()]
57 | pipeline = Aggregator(TimeChopper([DataPoller(poll_period=0.01, max_wait=31).poll(reader)]), AGGR_CONFIG, True)
58 | with open(os.path.join(get_test_path(), expected_results)) as fp:
59 | expected_results_parsed = json.load(fp)
60 | for item, expected_result in zip(pipeline, expected_results_parsed):
61 | for key, expected_value in expected_result.items():
62 | assert item[key] == expected_value
63 |
--------------------------------------------------------------------------------
/yandextank/plugins/JsonReport/plugin.py:
--------------------------------------------------------------------------------
1 | # TODO: make the next two lines unnecessary
2 | # pylint: disable=line-too-long
3 | # pylint: disable=missing-docstring
4 | import json
5 | import logging
6 | import numpy as np
7 | import os
8 |
9 | import io
10 |
11 | from ...common.interfaces import AbstractPlugin, MonitoringDataListener, AggregateResultListener
12 |
13 | logger = logging.getLogger(__name__) # pylint: disable=C0103
14 |
15 |
16 | class NumpyEncoder(json.JSONEncoder):
17 | def default(self, obj):
18 | if isinstance(obj, np.generic):
19 | return obj.item()
20 | elif isinstance(obj, np.ndarray):
21 | return obj.tolist()
22 | else:
23 | return super(NumpyEncoder, self).default(obj)
24 |
25 |
26 | class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
27 | # pylint:disable=R0902
28 | SECTION = 'json_report'
29 |
30 | def __init__(self, core, cfg, name):
31 | super(Plugin, self).__init__(core, cfg, name)
32 | self.monitoring_stream = io.open(
33 | os.path.join(self.core.artifacts_dir, self.get_option('monitoring_log')), mode='wb'
34 | )
35 | self.data_and_stats_stream = io.open(
36 | os.path.join(self.core.artifacts_dir, self.get_option('test_data_log')), mode='wb'
37 | )
38 | self._is_telegraf = None
39 |
40 | def get_available_options(self):
41 | return ['monitoring_log', 'test_data_log']
42 |
43 | def configure(self):
44 | self.core.job.subscribe_plugin(self)
45 |
46 | def on_aggregated_data(self, data, stats):
47 | """
48 | @data: aggregated data
49 | @stats: stats about gun
50 | """
51 | json_string = json.dumps({'data': data, 'stats': stats}, cls=NumpyEncoder)
52 | self.data_and_stats_stream.write('{}\n'.format(json_string).encode('utf-8'))
53 |
54 | def monitoring_data(self, data_list):
55 | if self.is_telegraf:
56 | monitoring_data = '{}\n'.format(json.dumps(data_list)).encode('utf-8')
57 | self.monitoring_stream.write(monitoring_data)
58 | else:
59 | [self.monitoring_stream.write('{}\n'.format(data.strip()).encode('utf-8')) for data in data_list if data]
60 |
61 | def post_process(self, retcode):
62 | self.close()
63 | return retcode
64 |
65 | def close(self):
66 | if self.data_and_stats_stream:
67 | self.data_and_stats_stream.close()
68 | if self.monitoring_stream:
69 | self.monitoring_stream.close()
70 |
71 | @property
72 | def is_telegraf(self):
73 | return True
74 |
--------------------------------------------------------------------------------
/yandextank/aggregator/tests/test_aggregator.py:
--------------------------------------------------------------------------------
1 | import os
2 | from datetime import datetime
3 | from threading import Event
4 |
5 | import pytest as pytest
6 |
7 | from yandextank.common.util import get_test_path
8 | from yandextank.aggregator import TankAggregator
9 | from yandextank.aggregator.aggregator import DataPoller
10 | from yandextank.common.util import FileMultiReader
11 | from yandextank.plugins.Phantom.reader import PhantomReader
12 |
13 |
14 | class PhantomMock(object):
15 | def __init__(self, phout):
16 | self.phout_filename = phout
17 | self.reader = None
18 | self.finished = Event()
19 |
20 | def get_reader(self):
21 | if self.reader is None:
22 | self.reader = PhantomReader(FileMultiReader(self.phout_filename, self.finished).get_file())
23 | return self.reader
24 |
25 | def get_stats_reader(self):
26 | return (i for i in [])
27 |
28 | def end_test(self, retcode):
29 | return retcode
30 |
31 |
32 | class ListenerMock(object):
33 | def __init__(self):
34 | self.collected_data = []
35 | self.cnt = 0
36 | self.avg = 0
37 |
38 | def on_aggregated_data(self, data, stats):
39 | rps = data['counted_rps']
40 | self.cnt += 1
41 | self.avg = (self.avg * (self.cnt - 1) + rps) / self.cnt
42 |
43 |
44 | @pytest.mark.parametrize('phout, expected_rps', [('yandextank/aggregator/tests/phout1', 300)])
45 | def test_agregator(phout, expected_rps):
46 | generator = PhantomMock(os.path.join(get_test_path(), phout))
47 | poller = DataPoller(poll_period=0.1, max_wait=31)
48 | aggregator = TankAggregator(generator, poller)
49 | listener = ListenerMock()
50 | aggregator.add_result_listener(listener)
51 | aggregator.start_test()
52 | generator.finished.set()
53 | while not aggregator.is_aggr_finished():
54 | aggregator.is_test_finished()
55 | aggregator.end_test(1)
56 | assert abs(listener.avg - expected_rps) < 0.1 * expected_rps
57 |
58 |
59 | @pytest.mark.parametrize('phout', ['yandextank/aggregator/tests/phout1'])
60 | def test_aggregator_max_timeout(phout):
61 | generator = PhantomMock(os.path.join(get_test_path(), phout))
62 | poller = DataPoller(poll_period=0.1, max_wait=31)
63 | aggregator = TankAggregator(generator, poller, termination_timeout=0.2)
64 | listener = ListenerMock()
65 | aggregator.add_result_listener(listener)
66 | aggregator.start_test()
67 | termination_start = datetime.now()
68 | aggregator.end_test(1)
69 | termination_lag = (datetime.now() - termination_start).total_seconds()
70 | assert termination_lag < 1
71 | assert termination_lag > 0.2
72 | generator.finished.set()
73 |
--------------------------------------------------------------------------------
/yandextank/stepper/mark.py:
--------------------------------------------------------------------------------
1 | from uuid import uuid4
2 | from builtins import int
3 |
4 | __test_missile = """\
5 | POST /example/search/hello/help/us?param1=50&param2=0&param3=hello HTTP/1.1\r
6 | Connection: close\r
7 | Host: example.org\r
8 | Content-length: 32\r
9 | \r
10 | param1=50&param2=0&param3=hello
11 | """
12 |
13 |
14 | def __mark_by_uri(missile):
15 | return '_'.join(missile.decode('utf8').split('\n', 1)[0].split(' ', 2)[1].split('?')[0].split('/'))
16 |
17 |
18 | class __UriMarker(object):
19 | '''
20 | Returns a uri marker function with requested limit
21 |
22 | >>> marker = __UriMarker(2)
23 | >>> marker(__test_missile)
24 | '_example_search'
25 | '''
26 |
27 | def __init__(self, limit):
28 | self.limit = limit
29 |
30 | def __call__(self, missile):
31 | return b'_'.join(missile.split(b'\n', 1)[0].split(b' ', 2)[1].split(b'?')[0].split(b'/')[0 : self.limit + 1])
32 |
33 |
34 | __markers = {
35 | 'uniq': lambda m: uuid4().hex,
36 | 'uri': __mark_by_uri,
37 | }
38 |
39 |
40 | class __Enumerator(object):
41 | def __init__(self, marker):
42 | self.marker = marker
43 | self.number = int(0)
44 |
45 | def __call__(self, missile):
46 | marker = b"%s#%d" % (self.marker(missile), self.number)
47 | self.number += 1
48 | return marker
49 |
50 |
51 | def get_marker(marker_type, enum_ammo=False):
52 | '''
53 | Returns a marker function of the requested marker_type
54 |
55 | >>> marker = get_marker('uniq')(__test_missile)
56 | >>> type(marker)
57 | <class 'str'>
58 | >>> len(marker)
59 | 32
60 |
61 | >>> get_marker('uri')(__test_missile)
62 | '_example_search_hello_help_us'
63 |
64 | >>> marker = get_marker('non-existent')(__test_missile)
65 | Traceback (most recent call last):
66 | ...
67 | NotImplementedError: No such marker: "non-existent"
68 |
69 | >>> get_marker('3')(__test_missile)
70 | '_example_search_hello'
71 |
72 | >>> marker = get_marker('3', True)
73 | >>> marker(__test_missile)
74 | '_example_search_hello#0'
75 | >>> marker(__test_missile)
76 | '_example_search_hello#1'
77 | '''
78 | try:
79 | limit = int(marker_type)
80 | if limit:
81 | marker = __UriMarker(limit)
82 | else:
83 |
84 | def marker(m):
85 | return b''
86 |
87 | except ValueError:
88 | if marker_type in __markers:
89 | marker = __markers[marker_type]
90 | else:
91 | raise NotImplementedError('No such marker: "%s"' % marker_type)
92 |
93 | # todo: fix u'False'
94 | if enum_ammo:
95 | marker = __Enumerator(marker)
96 | return marker
97 |
--------------------------------------------------------------------------------
/yandextank/plugins/ShellExec/plugin.py:
--------------------------------------------------------------------------------
1 | '''
2 | Contains shellexec plugin
3 | '''
4 |
5 | from yandextank.contrib.netort.netort.process import execute
6 | from ...common.interfaces import AbstractPlugin
7 |
8 |
9 | class Plugin(AbstractPlugin):
10 | """
11 | ShellExec plugin
12 | allows executing shell scripts before/after test
13 | """
14 |
15 | SECTION = 'shellexec'
16 |
17 | def __init__(self, core, cfg, name):
18 | AbstractPlugin.__init__(self, core, cfg, name)
19 | self.catch_out = False
20 | self.end = None
21 | self.poll = None
22 | self.prepare = None
23 | self.start = None
24 | self.postprocess = None
25 | self.executable = None
26 |
27 | @staticmethod
28 | def get_key():
29 | return __file__
30 |
31 | def get_available_options(self):
32 | return ['prepare', 'start', 'end', 'poll', 'post_process', 'catch_out', 'shell']
33 |
34 | def configure(self):
35 | self.catch_out = True if self.get_option("catch_out", False) else False
36 | self.prepare = self.get_option("prepare", '')
37 | self.start = self.get_option("start", '')
38 | self.end = self.get_option("end", '')
39 | self.poll = self.get_option("poll", '')
40 | self.postprocess = self.get_option("post_process", '')
41 | self.executable = self.get_option('shell', '')
42 |
43 | def prepare_test(self):
44 | if self.prepare:
45 | self.execute(self.prepare)
46 |
47 | def start_test(self):
48 | if self.start:
49 | self.execute(self.start)
50 |
51 | def is_test_finished(self):
52 | if self.poll:
53 | self.log.info("Executing: %s", self.poll)
54 | retcode = execute(
55 | self.poll, shell=True, poll_period=0.1, catch_out=self.catch_out, executable=self.executable
56 | )[0]
57 | if retcode:
58 | self.log.warning("Non-zero exit code, interrupting test: %s", retcode)
59 | return retcode
60 | return -1
61 |
62 | def end_test(self, retcode):
63 | if self.end:
64 | self.execute(self.end)
65 | return retcode
66 |
67 | def post_process(self, retcode):
68 | if self.postprocess:
69 | self.execute(self.postprocess)
70 | return retcode
71 |
72 | def execute(self, cmd):
73 | """
74 | Execute and check exit code
75 | """
76 | self.log.info("Executing: %s", cmd)
77 | retcode = execute(cmd, shell=True, poll_period=0.1, catch_out=self.catch_out, executable=self.executable)[0]
78 | if retcode:
79 | raise RuntimeError("Subprocess returned %s" % retcode)
80 | return retcode
81 |
--------------------------------------------------------------------------------
/yandextank/stepper/tests/caseline-expected.stpd:
--------------------------------------------------------------------------------
1 | 5 0 test1
2 | test1
3 | 5 100
4 | test2
5 | 5 200 test1
6 | test1
7 | 5 300
8 | test2
9 | 5 400 test1
10 | test1
11 | 5 500
12 | test2
13 | 5 600 test1
14 | test1
15 | 5 700
16 | test2
17 | 5 800 test1
18 | test1
19 | 5 900
20 | test2
21 | 5 1000 test1
22 | test1
23 | 5 1100
24 | test2
25 | 5 1200 test1
26 | test1
27 | 5 1300
28 | test2
29 | 5 1400 test1
30 | test1
31 | 5 1500
32 | test2
33 | 5 1600 test1
34 | test1
35 | 5 1700
36 | test2
37 | 5 1800 test1
38 | test1
39 | 5 1900
40 | test2
41 | 5 2000 test1
42 | test1
43 | 5 2100
44 | test2
45 | 5 2200 test1
46 | test1
47 | 5 2300
48 | test2
49 | 5 2400 test1
50 | test1
51 | 5 2500
52 | test2
53 | 5 2600 test1
54 | test1
55 | 5 2700
56 | test2
57 | 5 2800 test1
58 | test1
59 | 5 2900
60 | test2
61 | 5 3000 test1
62 | test1
63 | 5 3100
64 | test2
65 | 5 3200 test1
66 | test1
67 | 5 3300
68 | test2
69 | 5 3400 test1
70 | test1
71 | 5 3500
72 | test2
73 | 5 3600 test1
74 | test1
75 | 5 3700
76 | test2
77 | 5 3800 test1
78 | test1
79 | 5 3900
80 | test2
81 | 5 4000 test1
82 | test1
83 | 5 4100
84 | test2
85 | 5 4200 test1
86 | test1
87 | 5 4300
88 | test2
89 | 5 4400 test1
90 | test1
91 | 5 4500
92 | test2
93 | 5 4600 test1
94 | test1
95 | 5 4700
96 | test2
97 | 5 4800 test1
98 | test1
99 | 5 4900
100 | test2
101 | 5 5000 test1
102 | test1
103 | 5 5100
104 | test2
105 | 5 5200 test1
106 | test1
107 | 5 5300
108 | test2
109 | 5 5400 test1
110 | test1
111 | 5 5500
112 | test2
113 | 5 5600 test1
114 | test1
115 | 5 5700
116 | test2
117 | 5 5800 test1
118 | test1
119 | 5 5900
120 | test2
121 | 5 6000 test1
122 | test1
123 | 5 6100
124 | test2
125 | 5 6200 test1
126 | test1
127 | 5 6300
128 | test2
129 | 5 6400 test1
130 | test1
131 | 5 6500
132 | test2
133 | 5 6600 test1
134 | test1
135 | 5 6700
136 | test2
137 | 5 6800 test1
138 | test1
139 | 5 6900
140 | test2
141 | 5 7000 test1
142 | test1
143 | 5 7100
144 | test2
145 | 5 7200 test1
146 | test1
147 | 5 7300
148 | test2
149 | 5 7400 test1
150 | test1
151 | 5 7500
152 | test2
153 | 5 7600 test1
154 | test1
155 | 5 7700
156 | test2
157 | 5 7800 test1
158 | test1
159 | 5 7900
160 | test2
161 | 5 8000 test1
162 | test1
163 | 5 8100
164 | test2
165 | 5 8200 test1
166 | test1
167 | 5 8300
168 | test2
169 | 5 8400 test1
170 | test1
171 | 5 8500
172 | test2
173 | 5 8600 test1
174 | test1
175 | 5 8700
176 | test2
177 | 5 8800 test1
178 | test1
179 | 5 8900
180 | test2
181 | 5 9000 test1
182 | test1
183 | 5 9100
184 | test2
185 | 5 9200 test1
186 | test1
187 | 5 9300
188 | test2
189 | 5 9400 test1
190 | test1
191 | 5 9500
192 | test2
193 | 5 9600 test1
194 | test1
195 | 5 9700
196 | test2
197 | 5 9800 test1
198 | test1
199 | 5 9900
200 | test2
201 |
--------------------------------------------------------------------------------
/yandextank/plugins/Bfg/reader.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import time
3 | import itertools as itt
4 | from queue import Empty
5 | from threading import Lock
6 | import threading as th
7 | import logging
8 |
9 | logger = logging.getLogger(__name__)
10 |
11 |
12 | def records_to_df(records):
13 | records = pd.DataFrame.from_records(records)
14 | records['receive_ts'] = records['send_ts'] + records['interval_real'] / 1e6
15 | records['receive_sec'] = records.receive_ts.astype(int)
16 | # TODO: consider configuration for the following:
17 |     records['tag'] = records.tag.str.rsplit('#', n=1, expand=True)[0]
18 | records.set_index(['receive_sec'], inplace=True)
19 | return records
20 |
21 |
22 | def _expand_steps(steps):
23 | return list(itt.chain(*[[rps] * int(duration) for rps, duration in steps]))
24 |
25 |
26 | class BfgReader(object):
27 | def __init__(self, results, closed):
28 | self.buffer = ""
29 | self.stat_buffer = ""
30 | self.results = results
31 | self.closed = closed
32 | self.records = []
33 | self.lock = Lock()
34 | self.thread = th.Thread(target=self._cacher)
35 | self.thread.start()
36 |
37 | def _cacher(self):
38 | while True:
39 | try:
40 | self.records.append(self.results.get(block=False))
41 | except Empty:
42 | if not self.closed.is_set():
43 | time.sleep(0.1)
44 | else:
45 | break
46 |
47 | def __next__(self):
48 | if self.closed.is_set():
49 | self.thread.join()
50 | raise StopIteration
51 | with self.lock:
52 | records = self.records
53 | self.records = []
54 | if records:
55 | return records_to_df(records)
56 | return None
57 |
58 | def __iter__(self):
59 | return self
60 |
61 |
62 | class BfgStatsReader(object):
63 | def __init__(self, instance_counter, steps):
64 | self.closed = False
65 | self.last_ts = 0
66 | self.steps = _expand_steps(steps)
67 | self.instance_counter = instance_counter
68 | self.start_time = int(time.time())
69 |
70 | def __iter__(self):
71 | while not self.closed:
72 | cur_ts = int(time.time())
73 | if cur_ts > self.last_ts:
74 | offset = cur_ts - self.start_time
75 | reqps = 0
76 | if offset >= 0 and offset < len(self.steps):
77 | reqps = self.steps[offset]
78 | yield [{'ts': cur_ts, 'metrics': {'instances': self.instance_counter.value, 'reqps': reqps}}]
79 | self.last_ts = cur_ts
80 | else:
81 | yield []
82 |
83 | def close(self):
84 | self.closed = True
85 |
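
A small sketch of what `records_to_df` expects and returns. The record shape is hypothetical, inferred from the columns the function touches (send_ts in seconds, interval_real in microseconds, tag with an optional '#...' suffix):

    from yandextank.plugins.Bfg.reader import records_to_df

    records = [
        {'send_ts': 1700000000.0, 'interval_real': 12000, 'tag': 'case1#1'},
        {'send_ts': 1700000000.5, 'interval_real': 8000, 'tag': 'case2#2'},
    ]
    df = records_to_df(records)
    # df is indexed by receive_sec and the '#<n>' suffix is stripped from the tag column
    print(df[['receive_ts', 'tag']])
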
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/tests/test_router.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from mock import Mock
3 | from yandextank.contrib.netort.netort.data_manager import MetricsRouter, DataManager, MetricData
4 | from yandextank.contrib.netort.netort.data_manager.common.interfaces import TypeDistribution, TypeQuantiles
5 | import pandas as pd
6 |
7 |
8 | class TestAggregatorBuffer(object):
9 |
10 | def setup_method(self):
11 | self.metrics_router = MetricsRouter(Mock(DataManager), 5)
12 | self.df1 = pd.read_csv('netort/data_manager/tests/df1MetricData.csv')
13 | self.metric_data1 = MetricData(
14 | df=self.df1,
15 | data_types=[TypeDistribution, TypeQuantiles],
16 | local_id='metric_85ef54ec-1275-4cd8-b358-9c5ed41070a5',
17 | test_start=0,
18 | )
19 | self.df2 = pd.read_csv('netort/data_manager/tests/df2MetricData.csv')
20 | self.metric_data2 = MetricData(
21 | df=self.df2,
22 | data_types=[TypeQuantiles, TypeDistribution],
23 | local_id='metric_4913cbd2-071f-4338-a020-2eae42f5b9ff',
24 | test_start=0,
25 | )
26 |
27 | @pytest.mark.xfail
28 | def test_buffer_last_piece(self):
29 | res1 = self.metrics_router._from_buffer(self.metric_data1, False)
30 | res2 = self.metrics_router._from_buffer(self.metric_data2, True)
31 | assert len(self.df1) + len(self.df2) == len(res1) + len(res2)
32 |
33 | @pytest.mark.xfail
34 | def test_buffer_no_last_piece(self):
35 | res1 = self.metrics_router._MetricsRouter__from_aggregator_buffer(self.df1, 'metric1', False)
36 | res2 = self.metrics_router._MetricsRouter__from_aggregator_buffer(self.df2, 'metric1', False)
37 | assert len(self.df1) + len(self.df2) > len(res1) + len(res2)
38 | assert len(self.df1) + len(self.df2) == len(res1) + len(res2) + len(
39 | self.metrics_router._MetricsRouter__aggregator_buffer.get('metric1', [])
40 | )
41 |
42 | @pytest.mark.xfail
43 | def test_buffer_multiple_metrics(self):
44 | res11 = self.metrics_router._MetricsRouter__from_aggregator_buffer(self.df1, 'metric1', False)
45 | res21 = self.metrics_router._MetricsRouter__from_aggregator_buffer(self.df1, 'metric2', False)
46 | res12 = self.metrics_router._MetricsRouter__from_aggregator_buffer(self.df2, 'metric1', False)
47 | res22 = self.metrics_router._MetricsRouter__from_aggregator_buffer(self.df2, 'metric2', True)
48 | assert len(self.df1) + len(self.df2) == len(res11) + len(res12) + len(
49 | self.metrics_router._MetricsRouter__aggregator_buffer.get('metric1', [])
50 | )
51 | assert len(self.df1) + len(self.df2) == len(res21) + len(res22)
52 | assert self.metrics_router._MetricsRouter__aggregator_buffer.get('metric2', []) is None
53 |
--------------------------------------------------------------------------------
/yandextank/core/expvar.py:
--------------------------------------------------------------------------------
1 | """
2 | Global metrics publishing module. Inspired by Golang's expvar module
3 |
4 | This implementation is not thread-safe
5 | """
6 |
7 | from queue import Queue, Empty
8 | import time
9 |
10 |
11 | class ExpVar(object):
12 | """
13 | This class stores variables
14 | """
15 |
16 | def __init__(self):
17 | self.variables = {}
18 |
19 | def publish(self, name, var):
20 | if name in self.variables:
21 |             raise RuntimeError("'%s' variable has already been published" % name)
22 | self.variables[name] = var
23 | return var
24 |
25 | def get(self, name):
26 | if name not in self.variables:
27 |             raise RuntimeError("No such variable: %s" % name)
28 | return self.variables[name]
29 |
30 | def get_dict(self):
31 | return {k: v.get() for k, v in self.variables.items()}
32 |
33 |
34 | class Var(object):
35 | """
36 | This class stores generic variable value.
37 | It is also a base class for other variable types
38 | """
39 |
40 | def __init__(self, value=None):
41 | self.value = value
42 |
43 | def set(self, value):
44 | self.value = value
45 |
46 | def get(self):
47 | return self.value
48 |
49 | def __str__(self):
50 | return str(self.value)
51 |
52 |
53 | class Int(Var):
54 | def __init__(self, value=0):
55 | if not isinstance(value, int):
56 | raise ValueError("Value should be an integer, but it is '%s'" % type(value))
57 | super(Int, self).__init__(value)
58 |
59 | def inc(self, delta=1):
60 | self.value += delta
61 |
62 |
63 | class Metric(object):
64 | """
65 | This class stores generic time-series data in a queue.
66 | Values are stored as (timestamp, value) tuples
67 | """
68 |
69 | def __init__(self):
70 | self.metric = Queue()
71 |
72 | def push(self, value, timestamp=None):
73 | if timestamp is None:
74 | timestamp = int(time.time())
75 | elif not isinstance(timestamp, int):
76 | raise ValueError("Timestamp should be an integer, but it is '%s'" % type(timestamp))
77 | self.metric.put((timestamp, value))
78 |
79 | def __next__(self):
80 | try:
81 | return self.metric.get_nowait()
82 | except Empty:
83 | raise StopIteration
84 |
85 | def get(self):
86 | # TODO: decide what we should return here
87 | return None
88 |
89 | def __iter__(self):
90 | return self
91 |
92 |
93 | EV = ExpVar()
94 |
95 |
96 | def publish(name, var):
97 | return EV.publish(name, var)
98 |
99 |
100 | def get(name):
101 | return EV.get(name)
102 |
103 |
104 | def get_dict():
105 | return EV.get_dict()
106 |
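
A short usage sketch of the module-level publish/get helpers together with the Int and Metric variable types, assuming yandextank.core.expvar is importable in your environment:

    from yandextank.core import expvar

    # publish each variable once, then update it from anywhere in the process
    requests_total = expvar.publish('requests_total', expvar.Int(0))
    latency = expvar.publish('latency_ms', expvar.Metric())

    requests_total.inc()
    latency.push(42, timestamp=1700000000)

    print(expvar.get('requests_total').get())  # -> 1
    print(expvar.get_dict())                   # -> {'requests_total': 1, 'latency_ms': None}
    print(list(latency))                       # drains the queue -> [(1700000000, 42)]
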
--------------------------------------------------------------------------------
/yandextank/plugins/Phantom/tests/test_log_analyzer.py:
--------------------------------------------------------------------------------
1 | import tempfile
2 | import pytest
3 | from pathlib import Path
4 |
5 | from yandextank.plugins.Phantom.log_analyzer import LogAnalyzer, LogLine
6 |
7 | LOG_SAMPLE = """2022-03-18 19:08:11.525 +0300 [error] [benchmark_io 4639] socket: Too many open files
8 | 2022-03-18 19:08:11.525 +0300 [error] [benchmark_io 4625] socket: Too many open files
9 | 2022-03-18 19:08:11.526 +0300 [error] [benchmark_io 4643] socket: Too many open files
10 | 2022-03-18 19:08:11.526 +0300 [error] [benchmark_io 4658] socket: Too many open files
11 | 2022-03-18 19:08:11.526 +0300 [error] [benchmark_io 4644] socket: Too many open files
12 | 2022-03-18 19:08:11.583 +0300 [error] [monitor_io] bq_sleep: Operation canceled
13 | 2022-03-18 19:08:11.584 +0300 [error] [benchmark_io stream_method brief_logger] bq_sleep: Operation canceled
14 | 2022-03-18 19:08:11.584 +0300 [error] [benchmark_io] cond-wait: Operation canceled
15 | 2022-03-18 19:08:11.584 +0300 [error] [monitor_io] bq_sleep: Operation canceled
16 | 2022-03-18 19:08:11.601 +0300 [error] [phantom_logger] bq_sleep: Operation canceled
17 | 2022-03-18 19:08:11.609 +0300 [info] [] Exit"""
18 |
19 |
20 | def test_one_most_recent_error():
21 | with tempfile.NamedTemporaryFile() as file:
22 | Path(file.name).write_text(LOG_SAMPLE)
23 | (error,) = LogAnalyzer(file.name).get_most_recent_errors(1)
24 | assert 'socket: Too many open files' == error
25 |
26 |
27 | def test_most_recent_errors():
28 | with tempfile.NamedTemporaryFile() as file:
29 | Path(file.name).write_text(LOG_SAMPLE)
30 | errors = LogAnalyzer(file.name).get_most_recent_errors()
31 | assert [
32 | 'socket: Too many open files',
33 | 'bq_sleep: Operation canceled',
34 | 'cond-wait: Operation canceled',
35 | ] == errors
36 |
37 |
38 | def test_empty_errors():
39 | with tempfile.NamedTemporaryFile() as file:
40 | Path(file.name).write_text('')
41 | errors = LogAnalyzer(file.name).get_most_recent_errors()
42 | assert [] == errors
43 |
44 |
45 | @pytest.mark.parametrize(
46 | ('line', 'level', 'message'),
47 | [
48 | (
49 | '2022-03-18 19:08:11.525 +0300 [error] [benchmark_io 4639] socket: Too many open files',
50 | 'error',
51 | 'socket: Too many open files',
52 | ),
53 | (
54 | '2022-03-18 19:08:11.584 +0300 [error] [benchmark_io stream_method brief_logger] bq_sleep: Operation canceled',
55 | 'error',
56 | 'bq_sleep: Operation canceled',
57 | ),
58 | (
59 | '2022-03-18 19:08:11.609 +0300 [info] [] Exit',
60 | 'info',
61 | 'Exit',
62 | ),
63 | ],
64 | )
65 | def test_line_split(line, level, message):
66 | parsed = LogLine(line)
67 | assert level == parsed.level
68 | assert message == parsed.message
69 |
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/decoder.py:
--------------------------------------------------------------------------------
1 | """Known metrics decoder"""
2 |
3 | import logging
4 |
5 | logger = logging.getLogger(__name__)
6 |
7 |
8 | class MetricsDecoder(object):
9 | def __init__(self):
10 |         """
11 |         Translates telegraf metric names into common monitoring metric names.
12 |         Names without a known mapping are translated to `custom:<name>`.
13 |
14 |         """
15 | self.known_metrics = {
16 | 'mem_used': 'Memory_used',
17 | 'mem_free': 'Memory_free',
18 | 'mem_buffered': 'Memory_buff',
19 | 'mem_cached': 'Memory_cached',
20 | 'kernel_context_switches': 'System_csw',
21 | 'kernel_interrupts': 'System_int',
22 | 'kernel_processes_forked': 'System_forks',
23 | 'processes_total': 'System_numproc',
24 | 'processes_total_threads': 'System_numthreads',
25 | 'system_load1': 'System_la1',
26 | 'system_load5': 'System_la5',
27 | 'system_load15': 'System_la15',
28 | 'nstat_TcpRetransSegs': 'Net_retransmit',
29 |             # these are disabled because their names depend on net interface names and disk ids,
30 |             # and we don't need data keyed by unknown ids here
31 | # 'net_packets_recv': 'Net_rx',
32 | # 'net_packets_sent': 'Net_tx',
33 | # 'net_bytes_recv': 'Net_recv',
34 | # 'net_bytes_sent': 'Net_send',
35 | # 'diskio_read_bytes': 'Disk_read',
36 | # 'diskio_write_bytes': 'Disk_write',
37 | # ----------
38 | # remove this crunch after front refactoring
39 | # 'cpu-cpu-total_usage_user': 'CPU_user',
40 | # 'cpu-cpu-total_usage_system': 'CPU_system',
41 | # 'cpu-cpu-total_usage_idle': 'CPU_idle',
42 | # 'cpu-cpu-total_usage_iowait': 'CPU_iowait',
43 | # 'cpu-cpu-total_usage_irq': 'CPU_irq',
44 | # 'cpu-cpu-total_usage_nice': 'CPU_nice',
45 | # 'cpu-cpu-total_usage_softirq': 'CPU_softirq',
46 | # 'cpu-cpu-total_usage_steal': 'CPU_steal',
47 | # 'cpu-cpu-total_usage_guest': 'CPU_guest'
48 | }
49 |
50 | self.diff_metrics = {
51 | 'cpu': [],
52 | 'mem': [],
53 | 'net': ['packets_recv', 'packets_sent', 'bytes_recv', 'bytes_sent'],
54 | 'nstat': ['TcpRetransSegs'],
55 | 'net_response': [],
56 | 'kernel': ['context_switches', 'interrupts', 'processes_forked', 'vmstat_pgfault', 'vmstat_pgmajfault'],
57 | 'diskio': ['read_bytes', 'write_bytes', 'io_time', 'read_time', 'reads', 'write_time', 'writes'],
58 | 'custom': [],
59 | }
60 |
61 | def find_common_names(self, key):
62 | if key in self.known_metrics:
63 | return self.known_metrics[key]
64 | else:
65 | return 'custom:{}'.format(key)
66 |
67 |
68 | decoder = MetricsDecoder()
69 |
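
A quick illustration of `find_common_names`, which falls back to the `custom:` prefix for any metric that is not in the mapping above:

    from yandextank.plugins.Telegraf.decoder import decoder

    print(decoder.find_common_names('mem_used'))          # -> 'Memory_used'
    print(decoder.find_common_names('system_load1'))      # -> 'System_la1'
    print(decoder.find_common_names('my_exotic_metric'))  # -> 'custom:my_exotic_metric'
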
--------------------------------------------------------------------------------
/yandextank/plugins/Phantom/tests/test_reader.py:
--------------------------------------------------------------------------------
1 | from threading import Event
2 | import os
3 |
4 | import pandas as pd
5 | from yandextank.common.util import get_test_path
6 | from yandextank.common.util import FileMultiReader
7 | from yandextank.plugins.Phantom.reader import PhantomReader, PhantomStatsReader, string_to_df_microsec
8 | from functools import reduce
9 |
10 |
11 | class TestPhantomReader(object):
12 | def setup_class(self):
13 | stop = Event()
14 | self.multireader = FileMultiReader(
15 | os.path.join(get_test_path(), 'yandextank/plugins/Phantom/tests/phout.dat'), stop
16 | )
17 | stop.set()
18 |
19 | def teardown_class(self):
20 | self.multireader.close()
21 |
22 | def test_read_all(self):
23 | reader = PhantomReader(self.multireader.get_file(), cache_size=1024)
24 | df = pd.DataFrame()
25 | for chunk in reader:
26 | df = pd.concat([df, pd.DataFrame.from_records(chunk)])
27 | assert len(df) == 200
28 | assert df['interval_real'].mean() == 11000714.0
29 |
30 | def test_reader_closed(self):
31 | reader = PhantomReader(self.multireader.get_file(), cache_size=64)
32 | frames = [i for i in reader]
33 | result = pd.concat(frames)
34 | assert len(result) == 200
35 | assert result['interval_real'].mean() == 11000714.0
36 |
37 | def test_reader_us(self):
38 | with open(os.path.join(get_test_path(), 'yandextank/plugins/Phantom/tests/phout.dat')) as f:
39 | chunk = f.read()
40 | result = string_to_df_microsec(chunk)
41 | expected = pd.read_pickle(os.path.join(get_test_path(), 'yandextank/plugins/Phantom/tests/expected_df.dat'))
42 | result['ts'] -= result['ts'][0]
43 | assert result.equals(expected)
44 |
45 |
46 | class MockInfo(object):
47 | def __init__(self, steps):
48 | self.steps = steps
49 |
50 |
51 | class TestStatsReader(object):
52 |
53 | def test_closed(self):
54 | STEPS = [
55 | [1.0, 1],
56 | [1.0, 1],
57 | [1.0, 1],
58 | [2.0, 1],
59 | [2.0, 1],
60 | [2.0, 1],
61 | [2.0, 1],
62 | [2.0, 1],
63 | [3.0, 1],
64 | [3.0, 1],
65 | [3.0, 1],
66 | [3.0, 1],
67 | [3.0, 1],
68 | [4.0, 1],
69 | [4.0, 1],
70 | [4.0, 1],
71 | [4.0, 1],
72 | [4.0, 1],
73 | [5.0, 1],
74 | [5.0, 1],
75 | [5.0, 1],
76 | ]
77 | reader = PhantomStatsReader(
78 | os.path.join(get_test_path(), 'yandextank/plugins/Phantom/tests/phantom_stat.dat'),
79 | MockInfo(STEPS),
80 | cache_size=1024 * 10,
81 | )
82 | reader.close()
83 | stats = reduce(lambda l1, l2: l1 + l2, [i for i in reader])
84 |
85 | assert len(stats) == 19
86 |
--------------------------------------------------------------------------------
/yandextank/plugins/Pandora/reader.py:
--------------------------------------------------------------------------------
1 | from threading import Thread, Event
2 |
3 | import requests
4 | import time
5 | import logging
6 |
7 | logger = logging.getLogger(__name__)
8 |
9 |
10 | class PandoraStatsPoller(Thread):
11 | def __init__(self, port):
12 | super(PandoraStatsPoller, self).__init__()
13 | self._stop_run = Event()
14 | self.buffer = []
15 | self.port = port
16 |
17 | def run(self):
18 | last_ts = int(time.time() - 1)
19 |
20 | while not self._stop_run.is_set():
21 | curr_ts = int(time.time())
22 | if curr_ts > last_ts:
23 | last_ts = curr_ts
24 | try:
25 | pandora_stat = requests.get(
26 | "http://localhost:{port}/debug/vars".format(port=self.port), timeout=0.9
27 | ).json()
28 | instances_metric = pandora_stat.get(
29 | "engine_LastMaxActiveRequests", pandora_stat.get("engine_ActiveRequests")
30 | )
31 | data = {
32 | 'ts': last_ts - 1,
33 | 'metrics': {
34 | 'instances': instances_metric,
35 | 'reqps': pandora_stat.get("engine_ReqPS"),
36 | },
37 | }
38 | except (requests.ConnectionError, requests.HTTPError, requests.exceptions.Timeout):
39 | logger.debug("Pandora expvar http interface is unavailable", exc_info=True)
40 | data = {'ts': last_ts - 1, 'metrics': {'instances': 0, 'reqps': 0}}
41 | self.buffer.append(data)
42 | else:
43 | time.sleep(0.2)
44 |
45 | def stop(self):
46 | self._stop_run.set()
47 |
48 | def get_data(self):
49 | result, self.buffer = self.buffer, []
50 | return result
51 |
52 |
53 | class PandoraStatsReader(object):
54 |     # TODO: maybe make stats collection asynchronous
55 | def __init__(self, expvar, port):
56 | self.closed = False
57 | self.expvar = expvar
58 | self.port = port
59 | self.poller = PandoraStatsPoller(port)
60 | self.started = False
61 |
62 | def __next__(self):
63 | if not self.expvar:
64 | if self.closed:
65 | raise StopIteration
66 | return [{'ts': int(time.time() - 1), 'metrics': {'instances': 0, 'reqps': 0}}]
67 | else:
68 | if self.closed:
69 | raise StopIteration()
70 | elif not self.started:
71 | self.poller.start()
72 | self.started = True
73 | return self.poller.get_data()
74 |
75 | def close(self):
76 | self.closed = True
77 | if self.poller.is_alive():
78 | self.poller.stop()
79 | self.poller.join()
80 |
81 | def __iter__(self):
82 | return self
83 |
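
A sketch of the no-expvar branch of `PandoraStatsReader`: without the expvar endpoint it simply yields zeroed samples until `close()` is called (illustrative only):

    from yandextank.plugins.Pandora.reader import PandoraStatsReader

    reader = PandoraStatsReader(expvar=False, port=1234)
    print(next(reader))  # -> [{'ts': <now - 1>, 'metrics': {'instances': 0, 'reqps': 0}}]
    reader.close()       # after this, next(reader) raises StopIteration
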
--------------------------------------------------------------------------------
/yandextank/plugins/ResourceCheck/plugin.py:
--------------------------------------------------------------------------------
1 | '''Module to check system resources at load generator'''
2 |
3 | import logging
4 | import time
5 |
6 | import psutil
7 | from ...common.util import expand_to_seconds
8 | from ...common.interfaces import AbstractPlugin
9 |
10 | from yandextank.contrib.netort.netort.process import execute
11 |
12 |
13 | class Plugin(AbstractPlugin):
14 | '''Plugin to check system resources'''
15 |
16 | SECTION = "rcheck"
17 |
18 | @staticmethod
19 | def get_key():
20 | return __file__
21 |
22 | def __init__(self, core, cfg, name):
23 | '''Constructor'''
24 | AbstractPlugin.__init__(self, core, cfg, name)
25 | self.interval = "10s"
26 | self.disk_limit = 2048 # 2 GB
27 | self.mem_limit = 512 # 0.5 GB
28 | self.last_check = 0
29 |
30 | def get_available_options(self):
31 | return ["interval", "disk_limit", "mem_limit"]
32 |
33 | def configure(self):
34 | self.interval = expand_to_seconds(self.get_option("interval", self.interval))
35 | self.disk_limit = int(self.get_option("disk_limit", self.disk_limit))
36 | self.mem_limit = int(self.get_option("mem_limit", self.mem_limit))
37 |
38 | def prepare_test(self):
39 | self.log.info("Checking tank resources...")
40 | self.__check_disk()
41 | self.__check_mem()
42 |
43 | def is_test_finished(self):
44 | self.log.debug("Checking tank resources...")
45 | if time.time() - self.last_check < self.interval:
46 | return -1
47 | self.__check_disk()
48 | self.__check_mem()
49 | self.last_check = time.time()
50 | return -1
51 |
52 | def __check_disk(self):
53 | '''raise exception on disk space exceeded'''
54 | cmd = "sh -c \"df --no-sync -m -P -x fuse -x tmpfs -x devtmpfs -x davfs -x nfs "
55 | cmd += self.core.artifacts_base_dir
56 | cmd += " | tail -n 1 | awk '{print \\$4}' \""
57 | res = execute(cmd, True, 0.1, True)
58 | logging.debug("Result: %s", res)
59 | if not len(res[1]):
60 | self.log.debug("No disk usage info: %s", res[2])
61 | return
62 | disk_free = res[1]
63 | self.log.debug("Disk free space: %s/%s", disk_free.strip(), self.disk_limit)
64 | if int(disk_free.strip()) < self.disk_limit:
65 | raise RuntimeError(
66 | "Not enough local resources: disk space less than %sMB in %s: %sMB"
67 | % (self.disk_limit, self.core.artifacts_base_dir, int(disk_free.strip()))
68 | )
69 |
70 | def __check_mem(self):
71 | '''raise exception on RAM exceeded'''
72 | mem_free = psutil.virtual_memory().available / 2**20
73 | self.log.debug("Memory free: %s/%s", mem_free, self.mem_limit)
74 | if mem_free < self.mem_limit:
75 |             raise RuntimeError("Not enough resources: free memory less than %sMB: %sMB" % (self.mem_limit, mem_free))
76 |
--------------------------------------------------------------------------------
/yandextank/plugins/InfluxUploader/plugin.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # TODO: make the next two lines unnecessary
3 | # pylint: disable=line-too-long
4 | # pylint: disable=missing-docstring
5 | import datetime
6 | import logging
7 | import sys
8 | from uuid import uuid4
9 |
10 | from builtins import str
11 | from influxdb import InfluxDBClient
12 |
13 | from .decoder import Decoder
14 | from ...common.interfaces import AbstractPlugin, MonitoringDataListener, AggregateResultListener
15 |
16 | logger = logging.getLogger(__name__) # pylint: disable=C0103
17 |
18 |
19 | def chop(data_list, chunk_size):
20 | if sys.getsizeof(str(data_list)) <= chunk_size:
21 | return [data_list]
22 | elif len(data_list) == 1:
23 | logger.warning("Too large piece of Telegraf data. Might experience upload problems.")
24 | return [data_list]
25 | else:
26 |         mid = len(data_list) // 2  # integer division: a float index would break slicing on Python 3
27 | return chop(data_list[:mid], chunk_size) + chop(data_list[mid:], chunk_size)
28 |
29 |
30 | class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
31 | SECTION = 'influx'
32 |
33 | def __init__(self, core, cfg, name):
34 | AbstractPlugin.__init__(self, core, cfg, name)
35 | self.tank_tag = self.get_option("tank_tag")
36 | self.prefix_measurement = self.get_option("prefix_measurement")
37 | self._client = None
38 | self.start_time = None
39 | self.end_time = None
40 | self.decoder = Decoder(
41 | self.tank_tag,
42 | str(uuid4()),
43 | self.get_option("custom_tags"),
44 | self.get_option("labeled"),
45 | self.get_option("histograms"),
46 | )
47 |
48 | @property
49 | def client(self):
50 | if not self._client:
51 | self._client = InfluxDBClient(
52 | self.get_option("address"),
53 | self.get_option("port"),
54 | username=self.get_option("username"),
55 | password=self.get_option("password"),
56 | database=self.get_option("database"),
57 | )
58 | return self._client
59 |
60 | def prepare_test(self):
61 | self.core.job.subscribe_plugin(self)
62 |
63 | def start_test(self):
64 | self.start_time = datetime.datetime.now()
65 |
66 | def end_test(self, retcode):
67 | self.end_time = datetime.datetime.now() + datetime.timedelta(minutes=1)
68 | return retcode
69 |
70 | def on_aggregated_data(self, data, stats):
71 | self.client.write_points(self.decoder.decode_aggregates(data, stats, self.prefix_measurement), 's')
72 |
73 | def monitoring_data(self, data_list):
74 | if len(data_list) > 0:
75 | [self._send_monitoring(chunk) for chunk in chop(data_list, self.get_option("chunk_size"))]
76 |
77 | def _send_monitoring(self, data):
78 | self.client.write_points(self.decoder.decode_monitoring(data), 's')
79 |
80 | def set_uuid(self, id_):
81 | self.decoder.tags['uuid'] = id_
82 |
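
`chop` recursively halves a batch until each chunk's string representation fits into `chunk_size` bytes (or the chunk is a single item); a quick illustration with arbitrary values:

    import sys

    # importing this module requires the influxdb client package to be installed
    from yandextank.plugins.InfluxUploader.plugin import chop

    data = list(range(100))
    chunks = chop(data, chunk_size=200)

    # for this data every chunk fits the limit and nothing is lost or reordered
    assert all(sys.getsizeof(str(chunk)) <= 200 for chunk in chunks)
    assert sum(chunks, []) == data
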
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_manager/common/util.py:
--------------------------------------------------------------------------------
1 | import collections.abc
2 | import time
3 | import logging
4 | import os
5 | import re
6 | import yaml
7 |
8 | # TODO: rename to format_http_request
9 | from threading import Lock
10 |
11 |
12 | def pretty_print(req):
13 | return '{header}\n{query}\n{http_headers}\n\n{body}\n{footer}'.format(
14 | header='-----------QUERY START-----------',
15 | query=req.method + ' ' + req.url,
16 | http_headers='\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),
17 | body=req.body,
18 | footer='-----------QUERY END-----------',
19 | )
20 |
21 |
22 | def recursive_dict_update(d1, d2):
23 | for k, v in d2.items():
24 |         if isinstance(v, collections.abc.Mapping):  # collections.Mapping was removed in Python 3.10
25 | r = recursive_dict_update(d1.get(k, {}), v)
26 | d1[k] = r
27 | else:
28 | d1[k] = d2[k]
29 | return d1
30 |
31 |
32 | def log_time_decorator(func):
33 | """
34 | logs func execution time
35 | :param func:
36 | :return:
37 | """
38 |
39 | def timed(*args, **kwargs):
40 | start = time.time()
41 | res = func(*args, **kwargs)
42 | logging.debug('TIMER {}: {}'.format(func.__name__, round(time.time() - start, 3)))
43 | return res
44 |
45 | return timed
46 |
47 |
48 | class thread_safe_property(object):
49 | # credits to https://stackoverflow.com/a/39217007/3316574
50 | def __init__(self, func):
51 | self._func = func
52 | self.__name__ = func.__name__
53 | self.__doc__ = func.__doc__
54 | self._lock = Lock()
55 |
56 | def __get__(self, obj, klass=None):
57 | if obj is None:
58 | return None
59 | # __get__ may be called concurrently
60 | with self._lock:
61 | # another thread may have computed property value
62 | # while this thread was in __get__
63 | if self.__name__ not in obj.__dict__:
64 | # none computed `_func` yet, do so (under lock) and set attribute
65 | obj.__dict__[self.__name__] = self._func(obj)
66 | # by now, attribute is guaranteed to be set,
67 | # either by this thread or another
68 | return obj.__dict__[self.__name__]
69 |
70 |
71 | def expandvars(path, default=None):
72 | if default is None:
73 | return os.path.expandvars(path)
74 |
75 | # matches expressions like ${VALUE} where VALUE is parsed to group 1
76 | reVar = r'\$\{([^}]*)\}'
77 |
78 | def replace_var(m: re.Match):
79 | return os.environ.get(m.group(1), default)
80 |
81 | return re.sub(reVar, replace_var, path)
82 |
83 |
84 | def env_constructor(loader, node):
85 | return expandvars(node.value, default='')
86 |
87 |
88 | class YamlEnvSubstConfigLoader(yaml.SafeLoader):
89 | pass
90 |
91 |
92 | env_matcher = re.compile(r'.*\$\{([^}^{]+)\}.*')
93 | YamlEnvSubstConfigLoader.add_implicit_resolver('!env', env_matcher, None)
94 | YamlEnvSubstConfigLoader.add_constructor('!env', constructor=env_constructor)
95 |
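
A sketch of the `${VAR}` substitution provided by `expandvars` and by the `YamlEnvSubstConfigLoader` defined above:

    import os
    import yaml

    from yandextank.contrib.netort.netort.data_manager.common.util import (
        YamlEnvSubstConfigLoader,
        expandvars,
    )

    os.environ['DATA_DIR'] = '/tmp/data'

    # plain string expansion; unknown variables fall back to the default
    print(expandvars('${DATA_DIR}/logs', default=''))  # -> /tmp/data/logs
    print(expandvars('${MISSING}/logs', default=''))   # -> /logs

    # the custom loader applies the same substitution to matching YAML scalars
    config = yaml.load('path: ${DATA_DIR}/logs', Loader=YamlEnvSubstConfigLoader)
    print(config)                                      # -> {'path': '/tmp/data/logs'}
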
--------------------------------------------------------------------------------
/yandextank/plugins/OpenTSDBUploader/plugin.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # TODO: make the next two lines unnecessary
3 | # pylint: disable=line-too-long
4 | # pylint: disable=missing-docstring
5 | import datetime
6 | import logging
7 | import sys
8 | from builtins import str
9 | from uuid import uuid4
10 |
11 | from .client import OpenTSDBClient
12 | from .decoder import Decoder
13 | from ...common.interfaces import AbstractPlugin, MonitoringDataListener, AggregateResultListener
14 |
15 | logger = logging.getLogger(__name__) # pylint: disable=C0103
16 |
17 |
18 | def chop(data_list, chunk_size):
19 | if sys.getsizeof(str(data_list)) <= chunk_size:
20 | return [data_list]
21 | elif len(data_list) == 1:
22 | logger.warning("Too large piece of Telegraf data. Might experience upload problems.")
23 | return [data_list]
24 | else:
25 |         mid = len(data_list) // 2  # integer division: a float index would break slicing on Python 3
26 | return chop(data_list[:mid], chunk_size) + chop(data_list[mid:], chunk_size)
27 |
28 |
29 | class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
30 | SECTION = 'opentsdb'
31 |
32 | def __init__(self, core, cfg, name):
33 | AbstractPlugin.__init__(self, core, cfg, name)
34 | self.tank_tag = self.get_option("tank_tag")
35 | self.prefix_metric = self.get_option("prefix_metric")
36 | self._client = None
37 | self.start_time = None
38 | self.end_time = None
39 |
40 | self.decoder = Decoder(
41 | self.tank_tag,
42 | str(uuid4()),
43 | self.get_option("custom_tags"),
44 | self.get_option("labeled"),
45 | self.get_option("histograms"),
46 | )
47 |
48 | @property
49 | def client(self):
50 | if not self._client:
51 | self._client = OpenTSDBClient(
52 | host=self.get_option("address"),
53 | port=self.get_option("port"),
54 | username=self.get_option("username"),
55 | password=self.get_option("password"),
56 | ssl=self.get_option("ssl"),
57 | verify_ssl=self.get_option("verify_ssl"),
58 | )
59 | return self._client
60 |
61 | def prepare_test(self):
62 | self.core.job.subscribe_plugin(self)
63 |
64 | def start_test(self):
65 | self.start_time = datetime.datetime.now()
66 |
67 | def end_test(self, retcode):
68 | self.end_time = datetime.datetime.now() + datetime.timedelta(minutes=1)
69 | return retcode
70 |
71 | def on_aggregated_data(self, data, stats):
72 | self.client.write(self.decoder.decode_aggregates(data, stats, self.prefix_metric))
73 |
74 | def monitoring_data(self, data_list):
75 | if len(data_list) > 0:
76 | [self._send_monitoring(chunk) for chunk in chop(data_list, self.get_option("chunk_size"))]
77 |
78 | def _send_monitoring(self, data):
79 | self.client.write(self.decoder.decode_monitoring(data))
80 |
81 | def set_uuid(self, id_):
82 | self.decoder.tags['uuid'] = id_
83 |
--------------------------------------------------------------------------------
/yandextank/plugins/Telegraf/config_parser.py:
--------------------------------------------------------------------------------
1 | import os
2 | import yaml
3 | from xml.etree import ElementTree
4 | from requests.structures import CaseInsensitiveDict
5 |
6 | from typing import List
7 |
8 | TARGET_HINT_PLACEHOLDER = '[target]'
9 | YAML_HOSTS_SECTION = 'hosts'
10 | YAML_METRICS_SECTION = 'metrics'
11 | YAML_CUSTOM_METRIC = 'custom'
12 | YAML_CUSTOM_METRIC_CMD = 'cmd'
13 | XML_HOST_TAG_NAME = 'Host'
14 | XML_HOST_ADDRESS_ATTR = 'address'
15 |
16 |
17 | class ParseError(Exception):
18 | pass
19 |
20 |
21 | class Metric(CaseInsensitiveDict):
22 | name: str
23 | text: str
24 |
25 | def __init__(self, name, text, data):
26 | super().__init__(data)
27 | self.name = name
28 | self.text = text
29 |
30 |
31 | class Host(CaseInsensitiveDict):
32 | address: str
33 | metrics: List[Metric]
34 |
35 | def __init__(self, address, metrics, data):
36 | super().__init__(data)
37 | self.address = address
38 | self.metrics = metrics
39 |
40 |
41 | def parse_xml(config) -> List[Host]:
42 | try:
43 | if os.path.exists(config):
44 | tree = ElementTree.parse(config)
45 | else:
46 | tree = ElementTree.fromstring(config)
47 | except ElementTree.ParseError as ex:
48 | raise ParseError(ex)
49 |
50 | result = []
51 | hosts = tree.findall(XML_HOST_TAG_NAME)
52 | for host in hosts:
53 | hostname = host.get(XML_HOST_ADDRESS_ATTR, '').lower()
54 | metrics = [Metric(m.tag, m.text, m.attrib) for m in host]
55 | result.append(Host(hostname, metrics, host.attrib))
56 | return result
57 |
58 |
59 | def parse_yaml(config) -> List[Host]:
60 | try:
61 | if os.path.exists(config):
62 | with open(config, "r") as stream:
63 | yaml_content = yaml.safe_load(stream)
64 | else:
65 | yaml_content = yaml.safe_load(config)
66 | except yaml.YAMLError as ex:
67 | raise ParseError(ex)
68 |
69 | result = []
70 |
71 | yaml_content = yaml_content or {}
72 |
73 | global_inputs = yaml_content.get(YAML_METRICS_SECTION, {})
74 | agents = yaml_content.get(YAML_HOSTS_SECTION, {})
75 |
76 |     # if no "hosts:" section is provided, use the default target host placeholder
77 | if len(agents) == 0:
78 | agents[TARGET_HINT_PLACEHOLDER] = None
79 | for hostname, hostdata in agents.items():
80 | metrics = []
81 | local_inputs = global_inputs.copy()
82 | hostdata = hostdata or {}
83 |
84 | local_inputs.update(hostdata.get(YAML_METRICS_SECTION, {}))
85 |
86 | for mname, mdata in local_inputs.items():
87 | if mdata is None:
88 | mdata = ''
89 |
90 | if mname.lower() == YAML_CUSTOM_METRIC and isinstance(mdata, dict):
91 | mtext = mdata.get(YAML_CUSTOM_METRIC_CMD, '') or str(mdata)
92 | else:
93 | mtext = str(mdata)
94 | metrics.append(Metric(mname, mtext, mdata if isinstance(mdata, dict) else {}))
95 |
96 | result.append(Host(hostname, metrics, hostdata if isinstance(hostdata, dict) else {}))
97 | return result
98 |
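
A short sketch of `parse_yaml` on an inline config string; the host name and metric set are made up for illustration:

    from yandextank.plugins.Telegraf.config_parser import parse_yaml

    CONFIG = """
    metrics:              # global inputs, inherited by every host
      cpu:
    hosts:
      load.example.com:
        metrics:
          custom:
            cmd: echo 42
    """

    for host in parse_yaml(CONFIG):
        print(host.address, [(m.name, m.text) for m in host.metrics])
    # -> load.example.com [('cpu', ''), ('custom', 'echo 42')]
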
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | # Yandex.Tank
2 | #
3 | # VERSION 0.0.4
4 |
5 | FROM ubuntu:focal
6 | MAINTAINER Yandex Load Team
7 | # Version for description
8 | ARG VERSION
9 | # You may specify tag instead of branch to build for specific tag
10 | ARG BRANCH=master
11 |
12 | LABEL Description="Fresh Yandex.Tank from github master branch with phantom" \
13 | Vendor="Yandex" \
14 | maintainer="direvius@yandex-team.ru" \
15 | YandexTank.version="${VERSION}" \
16 | Telegraf.version="${TELEGRAF_VERSION}"
17 |
18 | RUN export DEBIAN_FRONTEND=noninteractive && \
19 | apt-get update -q && \
20 | apt-get install --no-install-recommends -yq \
21 | software-properties-common && \
22 | add-apt-repository ppa:deadsnakes/ppa -y && \
23 | add-apt-repository ppa:yandex-load/main -y && \
24 | apt-get update -q && \
25 | apt-get install --no-install-recommends -yq \
26 | sudo \
27 | vim \
28 | wget \
29 | curl \
30 | less \
31 | iproute2 \
32 | telnet \
33 | atop \
34 | openssh-client \
35 | git \
36 | gpg-agent \
37 | python3.12 \
38 | phantom \
39 | phantom-ssl && \
40 | apt-get clean && \
41 | curl -sS https://bootstrap.pypa.io/get-pip.py | python3.12 && \
42 | rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/* /tmp/* /var/tmp/*
43 |
44 | ENV TELEGRAF_VERSION=1.29.1-1
45 | # https://www.influxdata.com/time-series-platform/telegraf/
46 | # influxdata-archive_compat.key GPG fingerprint:
47 | # 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
48 | RUN wget -q https://repos.influxdata.com/influxdata-archive_compat.key
49 | RUN echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c && cat influxdata-archive_compat.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null
50 | RUN echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
51 | RUN apt-get update
52 | RUN apt-get install -y telegraf
53 |
54 | ENV BUILD_DEPS="python3.12-dev build-essential gfortran libssl-dev libffi-dev"
55 | RUN export DEBIAN_FRONTEND=noninteractive && \
56 | apt-get update && \
57 | apt-get install -yq --no-install-recommends ${BUILD_DEPS} && \
58 | pip3 install --upgrade setuptools && \
59 | pip3 install --upgrade pip && \
60 | pip3 install https://api.github.com/repos/yandex/yandex-tank/tarball/${BRANCH} && \
61 | apt-get autoremove -y ${BUILD_DEPS} && \
62 | apt-get clean && \
63 | rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/* /tmp/* /var/tmp/* /root/.cache/*
64 |
65 | RUN curl -L -v -o pandora https://github.com/yandex/pandora/releases/latest/download/pandora_0.3.8_linux_amd64 && \
66 | chmod +x ./pandora && \
67 | mv ./pandora /usr/local/bin/
68 |
69 | COPY files/bashrc /root/.bashrc
70 | COPY files/inputrc /root/.inputrc
71 |
72 | VOLUME ["/var/loadtest"]
73 | WORKDIR /var/loadtest
74 | ENTRYPOINT ["/usr/local/bin/yandex-tank"]
75 |
--------------------------------------------------------------------------------
/yandextank/plugins/YCMonitoring/plugin.py:
--------------------------------------------------------------------------------
1 | import os
2 | from queue import Queue
3 |
4 | import logging
5 | from yandextank.common.interfaces import MonitoringPlugin
6 | from yandextank.common.monitoring import MonitoringPanel, DefaultCollector
7 | from yandextank.common.util import expand_to_seconds
8 | from yandextank.plugins.YCMonitoring.sensor import YCMonitoringSensor
9 |
10 | LOGGER = logging.getLogger(__name__)
11 |
12 |
13 | def as_list(value) -> list | None:
14 | if value is None or isinstance(value, list):
15 | return value
16 | return [value]
17 |
18 |
19 | class Plugin(MonitoringPlugin):
20 | def __init__(self, core, cfg, name):
21 | super(Plugin, self).__init__(core, cfg, name)
22 | self.timeout = expand_to_seconds(self.get_option('timeout'))
23 | self.poll_interval = expand_to_seconds(self.get_option('poll_interval'))
24 | self.request_timeout = expand_to_seconds(self.get_option('request_timeout'))
25 |
26 | def prepare_test(self):
27 | token = None
28 | token_option = self.get_option('token')
29 | if token_option == 'LOADTESTING_YC_TOKEN':
30 | token = os.environ.get('LOADTESTING_YC_TOKEN')
31 | else:
32 | try:
33 | with open(token_option, 'r') as tfile:
34 | token = tfile.read().strip('\n')
35 | except (OSError, IOError):
36 | error = f"YCMonitoring plugin: Authorization token is not set! File {self.get_option('token')} is not found or can't be read."
37 | LOGGER.warning(error, exc_info=True)
38 |
39 | if token:
40 | self.collector = DefaultCollector(logger=LOGGER, timeout=self.timeout, poll_interval=self.poll_interval)
41 | api_host = self.get_option('api_host')
42 | for panel_name, query_data in self.get_option('panels').items():
43 | queue = Queue()
44 | group_name = query_data.get('group_name') or api_host
45 | panel = MonitoringPanel(group_name, self.timeout, queue)
46 | senset = set()
47 | for query in query_data.get('queries'):
48 | sensor = YCMonitoringSensor(
49 | panel_name,
50 | api_host,
51 | token,
52 | query,
53 | query_data.get('folder_id') or os.environ.get('LOADTESTING_FOLDER_ID'),
54 | panel.queue,
55 | self.request_timeout,
56 | as_list(self.get_option('priority_labels')),
57 | as_list(self.get_option('ignore_labels')),
58 | )
59 | try:
60 | senset.update(sensor.get_sensors())
61 | self.collector.add_sensor(sensor)
62 | except ConnectionError:
63 | LOGGER.warning(f'ConnectionError when trying to get sensors with query: {query}', exc_info=True)
64 | panel.add_sensors(senset)
65 | self.collector.add_panel(panel)
66 | else:
67 | LOGGER.warning('YC Token not found')
68 |
--------------------------------------------------------------------------------
/yandextank/plugins/Pandora/tests/test_pandora_plugin.py:
--------------------------------------------------------------------------------
1 | from http.server import SimpleHTTPRequestHandler, HTTPServer
2 |
3 | import pytest
4 | from mock import MagicMock
5 | from threading import Thread
6 |
7 | from yandextank.plugins.Pandora import Plugin
8 |
9 | # https://raw.githubusercontent.com/yandex/yandex-tank/develop/README.md
10 |
11 |
12 | class RequestHandler(SimpleHTTPRequestHandler):
13 |
14 | def _do_handle(self):
15 | content = '{"test": "ammo"}'.encode('utf-8')
16 | self.send_response(200)
17 | self.send_header('Content-Type', 'application/json')
18 | self.send_header('Content-Length', len(content))
19 | self.end_headers()
20 | self.wfile.write(content)
21 |
22 | def do_GET(self):
23 | self._do_handle()
24 |
25 | def do_HEAD(self):
26 | self._do_handle()
27 |
28 |
29 | @pytest.fixture(scope='module')
30 | def pandora_server():
31 | server = HTTPServer(('localhost', 1234), RequestHandler)
32 | t = Thread(target=server.serve_forever, name="StatServer")
33 | try:
34 | t.start()
35 | yield
36 | finally:
37 | server.shutdown()
38 | server.socket.close()
39 | t.join()
40 |
41 |
42 | @pytest.mark.parametrize(
43 | 'cfg, expected',
44 | [
45 | (
46 | {
47 | 'pools': [
48 | {
49 | 'ammo': {
50 | 'uri-headers': '[User-Agent: Wget/1.13.4 (linux-gnu)] [Host: foo.ru] [Accept-Encoding: gzip,deflate,sdch]',
51 | 'type': 'uri',
52 | 'file': 'http://localhost:1234/ammo',
53 | },
54 | 'gun': {'answlog': {'enabled': 'true', 'path': 'answ.log', 'filter': 'error'}},
55 | }
56 | ]
57 | },
58 | {
59 | 'pools': [
60 | {
61 | 'ammo': {
62 | 'uri-headers': '[User-Agent: Wget/1.13.4 (linux-gnu)] [Host: foo.ru] [Accept-Encoding: gzip,deflate,sdch]',
63 | 'type': 'uri',
64 | 'file': 'some local file',
65 | },
66 | 'gun': {'answlog': {'enabled': 'false', 'path': 'answ.log', 'filter': 'error'}},
67 | }
68 | ]
69 | },
70 | )
71 | ],
72 | )
73 | def test_patch_config(cfg, expected, pandora_server):
74 | plugin = Plugin(MagicMock(), {}, 'pandora')
75 | # '/tmp/9b73d966bcbf27467d4c4190cfe58c2a.downloaded_resource'
76 | filename = plugin.patch_config(cfg)['pools'][0]['ammo']['file']
77 | assert filename.endswith('.downloaded_resource')
78 |
79 |
80 | @pytest.mark.parametrize(
81 | 'line', ['panic: short description', 'today ERROR shit happens', 'again\tFATAL oops i did it again']
82 | )
83 | def test_log_line_contains_error(line):
84 | assert Plugin.check_log_line_contains_error(line)
85 |
86 |
87 | @pytest.mark.parametrize(
88 | 'line',
89 | [
90 | 'not a panic: actually',
91 | 'just string',
92 | ],
93 | )
94 | def test_log_line_contains_no_error(line):
95 | assert not Plugin.check_log_line_contains_error(line)
96 |
--------------------------------------------------------------------------------
/yandextank/plugins/YCMonitoring/tests/test_sensor.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import json
3 | import os
4 |
5 | from yandextank.common.util import get_test_path
6 | from yandextank.plugins.YCMonitoring.sensor import YCMonitoringSensor, parse_yc_monitoring_query
7 |
8 |
9 | SENSOR = YCMonitoringSensor(
10 | 'example',
11 | '',
12 | '',
13 | '',
14 | '',
15 | None,
16 | 10,
17 | priority_labels=['cpu_name', 'label'],
18 | ignore_labels=[
19 | 'service',
20 | 'resource_type',
21 | 'device',
22 | 'interface_number',
23 | 'source_metric',
24 | 'subcluster_name',
25 | 'shard',
26 | 'dc',
27 | ],
28 | )
29 |
30 |
31 | @pytest.mark.parametrize(
32 |     'json_file_name, formatted_sensors',
33 | [
34 | ('COUNTER.json', {'example_load-mock-kuber; kube-proxy-bb6kp; kube-system'}),
35 | (
36 | 'DGAUGE.json',
37 | {
38 | 'example_replica; test_host.net; postgresql_loadtesting',
39 | 'example_primary; test_host_2.net; postgresql_loadtesting',
40 | },
41 | ),
42 | (
43 | 'IGAUGE.json',
44 | {'example_test1-instance-group', 'example_test2-instance-group'},
45 | ),
46 | (
47 | 'RATE.json',
48 | {'example_dqtcr5464dbkv1s1rtrt; instance_example; 5', 'example_dqtcr5464dbkv1s1rtrt; instance_example; 2'},
49 | ),
50 | ],
51 | )
52 | def test_format_sensors(json_file_name: str, formatted_sensors: set):
53 | with open(
54 | os.path.join(get_test_path(), f'yandextank/plugins/YCMonitoring/tests/{json_file_name}'), 'r'
55 | ) as json_file:
56 | data = json.load(json_file)
57 |
58 | sensors = set()
59 | if isinstance(data, dict) and data.get('metrics'):
60 | for idx, metric in enumerate(data['metrics']):
61 | sensors.add(SENSOR.format_sensor(metric['labels'], idx))
62 |     assert sensors == formatted_sensors
63 |
64 |
65 | @pytest.mark.parametrize(
66 | 'query, expected_query, folder_id',
67 | [
68 | ('"objects_count"{service="storage"}', '"objects_count"{service="storage"}', ''),
69 | (
70 | '"objects_count"{folderId=\'hahahaha\' , service="storage"}',
71 | '"objects_count"{service="storage"}',
72 | 'hahahaha',
73 | ),
74 | ('"objects_count"{folderId=hahahaha , service="storage"}', '"objects_count"{service="storage"}', 'hahahaha'),
75 | (
76 | 'cpu_utilization{folderId="hahahaha", service=\'compute\', resource_type=\'vm\', resource_id=\'resource-rc1b-1\'}',
77 | 'cpu_utilization{service=\'compute\', resource_type=\'vm\', resource_id=\'resource-rc1b-1\'}',
78 | 'hahahaha',
79 | ),
80 | (
81 | 'alias(series_sum("instance", "app.request_latency_ms_count"{folderId="asldkfjh123", service="custom", handle="/path"}), "{{instance}}")',
82 | 'alias(series_sum("instance", "app.request_latency_ms_count"{service="custom", handle="/path"}), "{{instance}}")',
83 | 'asldkfjh123',
84 | ),
85 | ],
86 | )
87 | def test_parse_yc_monitoring_query(query, expected_query, folder_id):
88 | actual_query, actual_folder_id = parse_yc_monitoring_query(query)
89 | assert actual_query == expected_query
90 | assert actual_folder_id == folder_id
91 |
--------------------------------------------------------------------------------
/yandextank/contrib/netort/netort/data_processing.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | try:
4 | import queue as q
5 | except ImportError:
6 | import Queue as q
7 | import threading
8 | import time
9 |
10 |
11 | logger = logging.getLogger(__name__)
12 |
13 |
14 | def get_nowait_from_queue(queue):
15 | """Collect all immediately available items from a queue"""
16 | data = []
17 | for _ in range(queue.qsize()):
18 | try:
19 | data.append(queue.get_nowait())
20 | except q.Empty:
21 | break
22 | return data
23 |
24 |
25 | class Drain(threading.Thread):
26 | """
27 | Drain a generator to a destination that answers to put(), in a thread
28 | """
29 |
30 | def __init__(self, source, destination):
31 | super(Drain, self).__init__()
32 | self.source = source
33 | self.destination = destination
34 | self._finished = threading.Event()
35 | self._interrupted = threading.Event()
36 | self.daemon = True # bdk+ytank stuck w/o this at join of this thread
37 |
38 | def run(self):
39 | try:
40 | for item in self.source:
41 | self.destination.put(item)
42 | if self._interrupted.is_set():
43 | break
44 | except Exception as e:
45 | logger.error(e, exc_info=True)
46 | self._interrupted.set()
47 | finally:
48 | self._finished.set()
49 |
50 | def wait(self, timeout=None):
51 | self._finished.wait(timeout=timeout)
52 |
53 | def close(self):
54 | self._interrupted.set()
55 |
56 |
57 | class Tee(threading.Thread):
58 | """Copy items from one queue to multiple in a thread.
59 |
60 | Note:
61 | Items are passed by reference.
62 | """
63 |
64 | def __init__(self, source, destination, type):
65 | """
66 | Args:
67 | source (queue): where to get items from
68 | destination (list): list of queues where to put items from the source
69 | type (string): ???
70 | """
71 | # TODO: what is type?!
72 | super(Tee, self).__init__()
73 | self.source = source
74 | self.destination = destination # TODO: this is actually a list of destinations. Rename.
75 | self.type = type
76 | self._finished = threading.Event()
77 | self._interrupted = threading.Event()
78 | self.daemon = True # just in case, bdk+ytank stuck w/o this at join of Drain thread
79 |
80 | def run(self):
81 | while not self._interrupted.is_set():
82 | data = get_nowait_from_queue(self.source)
83 | for item in data:
84 | for destination in self.destination:
85 | destination.put(item, self.type)
86 | if self._interrupted.is_set():
87 | break
88 | if self._interrupted.is_set():
89 | break
90 | if self._interrupted.is_set():
91 | break
92 | time.sleep(0.5)
93 | self._finished.set()
94 |
95 | def wait(self, timeout=None):
96 | self._finished.wait(timeout=timeout)
97 |
98 | def close(self):
99 | self._interrupted.set()
100 |
101 |
102 | # TODO: does it really chop anything?
103 | class Chopper(object):
104 | def __init__(self, source):
105 | self.source = source
106 |
107 | def __iter__(self):
108 | for chunk in self.source:
109 | for item in chunk:
110 | yield item
111 |
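
A small sketch tying `Drain`, `Chopper`, and `get_nowait_from_queue` together (illustrative only):

    import queue

    from yandextank.contrib.netort.netort.data_processing import (
        Chopper,
        Drain,
        get_nowait_from_queue,
    )

    # Chopper flattens an iterable of chunks into single items
    print(list(Chopper(iter([[1, 2], [3], [4, 5]]))))  # -> [1, 2, 3, 4, 5]

    # Drain moves items from a generator into anything with put(), in a thread
    destination = queue.Queue()
    drain = Drain(iter(range(5)), destination)
    drain.start()
    drain.wait()  # blocks until the source generator is exhausted
    print(get_nowait_from_queue(destination))  # -> [0, 1, 2, 3, 4]
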
--------------------------------------------------------------------------------