├── .devenv.hosting.yaml ├── .flake8 ├── .github ├── CODEOWNERS └── workflows │ └── dco.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.md ├── build_assets └── activate.patch ├── pyproject.toml ├── pytest_tests ├── __init__.py ├── helpers │ ├── __init__.py │ ├── acl.py │ ├── aws_cli_client.py │ ├── binary_version.py │ ├── cli_helpers.py │ ├── cluster.py │ ├── complex_object_actions.py │ ├── container.py │ ├── container_access.py │ ├── env_properties.py │ ├── epoch.py │ ├── failover_utils.py │ ├── file_helper.py │ ├── frostfs_verbs.py │ ├── http_gate.py │ ├── iptables_helper.py │ ├── k6.py │ ├── node_management.py │ ├── object_access.py │ ├── payment_neogo.py │ ├── remote_process.py │ ├── s3_helper.py │ ├── storage_group.py │ ├── storage_object_info.py │ ├── storage_policy.py │ ├── test_control.py │ ├── tombstone.py │ ├── utility.py │ └── wallet.py ├── pytest.ini ├── requirements.txt ├── resources │ ├── common.py │ ├── files │ │ ├── policy.json │ │ └── s3_bearer_rules.json │ └── load_params.py ├── steps │ ├── __init__.py │ ├── cluster_test_base.py │ ├── load.py │ ├── s3_gate_base.py │ ├── s3_gate_bucket.py │ ├── s3_gate_object.py │ ├── session_token.py │ └── storage_object.py └── testsuites │ ├── __init__.py │ ├── acl │ ├── conftest.py │ ├── storage_group │ │ └── test_storagegroup.py │ ├── test_acl.py │ ├── test_bearer.py │ ├── test_eacl.py │ └── test_eacl_filters.py │ ├── conftest.py │ ├── container │ └── test_container.py │ ├── failovers │ ├── __init__.py │ ├── test_failover_network.py │ └── test_failover_storage.py │ ├── load │ └── test_load.py │ ├── network │ └── test_node_management.py │ ├── object │ ├── test_object_api.py │ ├── test_object_api_bearer.py │ ├── test_object_lifetime.py │ └── test_object_lock.py │ ├── payment │ └── test_balance.py │ ├── services │ ├── http_gate │ │ ├── test_http_bearer.py │ │ ├── test_http_gate.py │ │ ├── test_http_headers.py │ │ ├── test_http_object.py │ │ ├── 
test_http_streaming.py │ │ └── test_http_system_header.py │ ├── s3_gate │ │ ├── test_s3_ACL.py │ │ ├── test_s3_bucket.py │ │ ├── test_s3_gate.py │ │ ├── test_s3_locking.py │ │ ├── test_s3_multipart.py │ │ ├── test_s3_object.py │ │ ├── test_s3_policy.py │ │ ├── test_s3_tagging.py │ │ └── test_s3_versioning.py │ └── test_binaries.py │ ├── session_token │ ├── conftest.py │ ├── test_object_session_token.py │ ├── test_static_object_session_token.py │ └── test_static_session_token_container.py │ └── shard │ └── test_control_shard.py ├── requirements.txt ├── requirements_dev.txt ├── venv └── local-pytest │ └── environment.sh └── venv_template.mk /.devenv.hosting.yaml: -------------------------------------------------------------------------------- 1 | hosts: 2 | - address: localhost 3 | plugin_name: docker 4 | services: 5 | - name: s01 6 | attributes: 7 | container_name: s01 8 | config_path: ../frostfs-dev-env/services/storage/.storage.env 9 | wallet_path: ../frostfs-dev-env/services/storage/wallet01.json 10 | local_config_path: ./TemporaryDir/empty-password.yml 11 | local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json 12 | wallet_password: "" 13 | volume_name: storage_storage_s01 14 | endpoint_data0: s01.frostfs.devenv:8080 15 | control_endpoint: s01.frostfs.devenv:8081 16 | un_locode: "RU MOW" 17 | - name: s02 18 | attributes: 19 | container_name: s02 20 | config_path: ../frostfs-dev-env/services/storage/.storage.env 21 | wallet_path: ../frostfs-dev-env/services/storage/wallet02.json 22 | local_config_path: ./TemporaryDir/empty-password.yml 23 | local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json 24 | wallet_password: "" 25 | volume_name: storage_storage_s02 26 | endpoint_data0: s02.frostfs.devenv:8080 27 | control_endpoint: s02.frostfs.devenv:8081 28 | un_locode: "RU LED" 29 | - name: s03 30 | attributes: 31 | container_name: s03 32 | config_path: ../frostfs-dev-env/services/storage/.storage.env 33 | wallet_path: 
../frostfs-dev-env/services/storage/wallet03.json 34 | local_config_path: ./TemporaryDir/empty-password.yml 35 | local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json 36 | wallet_password: "" 37 | volume_name: storage_storage_s03 38 | endpoint_data0: s03.frostfs.devenv:8080 39 | control_endpoint: s03.frostfs.devenv:8081 40 | un_locode: "SE STO" 41 | - name: s04 42 | attributes: 43 | container_name: s04 44 | config_path: ../frostfs-dev-env/services/storage/.storage.env 45 | wallet_path: ../frostfs-dev-env/services/storage/wallet04.json 46 | local_config_path: ./TemporaryDir/empty-password.yml 47 | local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json 48 | wallet_password: "" 49 | volume_name: storage_storage_s04 50 | endpoint_data0: s04.frostfs.devenv:8080 51 | control_endpoint: s04.frostfs.devenv:8081 52 | un_locode: "FI HEL" 53 | - name: s3-gate01 54 | attributes: 55 | container_name: s3_gate 56 | config_path: ../frostfs-dev-env/services/s3_gate/.s3.env 57 | wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json 58 | local_config_path: ./TemporaryDir/password-s3.yml 59 | local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json 60 | wallet_password: "s3" 61 | endpoint_data0: https://s3.frostfs.devenv:8080 62 | - name: http-gate01 63 | attributes: 64 | container_name: http_gate 65 | config_path: ../frostfs-dev-env/services/http_gate/.http.env 66 | wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json 67 | local_config_path: ./TemporaryDir/password-other.yml 68 | local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json 69 | wallet_password: "one" 70 | endpoint_data0: http://http.frostfs.devenv 71 | - name: ir01 72 | attributes: 73 | container_name: ir01 74 | config_path: ../frostfs-dev-env/services/ir/.ir.env 75 | wallet_path: ../frostfs-dev-env/services/ir/az.json 76 | local_config_path: ./TemporaryDir/password-other.yml 77 | local_wallet_path: ../frostfs-dev-env/services/ir/az.json 78 | 
wallet_password: "one" 79 | - name: morph-chain01 80 | attributes: 81 | container_name: morph_chain 82 | config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml 83 | wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json 84 | local_config_path: ./TemporaryDir/password-other.yml 85 | local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json 86 | wallet_password: "one" 87 | endpoint_internal0: http://morph-chain.frostfs.devenv:30333 88 | - name: main-chain01 89 | attributes: 90 | container_name: main_chain 91 | config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml 92 | wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json 93 | local_config_path: ./TemporaryDir/password-other.yml 94 | local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json 95 | wallet_password: "one" 96 | endpoint_internal0: http://main-chain.frostfs.devenv:30333 97 | - name: coredns01 98 | attributes: 99 | container_name: coredns 100 | clis: 101 | - name: frostfs-cli 102 | exec_path: frostfs-cli 103 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude = 3 | .git, 4 | __pycache__, 5 | .idea, 6 | .pytest_cache, 7 | venv 8 | per-file-ignores = 9 | # imported but unused 10 | __init__.py: F401 11 | max-line-length = 100 12 | disable-noqa -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @vdomnich-yadro @dansingjulia @yadro-vavdeev @alexchetaev @abereziny 2 | -------------------------------------------------------------------------------- /.github/workflows/dco.yml: -------------------------------------------------------------------------------- 1 | name: DCO check 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | - develop 8 | 
9 | jobs: 10 | commits_check_job: 11 | runs-on: ubuntu-latest 12 | name: Commits Check 13 | steps: 14 | - name: Get PR Commits 15 | id: 'get-pr-commits' 16 | uses: tim-actions/get-pr-commits@master 17 | with: 18 | token: ${{ secrets.GITHUB_TOKEN }} 19 | - name: DCO Check 20 | uses: tim-actions/dco@master 21 | with: 22 | commits: ${{ steps.get-pr-commits.outputs.commits }} 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # ignore IDE files 2 | .vscode 3 | .idea 4 | 5 | .DS_Store 6 | 7 | venv_macos 8 | 9 | 10 | # ignore test results 11 | **/log.html 12 | **/output.xml 13 | **/report.html 14 | **/dockerlogs*.tar.gz 15 | allure_results/* 16 | xunit_results.xml 17 | 18 | # ignore caches under any path 19 | **/__pycache__ 20 | **/.pytest_cache 21 | 22 | # ignore work directories and setup files 23 | .setup 24 | .env 25 | TemporaryDir/* 26 | artifacts/* 27 | docs/* 28 | venv.*/* 29 | wallet_config.yml 30 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/psf/black 3 | rev: 22.8.0 4 | hooks: 5 | - id: black 6 | language_version: python3.9 7 | - repo: https://github.com/pycqa/isort 8 | rev: 5.12.0 9 | hooks: 10 | - id: isort 11 | name: isort (python) 12 | 13 | ci: 14 | autofix_prs: false 15 | autoupdate_schedule: quarterly 16 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/make -f 2 | 3 | .DEFAULT_GOAL := help 4 | 5 | SHELL ?= bash 6 | 7 | VENVS = $(shell ls -1d venv/*/ | sort -u | xargs basename -a) 8 | 9 | .PHONY: all 10 | all: venvs 11 | 12 | include venv_template.mk 13 | 14 | .PHONY: venvs 15 | venvs: 16 | $(foreach 
venv,$(VENVS),venv.$(venv)) 17 | 18 | $(foreach venv,$(VENVS),$(eval $(call VENV_template,$(venv)))) 19 | 20 | clean: 21 | rm -rf venv.* 22 | 23 | pytest-local: 24 | @echo "⇒ Run Pytest" 25 | python -m pytest pytest_tests/testsuites/ 26 | 27 | help: 28 | @echo "⇒ run Run testcases ${R}" 29 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Testcases structure 2 | 3 | Tests written with PyTest Framework are located under `pytest_tests/testsuites` directory. 4 | 5 | These tests rely on resources and utility modules that have been originally developed for Pytest Framework. 6 | 7 | ## Testcases execution 8 | 9 | ### Initial preparation 10 | 11 | 1. Install frostfs-cli 12 | - `git clone git@github.com:TrueCloudLab/frostfs-node.git` 13 | - `cd frostfs-node` 14 | - `make` 15 | - `sudo cp bin/frostfs-cli /usr/local/bin/frostfs-cli` 16 | 17 | 2. Install frostfs-authmate 18 | - `git clone git@github.com:TrueCloudLab/frostfs-s3-gw.git` 19 | - `cd frostfs-s3-gw` 20 | - `make` 21 | - `sudo cp bin/frostfs-s3-authmate /usr/local/bin/frostfs-authmate` 22 | 23 | 3. Install neo-go 24 | - `git clone git@github.com:nspcc-dev/neo-go.git` 25 | - `cd neo-go` 26 | - `git checkout v0.101.0` (or the current version in the frostfs-dev-env) 27 | - `make` 28 | - `sudo cp bin/neo-go /usr/local/bin/neo-go` 29 | or download binary from releases: https://github.com/nspcc-dev/neo-go/releases 30 | 31 | 4. Clone frostfs-dev-env 32 | `git clone git@github.com:TrueCloudLab/frostfs-dev-env.git` 33 | 34 | Note that we expect frostfs-dev-env to be located under 35 | the `/../frostfs-dev-env` directory. If you put this repo in any other place, 36 | manually set the full path to frostfs-dev-env in the environment variable `DEVENV_PATH` at this step. 37 | 38 | 5. 
Make sure you have installed all the following prerequisites on your machine 39 | 40 | ``` 41 | make 42 | python3.10 43 | python3.10-dev 44 | libssl-dev 45 | ``` 46 | As we use frostfs-dev-env, you'll also need to install 47 | [prerequisites](https://github.com/TrueCloudLab/frostfs-dev-env#prerequisites) of this repository. 48 | 49 | 6. Prepare virtualenv 50 | 51 | ```shell 52 | $ make venv.local-pytest 53 | $ . venv.local-pytest/bin/activate 54 | ``` 55 | 56 | 7. Setup pre-commit hooks to run code formatters on staged files before you run a `git commit` command: 57 | 58 | ```shell 59 | $ pre-commit install 60 | ``` 61 | 62 | Optionally you might want to integrate code formatters with your code editor to apply formatters to code files as you go: 63 | * isort is supported by [PyCharm](https://plugins.jetbrains.com/plugin/15434-isortconnect), [VS Code](https://cereblanco.medium.com/setup-black-and-isort-in-vscode-514804590bf9). Plugins exist for other IDEs/editors as well. 64 | * black can be integrated with multiple editors, please, instructions are available [here](https://black.readthedocs.io/en/stable/integrations/editors.html). 65 | 66 | 8. Install Allure CLI 67 | 68 | Allure CLI installation is not an easy task, so a better option might be to run allure from 69 | docker container (please, refer to p.2 of the next section for instructions). 70 | 71 | To install Allure CLI you may take one of the following ways: 72 | 73 | - Follow the [instruction](https://docs.qameta.io/allure/#_linux) from the official website 74 | - Consult [the thread](https://github.com/allure-framework/allure2/issues/989) 75 | - Download release from the Github 76 | ```shell 77 | $ wget https://github.com/allure-framework/allure2/releases/download/2.18.1/allure_2.18.1-1_all.deb 78 | $ sudo apt install ./allure_2.18.1-1_all.deb 79 | ``` 80 | You also need the `default-jre` package installed. 81 | 82 | If none of the options worked for you, please complete the instruction with your approach. 
83 | 84 | ### Run and get report 85 | 86 | 1. Run tests 87 | 88 | Make sure that the virtualenv is activated, then execute the following command to run a singular test suite or all the suites in the directory 89 | ```shell 90 | $ pytest --alluredir my-allure-123 pytest_tests/testsuites/object/test_object_api.py 91 | $ pytest --alluredir my-allure-123 pytest_tests/testsuites/ 92 | ``` 93 | 94 | 2. Generate report 95 | 96 | If you opted to install Allure CLI, you can generate a report using the command `allure generate`. The web representation of the report will be under `allure-report` directory: 97 | ```shell 98 | $ allure generate my-allure-123 99 | $ ls allure-report/ 100 | app.js data export favicon.ico history index.html plugins styles.css widgets 101 | ``` 102 | 103 | To inspect the report in a browser, run 104 | ```shell 105 | $ allure serve my-allure-123 106 | ``` 107 | 108 | If you prefer to run allure from Docker, you can use the following command: 109 | ```shell 110 | $ mkdir -p $PWD/allure-reports 111 | $ docker run -p 5050:5050 -e CHECK_RESULTS_EVERY_SECONDS=30 -e KEEP_HISTORY=1 \ 112 | -v $PWD/my-allure-123:/app/allure-results \ 113 | -v $PWD/allure-reports:/app/default-reports \ 114 | frankescobar/allure-docker-service 115 | ``` 116 | 117 | Then, you can check the allure report in your browser [by this link](http://localhost:5050/allure-docker-service/projects/default/reports/latest/index.html?redirect=false) 118 | 119 | NOTE: feel free to select a different location for `allure-reports` directory, there is no requirement to have it inside `frostfs-testcases`. For example, you can place it under `/tmp` path. 120 | 121 | # Contributing 122 | 123 | Feel free to contribute to this project after reading the [contributing 124 | guidelines](CONTRIBUTING.md). 125 | 126 | Before starting to work on a certain topic, create a new issue first, describing 127 | the feature/topic you are going to implement. 
128 | 129 | 130 | # License 131 | 132 | - [GNU General Public License v3.0](LICENSE) 133 | 134 | ## Pytest marks 135 | 136 | Custom pytest marks used in tests: 137 | * `sanity` - Tests must be runs in sanity testruns. 138 | * `smoke` - Tests must be runs in smoke testruns. 139 | -------------------------------------------------------------------------------- /build_assets/activate.patch: -------------------------------------------------------------------------------- 1 | diff -urN bin.orig/activate bin/activate 2 | --- bin.orig/activate 2018-12-27 14:55:13.916461020 +0900 3 | +++ bin/activate 2018-12-27 20:38:35.223248728 +0900 4 | @@ -30,6 +30,15 @@ 5 | unset _OLD_VIRTUAL_PS1 6 | fi 7 | 8 | + # Unset exported dev-env variables 9 | + pushd ${DEVENV_PATH} > /dev/null 10 | + unset `make env | awk -F= '{print $1}'` 11 | + popd > /dev/null 12 | + 13 | + # Unset external env variables 14 | + declare -f env_deactivate > /dev/null && env_deactivate 15 | + declare -f venv_deactivate > /dev/null && venv_deactivate 16 | + 17 | unset VIRTUAL_ENV 18 | if [ ! "${1-}" = "nondestructive" ] ; then 19 | # Self destruct! 20 | @@ -47,6 +56,11 @@ 21 | PATH="$VIRTUAL_ENV/bin:$PATH" 22 | export PATH 23 | 24 | +# Set external variables 25 | +if [ -f ${VIRTUAL_ENV}/bin/environment.sh ] ; then 26 | + . ${VIRTUAL_ENV}/bin/environment.sh 27 | +fi 28 | + 29 | # unset PYTHONHOME if set 30 | if ! 
[ -z "${PYTHONHOME+_}" ] ; then 31 | _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME" 32 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.isort] 2 | profile = "black" 3 | src_paths = ["pytest_tests"] 4 | line_length = 100 5 | 6 | [tool.black] 7 | line-length = 100 8 | target-version = ["py310"] 9 | -------------------------------------------------------------------------------- /pytest_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrueCloudLab/frostfs-testcases/565d740239dc07f53434433d4035b5933866acd3/pytest_tests/__init__.py -------------------------------------------------------------------------------- /pytest_tests/helpers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrueCloudLab/frostfs-testcases/565d740239dc07f53434433d4035b5933866acd3/pytest_tests/helpers/__init__.py -------------------------------------------------------------------------------- /pytest_tests/helpers/binary_version.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import re 3 | 4 | from frostfs_testlib.cli import FrostfsAdm, FrostfsCli 5 | from frostfs_testlib.hosting import Hosting 6 | from frostfs_testlib.shell import Shell 7 | 8 | from pytest_tests.resources.common import FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC, WALLET_CONFIG 9 | 10 | logger = logging.getLogger("NeoLogger") 11 | 12 | 13 | def get_local_binaries_versions(shell: Shell) -> dict[str, str]: 14 | versions = {} 15 | 16 | for binary in ["neo-go", "frostfs-authmate"]: 17 | out = shell.exec(f"{binary} --version").stdout 18 | versions[binary] = _parse_version(out) 19 | 20 | frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, WALLET_CONFIG) 21 | versions["frostfs-cli"] = 
_parse_version(frostfs_cli.version.get().stdout) 22 | 23 | try: 24 | frostfs_adm = FrostfsAdm(shell, FROSTFS_ADM_EXEC) 25 | versions["frostfs-adm"] = _parse_version(frostfs_adm.version.get().stdout) 26 | except RuntimeError: 27 | logger.info(f"frostfs-adm not installed") 28 | 29 | out = shell.exec("aws --version").stdout 30 | out_lines = out.split("\n") 31 | versions["AWS"] = out_lines[0] if out_lines else "Unknown" 32 | 33 | return versions 34 | 35 | 36 | def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: 37 | versions_by_host = {} 38 | for host in hosting.hosts: 39 | binary_path_by_name = {} # Maps binary name to executable path 40 | for service_config in host.config.services: 41 | exec_path = service_config.attributes.get("exec_path") 42 | if exec_path: 43 | binary_path_by_name[service_config.name] = exec_path 44 | for cli_config in host.config.clis: 45 | binary_path_by_name[cli_config.name] = cli_config.exec_path 46 | 47 | shell = host.get_shell() 48 | versions_at_host = {} 49 | for binary_name, binary_path in binary_path_by_name.items(): 50 | try: 51 | result = shell.exec(f"{binary_path} --version") 52 | versions_at_host[binary_name] = _parse_version(result.stdout) 53 | except Exception as exc: 54 | logger.error(f"Cannot get version for {binary_path} because of\n{exc}") 55 | versions_at_host[binary_name] = "Unknown" 56 | versions_by_host[host.config.address] = versions_at_host 57 | 58 | # Consolidate versions across all hosts 59 | versions = {} 60 | for host, binary_versions in versions_by_host.items(): 61 | for name, version in binary_versions.items(): 62 | captured_version = versions.get(name) 63 | if captured_version: 64 | assert ( 65 | captured_version == version 66 | ), f"Binary {name} has inconsistent version on host {host}" 67 | else: 68 | versions[name] = version 69 | return versions 70 | 71 | 72 | def _parse_version(version_output: str) -> str: 73 | version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE) 74 | 
return version.group(1).strip() if version else "Unknown" 75 | -------------------------------------------------------------------------------- /pytest_tests/helpers/cli_helpers.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3.10 2 | 3 | """ 4 | Helper functions to use with `frostfs-cli`, `neo-go` and other CLIs. 5 | """ 6 | import json 7 | import logging 8 | import subprocess 9 | import sys 10 | from contextlib import suppress 11 | from datetime import datetime 12 | from textwrap import shorten 13 | from typing import Union 14 | 15 | import allure 16 | import pexpect 17 | 18 | logger = logging.getLogger("NeoLogger") 19 | COLOR_GREEN = "\033[92m" 20 | COLOR_OFF = "\033[0m" 21 | 22 | 23 | def _cmd_run(cmd: str, timeout: int = 30) -> str: 24 | """ 25 | Runs given shell command , in case of success returns its stdout, 26 | in case of failure returns error message. 27 | """ 28 | compl_proc = None 29 | start_time = datetime.now() 30 | try: 31 | logger.info(f"{COLOR_GREEN}Executing command: {cmd}{COLOR_OFF}") 32 | start_time = datetime.utcnow() 33 | compl_proc = subprocess.run( 34 | cmd, 35 | check=True, 36 | universal_newlines=True, 37 | stdout=subprocess.PIPE, 38 | stderr=subprocess.STDOUT, 39 | timeout=timeout, 40 | shell=True, 41 | ) 42 | output = compl_proc.stdout 43 | return_code = compl_proc.returncode 44 | end_time = datetime.utcnow() 45 | logger.info(f"{COLOR_GREEN}Output: {output}{COLOR_OFF}") 46 | _attach_allure_log(cmd, output, return_code, start_time, end_time) 47 | 48 | return output 49 | except subprocess.CalledProcessError as exc: 50 | logger.info( 51 | f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode} " f"\nOutput: {exc.output}" 52 | ) 53 | end_time = datetime.now() 54 | return_code, cmd_output = subprocess.getstatusoutput(cmd) 55 | _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time) 56 | 57 | raise RuntimeError( 58 | f"Command: {cmd}\n" f"Error:\nreturn code: 
{exc.returncode}\n" f"Output: {exc.output}" 59 | ) from exc 60 | except OSError as exc: 61 | raise RuntimeError(f"Command: {cmd}\n" f"Output: {exc.strerror}") from exc 62 | except Exception as exc: 63 | return_code, cmd_output = subprocess.getstatusoutput(cmd) 64 | end_time = datetime.now() 65 | _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time) 66 | logger.info( 67 | f"Command: {cmd}\n" 68 | f"Error:\nreturn code: {return_code}\n" 69 | f"Output: {exc.output.decode('utf-8') if type(exc.output) is bytes else exc.output}" 70 | ) 71 | raise 72 | 73 | 74 | def _run_with_passwd(cmd: str) -> str: 75 | child = pexpect.spawn(cmd) 76 | child.delaybeforesend = 1 77 | child.expect(".*") 78 | child.sendline("\r") 79 | if sys.platform == "darwin": 80 | child.expect(pexpect.EOF) 81 | cmd = child.before 82 | else: 83 | child.wait() 84 | cmd = child.read() 85 | return cmd.decode() 86 | 87 | 88 | def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = "json") -> str: 89 | child = pexpect.spawn(cmd) 90 | child.delaybeforesend = 1 91 | 92 | child.expect("AWS Access Key ID.*") 93 | child.sendline(key_id) 94 | 95 | child.expect("AWS Secret Access Key.*") 96 | child.sendline(access_key) 97 | 98 | child.expect("Default region name.*") 99 | child.sendline("") 100 | 101 | child.expect("Default output format.*") 102 | child.sendline(out_format) 103 | 104 | child.wait() 105 | cmd = child.read() 106 | # child.expect(pexpect.EOF) 107 | # cmd = child.before 108 | return cmd.decode() 109 | 110 | 111 | def _attach_allure_log( 112 | cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime 113 | ) -> None: 114 | command_attachment = ( 115 | f"COMMAND: '{cmd}'\n" 116 | f"OUTPUT:\n {output}\n" 117 | f"RC: {return_code}\n" 118 | f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {end_time - start_time}" 119 | ) 120 | with allure.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): 121 | 
allure.attach(command_attachment, "Command execution", allure.attachment_type.TEXT) 122 | 123 | 124 | def log_command_execution(cmd: str, output: Union[str, dict]) -> None: 125 | logger.info(f"{cmd}: {output}") 126 | with suppress(Exception): 127 | json_output = json.dumps(output, indent=4, sort_keys=True) 128 | output = json_output 129 | command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" 130 | with allure.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): 131 | allure.attach(command_attachment, "Command execution", allure.attachment_type.TEXT) 132 | -------------------------------------------------------------------------------- /pytest_tests/helpers/complex_object_actions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | """ 4 | This module contains functions which are used for Large Object assembling: 5 | getting Last Object and split and getting Link Object. It is not enough to 6 | simply perform a "raw" HEAD request, as noted in the issue: 7 | https://github.com/nspcc-dev/neofs-node/issues/1304. Therefore, the reliable 8 | retrieval of the aforementioned objects must be done this way: send direct 9 | "raw" HEAD request to the every Storage Node and return the desired OID on 10 | first non-null response. 
def get_storage_object_chunks(
    storage_object: StorageObjectInfo, shell: Shell, cluster: Cluster
) -> list[str]:
    """
    Get complex object split objects ids (no linker object)

    Args:
        storage_object: storage_object to get it's chunks
        shell: client shell to do cmd requests
        cluster: cluster object under test

    Returns:
        list of object ids of complex object chunks
    """

    # Fixed a typo in the step title: the original read "(f{...})" because a
    # stray "f" had leaked into the f-string body.
    with allure.step(f"Get complex object chunks ({storage_object.oid})"):
        # Find the link object first; its header lists all chunk children.
        split_object_id = get_link_object(
            storage_object.wallet_file_path,
            storage_object.cid,
            storage_object.oid,
            shell,
            cluster.storage_nodes,
            is_direct=False,
        )
        head = head_object(
            storage_object.wallet_file_path,
            storage_object.cid,
            split_object_id,
            shell,
            cluster.default_rpc_endpoint,
        )

        chunks_object_ids = []
        if "split" in head["header"] and "children" in head["header"]["split"]:
            chunks_object_ids = head["header"]["split"]["children"]

        return chunks_object_ids


def get_complex_object_split_ranges(
    storage_object: StorageObjectInfo, shell: Shell, cluster: Cluster
) -> list[Tuple[int, int]]:

    """
    Get list of split ranges tuples (offset, length) of a complex object
    For example if object size if 100 and max object size in system is 30
    the returned list should be
    [(0, 30), (30, 30), (60, 30), (90, 10)]

    Args:
        storage_object: storage_object to get it's chunks
        shell: client shell to do cmd requests
        cluster: cluster object under test

    Returns:
        list of (offset, length) tuples, one per chunk, in chunk order
    """

    ranges: list = []
    offset = 0
    chunks_ids = get_storage_object_chunks(storage_object, shell, cluster)
    for chunk_id in chunks_ids:
        head = head_object(
            storage_object.wallet_file_path,
            storage_object.cid,
            chunk_id,
            shell,
            cluster.default_rpc_endpoint,
        )

        # Each chunk contributes (current offset, its payload length); the
        # offset accumulates so the ranges tile the whole object.
        length = int(head["header"]["payloadLength"])
        ranges.append((offset, length))

        offset = offset + length

    return ranges
@allure.step("Get Link Object")
def get_link_object(
    wallet: str,
    cid: str,
    oid: str,
    shell: Shell,
    nodes: list[StorageNode],
    bearer: str = "",
    wallet_config: str = WALLET_CONFIG,
    is_direct: bool = True,
) -> Optional[str]:
    """
    Poll every Storage Node with a raw HEAD request to locate the Link
    Object of a Large (complex) Object.

    Args:
        wallet (str): path to the wallet on whose behalf the Storage Nodes
                        are requested
        cid (str): Container ID which stores the Large Object
        oid (str): Large Object ID
        shell: executor for cli command
        nodes: list of nodes to do search on
        bearer (optional, str): path to Bearer token file
        wallet_config (optional, str): path to the frostfs-cli config file
        is_direct: send request directly to the node or not; this flag
                    turns into `--ttl 1` key
    Returns:
        (str): Link Object ID of the first node that reports one.
        If no Link Object ID is found after polling all Storage Nodes,
        the function logs an error and returns None (it does not raise).
    """
    for node in nodes:
        endpoint = node.get_rpc_endpoint()
        try:
            # Raw HEAD so the node returns split metadata instead of
            # reassembling the object.
            resp = frostfs_verbs.head_object(
                wallet,
                cid,
                oid,
                shell=shell,
                endpoint=endpoint,
                is_raw=True,
                is_direct=is_direct,
                bearer=bearer,
                wallet_config=wallet_config,
            )
            if resp["link"]:
                return resp["link"]
        except Exception:
            # A node without the link object raises; try the next node.
            logger.info(f"No Link Object found on {endpoint}; continue")
    logger.error(f"No Link Object for {cid}/{oid} found among all Storage Nodes")
    return None
@allure.step("Get Last Object")
def get_last_object(
    wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> Optional[str]:
    """
    Poll every Storage Node with a direct raw HEAD request to locate the
    Last Object of a Large (complex) Object.

    Args:
        wallet (str): path to the wallet on whose behalf the Storage Nodes
                        are requested
        cid (str): Container ID which stores the Large Object
        oid (str): Large Object ID
        shell: executor for cli command
        nodes: list of nodes to do search on
    Returns:
        (str): Last Object ID of the first node that reports one.
        If no Last Object ID is found after polling all Storage Nodes,
        the function logs an error and returns None (it does not raise).
    """
    for node in nodes:
        endpoint = node.get_rpc_endpoint()
        try:
            # Raw + direct HEAD so each node answers only from its own storage.
            resp = frostfs_verbs.head_object(
                wallet, cid, oid, shell=shell, endpoint=endpoint, is_raw=True, is_direct=True
            )
            if resp["lastPart"]:
                return resp["lastPart"]
        except Exception:
            # A node without the last part raises; try the next node.
            logger.info(f"No Last Object found on {endpoint}; continue")
    logger.error(f"No Last Object for {cid}/{oid} found among all Storage Nodes")
    return None
def check_full_access_to_container(
    wallet: str,
    cid: str,
    oid: str,
    file_name: str,
    shell: Shell,
    cluster: Cluster,
    bearer: Optional[str] = None,
    wallet_config: Optional[str] = None,
    xhdr: Optional[dict] = None,
):
    """Assert that every object operation on the container succeeds.

    Exercises PUT, HEAD, RANGE, RANGEHASH, SEARCH, GET and DELETE on behalf
    of the given wallet and fails on the first denied operation.
    """
    rpc = cluster.default_rpc_endpoint
    # Authorization context shared by all checks below.
    auth = (bearer, wallet_config, xhdr)

    assert can_put_object(wallet, cid, file_name, shell, cluster, *auth)
    assert can_get_head_object(wallet, cid, oid, shell, rpc, *auth)
    assert can_get_range_of_object(wallet, cid, oid, shell, rpc, *auth)
    assert can_get_range_hash_of_object(wallet, cid, oid, shell, rpc, *auth)
    assert can_search_object(wallet, cid, shell, rpc, oid, *auth)
    assert can_get_object(wallet, cid, oid, file_name, shell, cluster, *auth)
    assert can_delete_object(wallet, cid, oid, shell, rpc, *auth)
def check_full_access_to_container(
    wallet: str,
    cid: str,
    oid: str,
    file_name: str,
    shell: Shell,
    cluster: Cluster,
    bearer: Optional[str] = None,
    wallet_config: Optional[str] = None,
    xhdr: Optional[dict] = None,
):
    """Assert that every object operation (PUT/HEAD/RANGE/RANGEHASH/SEARCH/GET/DELETE)
    succeeds for the given wallet against the container."""
    endpoint = cluster.default_rpc_endpoint
    # Most probes share the same positional argument list.
    common_args = (wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr)
    assert can_put_object(wallet, cid, file_name, shell, cluster, bearer, wallet_config, xhdr)
    assert can_get_head_object(*common_args)
    assert can_get_range_of_object(*common_args)
    assert can_get_range_hash_of_object(*common_args)
    assert can_search_object(wallet, cid, shell, endpoint, oid, bearer, wallet_config, xhdr)
    assert can_get_object(wallet, cid, oid, file_name, shell, cluster, bearer, wallet_config, xhdr)
    assert can_delete_object(*common_args)


def check_no_access_to_container(
    wallet: str,
    cid: str,
    oid: str,
    file_name: str,
    shell: Shell,
    cluster: Cluster,
    bearer: Optional[str] = None,
    wallet_config: Optional[str] = None,
    xhdr: Optional[dict] = None,
):
    """Assert that every object operation is denied for the given wallet."""
    endpoint = cluster.default_rpc_endpoint
    common_args = (wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr)
    assert not can_put_object(wallet, cid, file_name, shell, cluster, bearer, wallet_config, xhdr)
    assert not can_get_head_object(*common_args)
    assert not can_get_range_of_object(*common_args)
    assert not can_get_range_hash_of_object(*common_args)
    assert not can_search_object(wallet, cid, shell, endpoint, oid, bearer, wallet_config, xhdr)
    assert not can_get_object(
        wallet, cid, oid, file_name, shell, cluster, bearer, wallet_config, xhdr
    )
    assert not can_delete_object(*common_args)
def check_custom_access_to_container(
    wallet: str,
    cid: str,
    oid: str,
    file_name: str,
    shell: Shell,
    cluster: Cluster,
    deny_operations: Optional[List[EACLOperation]] = None,
    ignore_operations: Optional[List[EACLOperation]] = None,
    bearer: Optional[str] = None,
    wallet_config: Optional[str] = None,
    xhdr: Optional[dict] = None,
):
    """Probe each (non-ignored) operation and assert that exactly the operations
    in deny_operations fail while all others succeed."""
    endpoint = cluster.default_rpc_endpoint
    denied = [op.value for op in deny_operations or []]
    ignored = [op.value for op in ignore_operations or []]

    # Probes are laid out in the same order the original performed them:
    # PUT, HEAD, GET_RANGE, GET_RANGE_HASH, SEARCH, GET, DELETE.
    probes = {
        EACLOperation.PUT.value: lambda: can_put_object(
            wallet, cid, file_name, shell, cluster, bearer, wallet_config, xhdr
        ),
        EACLOperation.HEAD.value: lambda: can_get_head_object(
            wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
        ),
        EACLOperation.GET_RANGE.value: lambda: can_get_range_of_object(
            wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
        ),
        EACLOperation.GET_RANGE_HASH.value: lambda: can_get_range_hash_of_object(
            wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
        ),
        EACLOperation.SEARCH.value: lambda: can_search_object(
            wallet, cid, shell, endpoint, oid, bearer, wallet_config, xhdr
        ),
        EACLOperation.GET.value: lambda: can_get_object(
            wallet, cid, oid, file_name, shell, cluster, bearer, wallet_config, xhdr
        ),
        EACLOperation.DELETE.value: lambda: can_delete_object(
            wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
        ),
    }

    checks: dict = {}
    for operation, probe in probes.items():
        if operation not in ignored:
            checks[operation] = probe()

    failed_checks = [
        f"allowed {action} failed"
        for action, success in checks.items()
        if not success and action not in denied
    ] + [
        f"denied {action} succeeded"
        for action, success in checks.items()
        if success and action in denied
    ]

    assert not failed_checks, ", ".join(failed_checks)
def check_read_only_container(
    wallet: str,
    cid: str,
    oid: str,
    file_name: str,
    shell: Shell,
    cluster: Cluster,
    bearer: Optional[str] = None,
    wallet_config: Optional[str] = None,
    xhdr: Optional[dict] = None,
):
    """Assert the container is read-only for the wallet: PUT and DELETE are denied,
    all read operations are allowed."""
    return check_custom_access_to_container(
        wallet,
        cid,
        oid,
        file_name,
        shell=shell,
        cluster=cluster,
        deny_operations=[EACLOperation.PUT, EACLOperation.DELETE],
        bearer=bearer,
        wallet_config=wallet_config,
        xhdr=xhdr,
    )


# -------------------- pytest_tests/helpers/env_properties.py --------------------
import logging
import re

import allure
from pytest import Config

logger = logging.getLogger("NeoLogger")


@allure.step("Read environment.properties")
def read_env_properties(config: Config) -> dict:
    """Parse key=value pairs from allure's environment.properties file.

    Returns a dict of properties, or None when --alluredir is not set.
    NOTE(review): raises if the file does not exist yet — confirm callers
    always run after the file is created.
    """
    environment_dir = config.getoption("--alluredir")
    if not environment_dir:
        return None

    file_path = f"{environment_dir}/environment.properties"
    with open(file_path, "r") as file:
        raw_content = file.read()

    env_properties = {}
    for entry in raw_content.split("\n"):
        match = re.match("(.*?)=(.*)", entry)
        if not match:
            logger.warning(f"Could not parse env property from {entry}")
            continue
        env_properties[match.group(1)] = match.group(2)
    return env_properties


@allure.step("Update data in environment.properties")
def save_env_properties(config: Config, env_data: dict) -> None:
    """Append key=value pairs to allure's environment.properties file.

    No-op when --alluredir is not set.
    """
    environment_dir = config.getoption("--alluredir")
    if not environment_dir:
        return None

    file_path = f"{environment_dir}/environment.properties"
    with open(file_path, "a+") as env_file:
        env_file.writelines(f"{env}={env_value}\n" for env, env_value in env_data.items())
# -------------------- pytest_tests/helpers/epoch.py --------------------
import logging
from time import sleep
from typing import Optional

import allure
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo
from frostfs_testlib.shell import Shell
from frostfs_testlib.utils import datetime_utils, wallet_utils
from payment_neogo import get_contract_hash

from pytest_tests.helpers.cluster import Cluster, StorageNode
from pytest_tests.helpers.test_control import wait_for_success
from pytest_tests.resources.common import (
    FROSTFS_ADM_CONFIG_PATH,
    FROSTFS_ADM_EXEC,
    FROSTFS_CLI_EXEC,
    MAINNET_BLOCK_TIME,
    NEOGO_EXECUTABLE,
)

logger = logging.getLogger("NeoLogger")


@allure.step("Ensure fresh epoch")
def ensure_fresh_epoch(
    shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None
) -> int:
    """Force a new epoch so a switch cannot happen mid-test; return the new epoch."""
    # ensure new fresh epoch to avoid epoch switch during test session
    alive_node = alive_node if alive_node else cluster.storage_nodes[0]
    current_epoch = get_epoch(shell, cluster, alive_node)
    tick_epoch(shell, cluster, alive_node)
    epoch = get_epoch(shell, cluster, alive_node)
    assert epoch > current_epoch, "Epoch wasn't ticked"
    return epoch


@allure.step("Wait for epochs align in whole cluster")
@wait_for_success(60, 5)
def wait_for_epochs_align(shell: Shell, cluster: Cluster) -> None:
    """Assert all storage nodes report the same epoch.

    Retried by @wait_for_success(60, 5) until the assertion holds.
    Fix: was annotated `-> bool` but never returns a value.
    """
    epochs = [get_epoch(shell, cluster, node) for node in cluster.storage_nodes]
    unique_epochs = list(set(epochs))
    assert (
        len(unique_epochs) == 1
    ), f"unaligned epochs found, {epochs}, count of unique epochs {len(unique_epochs)}"


@allure.step("Get Epoch")
def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
    """Return the current epoch number queried via `frostfs-cli netmap epoch`."""
    alive_node = alive_node if alive_node else cluster.storage_nodes[0]
    endpoint = alive_node.get_rpc_endpoint()
    wallet_path = alive_node.get_wallet_path()
    wallet_config = alive_node.get_wallet_config_path()

    cli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config)

    epoch = cli.netmap.epoch(endpoint, wallet_path)
    return int(epoch.stdout)
@allure.step("Tick Epoch")
def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
    """
    Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv)
    Args:
        shell: local shell to make queries about current epoch. Remote shell will be used to tick new one
        cluster: cluster instance under test
        alive_node: node to send requests to (first node in cluster by default)
    """

    alive_node = alive_node if alive_node else cluster.storage_nodes[0]
    remote_shell = alive_node.host.get_shell()

    if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
        # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
        frostfsadm = FrostfsAdm(
            shell=remote_shell,
            frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
            config_file=FROSTFS_ADM_CONFIG_PATH,
        )
        frostfsadm.morph.force_new_epoch()
        return

    # Otherwise we tick epoch using transaction
    cur_epoch = get_epoch(shell, cluster)

    # Use first node by default
    ir_node = cluster.ir_nodes[0]
    # In case if no local_wallet_path is provided, we use wallet_path
    ir_wallet_path = ir_node.get_wallet_path()
    ir_wallet_pass = ir_node.get_wallet_password()
    ir_address = wallet_utils.get_last_address_from_wallet(ir_wallet_path, ir_wallet_pass)

    morph_chain = cluster.morph_chain_nodes[0]
    morph_endpoint = morph_chain.get_endpoint()

    # Invoke netmap contract's newEpoch with the IR multisig account;
    # the epoch advances asynchronously once the transaction is accepted.
    neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
    neogo.contract.invokefunction(
        wallet=ir_wallet_path,
        wallet_password=ir_wallet_pass,
        scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell),
        method="newEpoch",
        arguments=f"int:{cur_epoch + 1}",
        multisig_hash=f"{ir_address}:Global",
        address=ir_address,
        rpc_endpoint=morph_endpoint,
        force=True,
        gas=1,
    )
    # Wait one mainnet block so the transaction lands before callers re-query the epoch.
    sleep(datetime_utils.parse_time(MAINNET_BLOCK_TIME))


# -------------------- pytest_tests/helpers/failover_utils.py --------------------
import logging
from time import sleep

import allure
from frostfs_testlib.shell import Shell

from pytest_tests.helpers.cluster import Cluster, StorageNode
from pytest_tests.helpers.node_management import storage_node_healthcheck
from pytest_tests.helpers.storage_policy import get_nodes_with_object

logger = logging.getLogger("NeoLogger")
@allure.step("Wait for object replication")
def wait_object_replication(
    cid: str,
    oid: str,
    expected_copies: int,
    shell: Shell,
    nodes: list[StorageNode],
) -> list[StorageNode]:
    """Poll nodes until at least expected_copies hold the object.

    Args:
        cid: container ID.
        oid: object ID.
        expected_copies: minimum number of copies to wait for.
        shell: executor for cli command.
        nodes: nodes to poll.

    Returns:
        Nodes that currently store the object.

    Raises:
        AssertionError: when the copy count is not reached within ~5 minutes.
    """
    sleep_interval, attempts = 15, 20
    nodes_with_object = []
    for attempt in range(attempts):
        nodes_with_object = get_nodes_with_object(cid, oid, shell=shell, nodes=nodes)
        if len(nodes_with_object) >= expected_copies:
            return nodes_with_object
        if attempt < attempts - 1:  # fix: don't waste a sleep after the final check
            sleep(sleep_interval)
    raise AssertionError(
        f"Expected {expected_copies} copies of object, but found {len(nodes_with_object)}. "
        f"Waiting time {sleep_interval * attempts}"
    )


@allure.step("Wait for storage nodes returned to cluster")
def wait_all_storage_nodes_returned(cluster: Cluster) -> None:
    """Block until every storage node passes its healthcheck (~5 min timeout).

    Raises:
        AssertionError: when some node is still unhealthy after all attempts.
    """
    sleep_interval, attempts = 15, 20
    for attempt in range(attempts):
        if is_all_storage_nodes_returned(cluster):
            return
        if attempt < attempts - 1:  # fix: don't waste a sleep after the final check
            sleep(sleep_interval)
    raise AssertionError("Storage node(s) is broken")


def is_all_storage_nodes_returned(cluster: Cluster) -> bool:
    """Return True when every storage node reports READY health and ONLINE network status."""
    with allure.step("Run health check for all storage nodes"):
        for node in cluster.storage_nodes:
            try:
                health_check = storage_node_healthcheck(node)
            except Exception as err:
                # A node that cannot even answer the healthcheck is treated as not returned.
                logger.warning(f"Node healthcheck fails with error {err}")
                return False
            if health_check.health_status != "READY" or health_check.network_status != "ONLINE":
                return False
    return True


# -------------------- pytest_tests/helpers/file_helper.py --------------------
import hashlib
import logging
import os
import uuid
from typing import Any, Optional

import allure

from pytest_tests.resources.common import ASSETS_DIR

logger = logging.getLogger("NeoLogger")


def generate_file(size: int) -> str:
    """Generates a binary file with the specified size in bytes.

    Args:
        size: Size in bytes, can be declared as 6e+6 for example.

    Returns:
        The path to the generated file.
    """
    file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
    with open(file_path, "wb") as file:
        # Fix: os.urandom requires an int, but the documented usage allows
        # float literals like 6e+6 — coerce explicitly.
        file.write(os.urandom(int(size)))
    logger.info(f"File with size {size} bytes has been generated: {file_path}")

    return file_path
def generate_file_with_content(
    size: int,
    file_path: Optional[str] = None,
    content: Optional[str] = None,
) -> str:
    """Creates a new file with specified content.

    Args:
        size: Size in bytes of the random binary content to generate;
            ignored when `content` is provided.
        file_path: Path to the file that should be created. If not specified, then random file
            path will be generated.
        content: Content that should be stored in the file. If not specified, then random binary
            content will be generated.

    Returns:
        Path to the generated file.
    """
    mode = "w+"
    if content is None:
        content = os.urandom(size)
        mode = "wb"

    if not file_path:
        file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
    else:
        parent_dir = os.path.dirname(file_path)
        if parent_dir:
            # Fix: exist_ok removes the exists()/makedirs() race, and guarding on a
            # non-empty dirname lets bare relative names through (makedirs("") raises).
            os.makedirs(parent_dir, exist_ok=True)

    with open(file_path, mode) as file:
        file.write(content)

    return file_path
@allure.step("Get File Hash")
def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str:
    """Return the SHA-256 hex digest of a file, or of a slice of it.

    Args:
        file_path: Path to the file to generate hash for.
        len: How many bytes to read.
        offset: Position to start reading from.

    Returns:
        Hash of the file as hex-encoded string.
    """
    # NOTE: `len` shadows the builtin, but is kept to preserve the keyword
    # interface for existing callers.
    with open(file_path, "rb") as stream:
        if offset:
            stream.seek(offset, 0)
        data = stream.read(len) if len else stream.read()
    return hashlib.sha256(data).hexdigest()


@allure.step("Concatenation set of files to one file")
def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str:
    """Concatenates several files into a single file.

    Args:
        file_paths: Paths to the files to concatenate, in order.
        resulting_file_path: Path to the file where concatenated content should be
            stored; a random path under ASSETS_DIR is used when omitted.

    Returns:
        Path to the resulting file.
    """
    if not resulting_file_path:
        resulting_file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
    with open(resulting_file_path, "wb") as target:
        for source_path in file_paths:
            with open(source_path, "rb") as source:
                target.write(source.read())
    return resulting_file_path
def split_file(file_path: str, parts: int) -> list[str]:
    """Splits specified file into several specified number of parts.

    Each part is saved under name `{original_file}_part_{i}`.

    Fix: the previous chunk size `int((size + parts) / parts)` oversized chunks,
    so e.g. a 10-byte file split into 5 parts produced only 4 files. Ceil
    division yields exactly `parts` files whenever the file holds at least
    `parts` bytes (fewer for shorter files, none for an empty file).

    Args:
        file_path: Path to the file that should be split.
        parts: Number of parts the file should be split into.

    Returns:
        Paths to the part files.
    """
    with open(file_path, "rb") as file:
        content = file.read()

    content_size = len(content)
    # Ceil division; clamp the range step to >= 1 so an empty file yields no parts.
    chunk_size = (content_size + parts - 1) // parts

    part_file_paths = []
    for part_id, content_offset in enumerate(range(0, content_size, max(chunk_size, 1)), start=1):
        part_file_name = f"{file_path}_part_{part_id}"
        part_file_paths.append(part_file_name)
        with open(part_file_name, "wb") as out_file:
            out_file.write(content[content_offset : content_offset + chunk_size])

    return part_file_paths
def get_file_content(
    file_path: str, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None
) -> Any:
    """Returns content of specified file.

    Args:
        file_path: Path to the file.
        content_len: Limit of content length. If None, then entire file content is returned;
            otherwise only the first content_len bytes of the content are returned.
        mode: Mode of opening the file.
        offset: Position to start reading from.

    Returns:
        Content of the specified file.
    """
    with open(file_path, mode) as file:
        # Fix: the original four copy-pasted branches collapse to
        # "seek if requested, then read with or without a limit".
        if offset:
            file.seek(offset, 0)
        content = file.read(content_len) if content_len else file.read()

    return content


# -------------------- pytest_tests/helpers/iptables_helper.py --------------------
class IpTablesHelper:
    """Drop/restore inbound TCP traffic on given ports via sudo iptables."""

    @staticmethod
    def drop_input_traffic_to_port(shell: "Shell", ports: list[str]) -> None:
        # -A appends a DROP rule per port.
        for port in ports:
            shell.exec(f"sudo iptables -A INPUT -p tcp --dport {port} -j DROP")

    @staticmethod
    def restore_input_traffic_to_port(shell: "Shell", ports: list[str]) -> None:
        # -D deletes the exact rule added by drop_input_traffic_to_port.
        for port in ports:
            shell.exec(f"sudo iptables -D INPUT -p tcp --dport {port} -j DROP")
# -------------------- pytest_tests/helpers/object_access.py --------------------
from typing import Optional

import allure
from frostfs_testlib.resources.common import OBJECT_ACCESS_DENIED
from frostfs_testlib.shell import Shell
from frostfs_testlib.utils import string_utils

from pytest_tests.helpers.cluster import Cluster
from pytest_tests.helpers.file_helper import get_file_hash
from pytest_tests.helpers.frostfs_verbs import (
    delete_object,
    get_object_from_random_node,
    get_range,
    get_range_hash,
    head_object,
    put_object_to_random_node,
    search_object,
)

OPERATION_ERROR_TYPE = RuntimeError


def can_get_object(
    wallet: str,
    cid: str,
    oid: str,
    file_name: str,
    shell: Shell,
    cluster: Cluster,
    bearer: Optional[str] = None,
    wallet_config: Optional[str] = None,
    xhdr: Optional[dict] = None,
) -> bool:
    """Return True when GET succeeds (and content matches), False when denied by ACL."""
    with allure.step("Try get object from container"):
        try:
            downloaded_path = get_object_from_random_node(
                wallet,
                cid,
                oid,
                bearer=bearer,
                wallet_config=wallet_config,
                xhdr=xhdr,
                shell=shell,
                cluster=cluster,
            )
        except OPERATION_ERROR_TYPE as error:
            # Any failure other than access-denied is a test bug, not a verdict.
            assert string_utils.is_str_match_pattern(
                error, OBJECT_ACCESS_DENIED
            ), f"Expected {error} to match {OBJECT_ACCESS_DENIED}"
            return False
        assert get_file_hash(file_name) == get_file_hash(downloaded_path)
        return True


def can_put_object(
    wallet: str,
    cid: str,
    file_name: str,
    shell: Shell,
    cluster: Cluster,
    bearer: Optional[str] = None,
    wallet_config: Optional[str] = None,
    xhdr: Optional[dict] = None,
    attributes: Optional[dict] = None,
) -> bool:
    """Return True when PUT succeeds, False when denied by ACL."""
    with allure.step("Try put object to container"):
        try:
            put_object_to_random_node(
                wallet,
                file_name,
                cid,
                bearer=bearer,
                wallet_config=wallet_config,
                xhdr=xhdr,
                attributes=attributes,
                shell=shell,
                cluster=cluster,
            )
        except OPERATION_ERROR_TYPE as error:
            assert string_utils.is_str_match_pattern(
                error, OBJECT_ACCESS_DENIED
            ), f"Expected {error} to match {OBJECT_ACCESS_DENIED}"
            return False
        return True
def can_delete_object(
    wallet: str,
    cid: str,
    oid: str,
    shell: Shell,
    endpoint: str,
    bearer: Optional[str] = None,
    wallet_config: Optional[str] = None,
    xhdr: Optional[dict] = None,
) -> bool:
    """Return True when DELETE succeeds, False when denied by ACL."""
    with allure.step("Try delete object from container"):
        try:
            delete_object(
                wallet,
                cid,
                oid,
                bearer=bearer,
                wallet_config=wallet_config,
                xhdr=xhdr,
                shell=shell,
                endpoint=endpoint,
            )
        except OPERATION_ERROR_TYPE as error:
            assert string_utils.is_str_match_pattern(
                error, OBJECT_ACCESS_DENIED
            ), f"Expected {error} to match {OBJECT_ACCESS_DENIED}"
            return False
        return True


def can_get_head_object(
    wallet: str,
    cid: str,
    oid: str,
    shell: Shell,
    endpoint: str,
    bearer: Optional[str] = None,
    wallet_config: Optional[str] = None,
    xhdr: Optional[dict] = None,
) -> bool:
    """Return True when HEAD succeeds, False when denied by ACL."""
    with allure.step("Try get head of object"):
        try:
            head_object(
                wallet,
                cid,
                oid,
                bearer=bearer,
                wallet_config=wallet_config,
                xhdr=xhdr,
                shell=shell,
                endpoint=endpoint,
            )
        except OPERATION_ERROR_TYPE as error:
            assert string_utils.is_str_match_pattern(
                error, OBJECT_ACCESS_DENIED
            ), f"Expected {error} to match {OBJECT_ACCESS_DENIED}"
            return False
        return True


def can_get_range_of_object(
    wallet: str,
    cid: str,
    oid: str,
    shell: Shell,
    endpoint: str,
    bearer: Optional[str] = None,
    wallet_config: Optional[str] = None,
    xhdr: Optional[dict] = None,
) -> bool:
    """Return True when GETRANGE succeeds, False when denied by ACL."""
    with allure.step("Try get range of object"):
        try:
            get_range(
                wallet,
                cid,
                oid,
                bearer=bearer,
                range_cut="0:10",  # any fixed range works for an access probe
                wallet_config=wallet_config,
                xhdr=xhdr,
                shell=shell,
                endpoint=endpoint,
            )
        except OPERATION_ERROR_TYPE as error:
            assert string_utils.is_str_match_pattern(
                error, OBJECT_ACCESS_DENIED
            ), f"Expected {error} to match {OBJECT_ACCESS_DENIED}"
            return False
        return True


def can_get_range_hash_of_object(
    wallet: str,
    cid: str,
    oid: str,
    shell: Shell,
    endpoint: str,
    bearer: Optional[str] = None,
    wallet_config: Optional[str] = None,
    xhdr: Optional[dict] = None,
) -> bool:
    """Return True when GETRANGEHASH succeeds, False when denied by ACL."""
    with allure.step("Try get range hash of object"):
        try:
            get_range_hash(
                wallet,
                cid,
                oid,
                bearer=bearer,
                range_cut="0:10",
                wallet_config=wallet_config,
                xhdr=xhdr,
                shell=shell,
                endpoint=endpoint,
            )
        except OPERATION_ERROR_TYPE as error:
            assert string_utils.is_str_match_pattern(
                error, OBJECT_ACCESS_DENIED
            ), f"Expected {error} to match {OBJECT_ACCESS_DENIED}"
            return False
        return True
def can_search_object(
    wallet: str,
    cid: str,
    shell: Shell,
    endpoint: str,
    oid: Optional[str] = None,
    bearer: Optional[str] = None,
    wallet_config: Optional[str] = None,
    xhdr: Optional[dict] = None,
) -> bool:
    """Return True when SEARCH succeeds (and finds `oid`, if given), False when denied."""
    with allure.step("Try search object in container"):
        try:
            found_oids = search_object(
                wallet,
                cid,
                bearer=bearer,
                wallet_config=wallet_config,
                xhdr=xhdr,
                shell=shell,
                endpoint=endpoint,
            )
        except OPERATION_ERROR_TYPE as error:
            assert string_utils.is_str_match_pattern(
                error, OBJECT_ACCESS_DENIED
            ), f"Expected {error} to match {OBJECT_ACCESS_DENIED}"
            return False
        return oid in found_oids if oid else True


# -------------------- pytest_tests/helpers/s3_helper.py --------------------
import logging
import os
from datetime import datetime, timedelta
from typing import Optional

import allure
from dateutil.parser import parse

from pytest_tests.steps import s3_gate_bucket, s3_gate_object

logger = logging.getLogger("NeoLogger")


@allure.step("Expected all objects are presented in the bucket")
def check_objects_in_bucket(
    s3_client, bucket, expected_objects: list, unexpected_objects: Optional[list] = None
) -> None:
    """Assert the bucket lists exactly expected_objects and none of unexpected_objects."""
    unexpected_objects = unexpected_objects or []
    bucket_objects = s3_gate_object.list_objects_s3(s3_client, bucket)
    assert len(bucket_objects) == len(
        expected_objects
    ), f"Expected {len(expected_objects)} objects in the bucket"

    for bucket_object in expected_objects:
        assert (
            bucket_object in bucket_objects
        ), f"Expected object {bucket_object} in objects list {bucket_objects}"
    for bucket_object in unexpected_objects:
        assert (
            bucket_object not in bucket_objects
        ), f"Expected object {bucket_object} not in objects list {bucket_objects}"
@allure.step("Try to get object and got error")
def try_to_get_objects_and_expect_error(s3_client, bucket: str, object_keys: list) -> None:
    """Assert every key in object_keys is absent from the bucket.

    Fix: the "object unexpectedly exists" AssertionError used to be raised
    inside the try block and then swallowed by the generic `except`, which
    re-asserted on the error text and produced a misleading failure message.
    The success path now raises from an `else` clause instead.
    """
    for obj in object_keys:
        try:
            s3_gate_object.get_object_s3(s3_client, bucket, obj)
        except Exception as err:
            assert "The specified key does not exist" in str(
                err
            ), f"Expected error in exception {err}"
        else:
            raise AssertionError(f"Object {obj} found in bucket {bucket}")


@allure.step("Set versioning enable for bucket")
def set_bucket_versioning(s3_client, bucket: str, status: s3_gate_bucket.VersioningStatus):
    """Set bucket versioning to `status` and verify the bucket reports it.

    Fix: the assertion message had expected and actual values swapped.
    """
    s3_gate_bucket.get_bucket_versioning_status(s3_client, bucket)
    s3_gate_bucket.set_bucket_versioning(s3_client, bucket, status=status)
    bucket_status = s3_gate_bucket.get_bucket_versioning_status(s3_client, bucket)
    assert bucket_status == status.value, f"Expected {status.value} status. Got {bucket_status}"
def object_key_from_file_path(full_path: str) -> str:
    """Derive the S3 object key from a local file path (its basename)."""
    return os.path.basename(full_path)


def assert_tags(
    actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None
) -> None:
    """Assert the actual tag list matches expectations.

    Args:
        actual_tags: tags as returned by S3, a list of {"Key": ..., "Value": ...} dicts.
        expected_tags: (key, value) pairs that must all be present; when empty/None,
            actual_tags must be empty too.
        unexpected_tags: (key, value) pairs that must be absent.
    """
    expected_tags = (
        [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
    )
    unexpected_tags = (
        [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
    )
    if expected_tags == []:
        assert not actual_tags, f"Expected there is no tags, got {actual_tags}"
    # Fix: the bare length assert gave no context on failure.
    assert len(expected_tags) == len(
        actual_tags
    ), f"Expected {len(expected_tags)} tags, got {len(actual_tags)}: {actual_tags}"
    for tag in expected_tags:
        assert tag in actual_tags, f"Tag {tag} must be in {actual_tags}"
    for tag in unexpected_tags:
        assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}"


@allure.step("Expected all tags are presented in object")
def check_tags_by_object(
    s3_client,
    bucket: str,
    key_name: str,
    expected_tags: list,
    unexpected_tags: Optional[list] = None,
) -> None:
    """Fetch the object's tagging and delegate the comparison to assert_tags."""
    actual_tags = s3_gate_object.get_object_tagging(s3_client, bucket, key_name)
    assert_tags(
        expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
    )


@allure.step("Expected all tags are presented in bucket")
def check_tags_by_bucket(
    s3_client, bucket: str, expected_tags: list, unexpected_tags: Optional[list] = None
) -> None:
    """Fetch the bucket's tagging and delegate the comparison to assert_tags."""
    actual_tags = s3_gate_bucket.get_bucket_tagging(s3_client, bucket)
    assert_tags(
        expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
    )
def assert_object_lock_mode(
    s3_client,
    bucket: str,
    file_name: str,
    object_lock_mode: str,
    retain_untile_date: datetime,
    legal_hold_status: str = "OFF",
    retain_period: Optional[int] = None,
):
    """Assert the object's lock mode, legal-hold status, and retention.

    Retention is checked either against an absolute date (retain_untile_date,
    compared at second precision) or, when that is falsy, against a period in
    days relative to the object's LastModified.
    NOTE(review): parameter name `retain_untile_date` is a typo for
    "until", kept because callers pass it by keyword.
    """
    object_dict = s3_gate_object.get_object_s3(s3_client, bucket, file_name, full_output=True)
    assert (
        object_dict.get("ObjectLockMode") == object_lock_mode
    ), f"Expected Object Lock Mode is {object_lock_mode}"
    assert (
        object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status
    ), f"Expected Object Lock Legal Hold Status is {legal_hold_status}"
    object_retain_date = object_dict.get("ObjectLockRetainUntilDate")
    # The S3 client may return the date as a string or a datetime.
    retain_date = (
        parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date
    )
    if retain_untile_date:
        assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_untile_date.strftime(
            "%Y-%m-%dT%H:%M:%S"
        ), f'Expected Object Lock Retain Until Date is {str(retain_untile_date.strftime("%Y-%m-%dT%H:%M:%S"))}'
    elif retain_period:
        last_modify_date = object_dict.get("LastModified")
        last_modify = (
            parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date
        )
        # +1s absorbs sub-second skew between set and read timestamps.
        assert (
            retain_date - last_modify + timedelta(seconds=1)
        ).days == retain_period, f"Expected retention period is {retain_period} days"


def assert_s3_acl(acl_grants: list, permitted_users: str):
    """Assert the ACL grant list matches the expected audience.

    permitted_users:
        "AllUsers"      — both the AllUsers group and a canonical user must
                          hold FULL_CONTROL.
        "CanonicalUser" — only canonical users should hold FULL_CONTROL;
                          a group grant is only logged as an error, not failed.
    """
    if permitted_users == "AllUsers":
        grantees = {"AllUsers": 0, "CanonicalUser": 0}
        for acl_grant in acl_grants:
            if acl_grant.get("Grantee", {}).get("Type") == "Group":
                uri = acl_grant.get("Grantee", {}).get("URI")
                permission = acl_grant.get("Permission")
                assert (uri, permission) == (
                    "http://acs.amazonaws.com/groups/global/AllUsers",
                    "FULL_CONTROL",
                ), "All Groups should have FULL_CONTROL"
                grantees["AllUsers"] += 1
            if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
                permission = acl_grant.get("Permission")
                assert permission == "FULL_CONTROL", "Canonical User should have FULL_CONTROL"
                grantees["CanonicalUser"] += 1
        assert grantees["AllUsers"] >= 1, "All Users should have FULL_CONTROL"
        assert grantees["CanonicalUser"] >= 1, "Canonical User should have FULL_CONTROL"

    if permitted_users == "CanonicalUser":
        for acl_grant in acl_grants:
            if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
                permission = acl_grant.get("Permission")
                assert permission == "FULL_CONTROL", "Only CanonicalUser should have FULL_CONTROL"
            else:
                # NOTE(review): deliberately only logged, not asserted — confirm intent.
                logger.error("FULL_CONTROL is given to All Users")
# -------------------- pytest_tests/helpers/storage_object_info.py --------------------
from dataclasses import dataclass
from typing import Optional


@dataclass
class ObjectRef:
    """Minimal reference to a stored object: container ID plus object ID."""

    cid: str
    oid: str


@dataclass
class LockObjectInfo(ObjectRef):
    """Reference to a LOCK object; either a relative lifetime (in epochs)
    or an absolute expiration epoch may be set."""

    lifetime: Optional[int] = None
    expire_at: Optional[int] = None


@dataclass
class StorageObjectInfo(ObjectRef):
    """Bookkeeping record for an object created during a test run:
    its source file, hash, attributes, tombstone and any lock objects."""

    size: Optional[int] = None
    wallet_file_path: Optional[str] = None
    file_path: Optional[str] = None
    file_hash: Optional[str] = None
    attributes: Optional[list[dict[str, str]]] = None
    tombstone: Optional[str] = None
    locks: Optional[list[LockObjectInfo]] = None
@allure.step("Get Object Copies")
def get_object_copies(
    complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> int:
    """
    Count how many nodes of the container hold a copy of the object.

    Simple and complex objects must be probed differently, so the caller
    supplies the object's complexity tag and the matching strategy is chosen.

    Args:
        complexity: the tag of object size and complexity, [Simple|Complex]
        wallet: the path to the wallet on whose behalf the copies are got
        cid: ID of the container
        oid: ID of the Object
        shell: executor for cli command
        nodes: storage nodes to query
    Returns:
        the number of object copies in the container
    """
    if complexity == "Simple":
        return get_simple_object_copies(wallet, cid, oid, shell, nodes)
    return get_complex_object_copies(wallet, cid, oid, shell, nodes)
@allure.step("Get Simple Object Copies")
def get_simple_object_copies(
    wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> int:
    """
    Count copies of a simple object by issuing a direct HEAD request to every
    node of the container; a non-empty HEAD response counts as a stored copy.

    Args:
        wallet: the path to the wallet on whose behalf the copies are got
        cid: ID of the container
        oid: ID of the Object
        shell: executor for cli command
        nodes: nodes to search on
    Returns:
        the number of object copies in the container
    """
    found = 0
    for node in nodes:
        try:
            response = frostfs_verbs.head_object(
                wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
            )
        except Exception:
            # A failing HEAD just means this node has no copy — keep probing.
            logger.info(f"No {oid} object copy found on {node}, continue")
            continue
        if response:
            logger.info(f"Found object {oid} on node {node}")
            found += 1
    return found
@allure.step("Get Nodes With Object")
def get_nodes_with_object(
    cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> list[StorageNode]:
    """
    Return the subset of nodes that store the given object.

    Each node is probed with a direct HEAD request signed by that node's own
    wallet; a node that answers with a header is considered a holder.

    Args:
        cid: ID of the container which store the object
        oid: object ID
        shell: executor for cli command
        nodes: nodes to find on
    Returns:
        nodes which store the object
    """
    holders = []
    for node in nodes:
        wallet = node.get_wallet_path()
        wallet_config = node.get_wallet_config_path()
        try:
            header = frostfs_verbs.head_object(
                wallet,
                cid,
                oid,
                shell=shell,
                endpoint=node.get_rpc_endpoint(),
                is_direct=True,
                wallet_config=wallet_config,
            )
        except Exception:
            logger.info(f"No {oid} object copy found on {node}, continue")
            continue
        if header is not None:
            logger.info(f"Found object {oid} on node {node}")
            holders.append(node)
    return holders
@allure.step("Get Nodes Without Object")
def get_nodes_without_object(
    wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> list[StorageNode]:
    """
    Return the subset of nodes that do not store the given object.

    Args:
        wallet: the path to the wallet on whose behalf we request the nodes
        cid: ID of the container which store the object
        oid: object ID
        shell: executor for cli command
        nodes: nodes to check
    Returns:
        nodes which do not store the object
    """
    missing = []
    for node in nodes:
        try:
            res = frostfs_verbs.head_object(
                wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
            )
        except Exception as err:
            # OBJECT_NOT_FOUND is the expected "no copy here" answer;
            # anything else is a genuine failure and must propagate.
            if not string_utils.is_str_match_pattern(err, OBJECT_NOT_FOUND):
                raise Exception(f"Got error {err} on head object command") from err
            missing.append(node)
        else:
            if res is None:
                missing.append(node)
    return missing
def wait_for_success(max_wait_time: int = 60, interval: int = 1):
    """
    Decorator to wait for some conditions/functions to pass successfully.
    This is useful if you don't know exact time when something should pass successfully and do not
    want to use sleep(X) with too big X.

    Be careful though, wrapped function should only check the state of something, not change it.

    Args:
        max_wait_time: total number of seconds to keep retrying
        interval: seconds to sleep between failed attempts
    """

    def wrapper(func):
        @wraps(func)
        def impl(*a, **kw):
            start = int(round(time()))
            last_exception = None
            while start + max_wait_time >= int(round(time())):
                try:
                    return func(*a, **kw)
                # pytest's Failed derives from BaseException (not Exception), so
                # it must be listed explicitly. The original had two duplicated,
                # byte-identical handler bodies; merging them into one tuple
                # handler removes the copy-paste without changing behavior.
                except (Exception, Failed) as ex:
                    logger.debug(ex)
                    last_exception = ex
                    sleep(interval)

            # Timeout exceeded with no success — re-raise the last failure.
            if last_exception is None:
                # Loop body never ran (negative max_wait_time): raising None
                # would be a confusing TypeError, so fail with a clear error.
                raise TimeoutError(f"No attempt was made within {max_wait_time}s")
            raise last_exception

        return impl

    return wrapper
@allure.step("Verify Head Tombstone")
def verify_head_tombstone(
    wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str
):
    """
    Head the tombstone object and assert that its header correctly
    describes the deletion of the original object.

    Args:
        wallet_path: wallet on whose behalf the HEAD request is made;
            also used to derive the expected owner address
        cid: ID of the container holding both objects
        oid_ts: ID of the tombstone object
        oid: ID of the deleted object the tombstone must reference
        shell: executor for cli command
        endpoint: RPC endpoint to send the request to

    Raises:
        AssertionError: when any tombstone header field is wrong.
    """
    header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]

    s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"]
    logger.info(f"Header Session OIDs is {s_oid}")
    logger.info(f"OID is {oid}")

    assert header["containerID"] == cid, "Tombstone Header CID is wrong"

    # The expected owner is the first account of the deleting wallet.
    # NOTE(review): assumes the wallet has an empty password — confirm.
    with open(wallet_path, "r") as file:
        wlt_data = json.loads(file.read())
        wlt = wallet.Wallet.from_json(wlt_data, password="")
        addr = wlt.accounts[0].address

    assert header["ownerID"] == addr, "Tombstone Owner ID is wrong"
    assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone"
    assert (
        header["sessionToken"]["body"]["object"]["verb"] == "DELETE"
    ), "Header Session Type isn't DELETE"
    assert (
        header["sessionToken"]["body"]["object"]["target"]["container"] == cid
    ), "Header Session ID is wrong"
    assert (
        oid in header["sessionToken"]["body"]["object"]["target"]["objects"]
    ), "Header Session OID is wrong"
def placement_policy_from_container(container_info: str) -> str:
    """
    Get placement policy from container info:

    container ID: j7k4auNHRmiPMSmnH2qENLECD2au2y675fvTX6csDwd
    version: 2.12
    owner ID: NQ8HUxE5qEj7UUvADj7z9Z7pcvJdjtPwuw
    basic ACL: 0fbfbfff (eacl-public-read-write)
    attribute: Timestamp=1656340345 (2022-06-27 17:32:25 +0300 MSK)
    nonce: 1c511e88-efd7-4004-8dbf-14391a5d375a
    placement policy:
    REP 1 IN LOC_PLACE
    CBF 1
    SELECT 1 FROM LOC_SW AS LOC_PLACE
    FILTER Country EQ Sweden AS LOC_SW

    Args:
        container_info: output from frostfs-cli container get command

    Returns:
        placement policy as a single space-separated string
    """
    assert ":" in container_info, f"Could not find placement rule in the output {container_info}"
    # The policy is the last colon-delimited section of the output, so take
    # everything after the final ":" and collapse it onto one line.
    _, _, policy_text = container_info.rpartition(":")
    return policy_text.replace("\n", " ").strip()
class WalletFactory:
    """Creates fresh wallets (optionally pre-funded) inside a working directory."""

    def __init__(self, wallets_dir: str, shell: Shell, cluster: Cluster) -> None:
        self.shell = shell
        self.wallets_dir = wallets_dir
        self.cluster = cluster

    def create_wallet(self, password: str = WALLET_PASS) -> WalletFile:
        """
        Creates new default wallet
        Args:
            password: wallet password

        Returns:
            WalletFile object of new wallet
        """
        # Random file name avoids collisions between wallets of the same run.
        wallet_path = os.path.join(self.wallets_dir, f"{str(uuid.uuid4())}.json")
        wallet_utils.init_wallet(wallet_path, password)

        # When storage is not free, the wallet must be funded and a deposit
        # made before it can pay for object operations.
        if not FREE_STORAGE:
            main_chain = self.cluster.main_chain_nodes[0]
            deposit = 30
            # NOTE(review): presumably the extra 1 GAS covers the transfer
            # fee so the full deposit can still be made — confirm.
            transfer_gas(
                shell=self.shell,
                amount=deposit + 1,
                main_chain=main_chain,
                wallet_to_path=wallet_path,
                wallet_to_password=password,
            )
            deposit_gas(
                shell=self.shell,
                amount=deposit,
                main_chain=main_chain,
                wallet_from_path=wallet_path,
                wallet_from_password=password,
            )

        return WalletFile(wallet_path, password)
"""Test-suite wide configuration constants, all overridable via environment."""

import os

import yaml

CONTAINER_WAIT_INTERVAL = "1m"

# Object sizing used by fixtures: a "simple" object fits in one chunk,
# a "complex" one is split into COMPLEX_OBJECT_CHUNKS_COUNT chunks plus a tail.
SIMPLE_OBJECT_SIZE = os.getenv("SIMPLE_OBJECT_SIZE", "1000")
COMPLEX_OBJECT_CHUNKS_COUNT = os.getenv("COMPLEX_OBJECT_CHUNKS_COUNT", "3")
COMPLEX_OBJECT_TAIL_SIZE = os.getenv("COMPLEX_OBJECT_TAIL_SIZE", "1000")

MAINNET_BLOCK_TIME = os.getenv("MAINNET_BLOCK_TIME", "1s")
MAINNET_TIMEOUT = os.getenv("MAINNET_TIMEOUT", "1min")
MORPH_BLOCK_TIME = os.getenv("MORPH_BLOCK_TIME", "1s")
FROSTFS_CONTRACT_CACHE_TIMEOUT = os.getenv("FROSTFS_CONTRACT_CACHE_TIMEOUT", "30s")

# Time interval that allows a GC pass on storage node (this includes GC sleep interval
# of 1min plus 15 seconds for GC pass itself)
STORAGE_GC_TIME = os.getenv("STORAGE_GC_TIME", "75s")

GAS_HASH = os.getenv("GAS_HASH", "0xd2a4cff31913016155e38e474a2c06d08be276cf")

FROSTFS_CONTRACT = os.getenv("FROSTFS_IR_CONTRACTS_FROSTFS")

ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir")
DEVENV_PATH = os.getenv("DEVENV_PATH", os.path.join("..", "frostfs-dev-env"))

# Password of wallet owned by user on behalf of whom we are running tests
WALLET_PASS = os.getenv("WALLET_PASS", "")


# Paths to CLI executables on machine that runs tests
NEOGO_EXECUTABLE = os.getenv("NEOGO_EXECUTABLE", "neo-go")
FROSTFS_CLI_EXEC = os.getenv("FROSTFS_CLI_EXEC", "frostfs-cli")
FROSTFS_AUTHMATE_EXEC = os.getenv("FROSTFS_AUTHMATE_EXEC", "frostfs-authmate")
FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm")

# Config for frostfs-adm utility. Optional if tests are running against devenv
FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH")

FREE_STORAGE = os.getenv("FREE_STORAGE", "false").lower() == "true"
BIN_VERSIONS_FILE = os.getenv("BIN_VERSIONS_FILE")

HOSTING_CONFIG_FILE = os.getenv("HOSTING_CONFIG_FILE", ".devenv.hosting.yaml")
STORAGE_NODE_SERVICE_NAME_REGEX = r"s\d\d"
HTTP_GATE_SERVICE_NAME_REGEX = r"http-gate\d\d"
S3_GATE_SERVICE_NAME_REGEX = r"s3-gate\d\d"

# Generate wallet configs
# TODO: we should move all info about wallet configs to fixtures
# NOTE(review): this writes a file as an import-time side effect; importing
# this module anywhere creates/overwrites wallet_config.yml in the CWD.
WALLET_CONFIG = os.path.join(os.getcwd(), "wallet_config.yml")
with open(WALLET_CONFIG, "w") as file:
    yaml.dump({"password": WALLET_PASS}, file)
"""Parameters for the k6-based load tests, all overridable via environment."""

import os

# Load node parameters
LOAD_NODES = os.getenv("LOAD_NODES", "").split(",")
LOAD_NODE_SSH_USER = os.getenv("LOAD_NODE_SSH_USER", "root")
LOAD_NODE_SSH_PRIVATE_KEY_PATH = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PATH")
# NOTE(review): the defaults below are ints, but os.getenv returns a str when
# the variable is set — consumers receive mixed types; confirm intended.
BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 10)
BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 10)
BACKGROUND_OBJ_SIZE = os.getenv("BACKGROUND_OBJ_SIZE", 1024)
BACKGROUND_LOAD_MAX_TIME = os.getenv("BACKGROUND_LOAD_MAX_TIME", 600)

# Load run parameters

OBJ_SIZE = os.getenv("OBJ_SIZE", "1000").split(",")
CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "1").split(",")
OUT_FILE = os.getenv("OUT_FILE", "1mb_200.json").split(",")
OBJ_COUNT = os.getenv("OBJ_COUNT", "4").split(",")
WRITERS = os.getenv("WRITERS", "200").split(",")
# NOTE(review): env var name "READER" vs constant READERS looks like a typo;
# confirm which name external tooling actually sets before renaming it here.
READERS = os.getenv("READER", "0").split(",")
DELETERS = os.getenv("DELETERS", "0").split(",")
LOAD_TIME = os.getenv("LOAD_TIME", "200").split(",")
LOAD_TYPE = os.getenv("LOAD_TYPE", "grpc").split(",")
LOAD_NODES_COUNT = os.getenv("LOAD_NODES_COUNT", "1").split(",")
STORAGE_NODE_COUNT = os.getenv("STORAGE_NODE_COUNT", "4").split(",")
CONTAINER_PLACEMENT_POLICY = os.getenv(
    "CONTAINER_PLACEMENT_POLICY", "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
)
# To skip adding every mandatory singleton dependency to EACH test function
class ClusterTestBase:
    """
    Base class for test suites that need cluster-wide singletons.

    The session-scoped autouse fixture stores the shell and cluster on the
    CLASS itself, so every test method (and every subclass) shares the same
    instances without declaring the fixtures explicitly.
    """

    shell: Shell
    cluster: Cluster

    @pytest.fixture(scope="session", autouse=True)
    def fill_mandatory_dependencies(self, cluster: Cluster, client_shell: Shell):
        # Assign on the class, not the instance: pytest creates a new instance
        # per test, so instance attributes would not survive between tests.
        ClusterTestBase.shell = client_shell
        ClusterTestBase.cluster = cluster
        yield

    @allure.title("Tick {epochs_to_tick} epochs")
    def tick_epochs(self, epochs_to_tick: int):
        """Advance the chain by the given number of epochs, one at a time."""
        for _ in range(epochs_to_tick):
            self.tick_epoch()

    def tick_epoch(self):
        """Advance the chain by a single epoch."""
        epoch.tick_epoch(self.shell, self.cluster)

    def wait_for_epochs_align(self):
        """Block until all nodes report the same epoch."""
        epoch.wait_for_epochs_align(self.shell, self.cluster)

    def get_epoch(self):
        """Return the current epoch number."""
        return epoch.get_epoch(self.shell, self.cluster)

    def ensure_fresh_epoch(self):
        """Tick to a new epoch and return its number."""
        return epoch.ensure_fresh_epoch(self.shell, self.cluster)
@allure.title("Start nodes")
def start_stopped_nodes():
    """
    Start every host previously stopped by stop_unused_nodes and clear the
    STOPPED_HOSTS registry.

    Bug fix: the original removed items from STOPPED_HOSTS while iterating
    over the same list, which skips every other element and leaves roughly
    half of the hosts stopped. Iterating over a snapshot starts each host
    exactly once and empties the registry.
    """
    for host in list(STOPPED_HOSTS):
        host.start_host()
        STOPPED_HOSTS.remove(host)
@allure.title("Init s3 client")
def init_s3_client(
    load_nodes: list, login: str, pkey: str, container_placement_policy: str, hosting: Hosting
):
    """
    Issue S3 credentials via frostfs-authmate and configure the aws CLI on
    every load node.

    Args:
        load_nodes: hosts (addresses) of the k6 load generators
        login: SSH login for the load nodes
        pkey: path to the SSH private key for the load nodes
        container_placement_policy: placement policy passed to authmate
        hosting: hosting config used to locate a storage node and its wallet
    """
    service_configs = hosting.find_service_configs(STORAGE_NODE_SERVICE_NAME_REGEX)
    host = hosting.get_host_by_service(service_configs[0].name)
    wallet_path = service_configs[0].attributes["wallet_path"]
    neogo_cli_config = host.get_cli_config("neo-go")
    neogo_wallet = NeoGo(shell=host.get_shell(), neo_go_exec_path=neogo_cli_config.exec_path).wallet
    dump_keys_output = neogo_wallet.dump_keys(wallet=wallet_path, wallet_config=None).stdout
    # The gate public key is on the line following the first ":" in dump-keys output.
    public_key = str(re.search(r":\n(?P<public_key>.*)", dump_keys_output).group("public_key"))
    node_endpoint = service_configs[0].attributes["rpc_endpoint"]
    # prompt_pattern doesn't work at the moment
    for load_node in load_nodes:
        ssh_client = SSHShell(host=load_node, login=login, private_key_path=pkey)
        # Locate the k6 installation directory on the load node.
        path = ssh_client.exec(r"sudo find . -name 'k6' -exec dirname {} \; -quit").stdout.strip(
            "\n"
        )
        frostfs_authmate_exec = FrostfsAuthmate(ssh_client, FROSTFS_AUTHMATE_PATH)
        issue_secret_output = frostfs_authmate_exec.secret.issue(
            wallet=f"{path}/scenarios/files/wallet.json",
            peer=node_endpoint,
            bearer_rules=f"{path}/scenarios/files/rules.json",
            gate_public_key=public_key,
            container_placement_policy=container_placement_policy,
            container_policy=f"{path}/scenarios/files/policy.json",
            wallet_password="",
        ).stdout
        # Scrape the issued credentials out of the authmate output.
        aws_access_key_id = str(
            re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
                "aws_access_key_id"
            )
        )
        aws_secret_access_key = str(
            re.search(
                r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output
            ).group("aws_secret_access_key")
        )
        # prompt_pattern doesn't work at the moment
        configure_input = [
            InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
            InteractiveInput(
                prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key
            ),
            # Accept defaults for region and output format.
            InteractiveInput(prompt_pattern=r".*", input=""),
            InteractiveInput(prompt_pattern=r".*", input=""),
        ]
        ssh_client.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
@allure.title("MultiNode K6 Run")
def multi_node_k6_run(k6_instances: list) -> dict:
    """
    Run the load on every K6 instance in parallel and return the metrics
    averaged across all instances.

    Args:
        k6_instances: configured K6 runners to execute concurrently
    Returns:
        dict mapping each metric name to its average over all instances
    """
    collected = []
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = [executor.submit(run_k6_load, instance) for instance in k6_instances]
        for done in concurrent.futures.as_completed(futures):
            collected.append(asdict(done.result()))

    averaged = {}
    total = len(collected)
    for metrics in collected:
        for name, value in metrics.items():
            share = value / total
            if name in averaged:
                averaged[name] += share
            else:
                averaged[name] = share
    return averaged
@allure.step("Delete Objects")
def delete_objects(
    storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster
) -> None:
    """
    Deletes given storage objects and verifies the deletion took effect.

    For every object: issues delete, records the returned tombstone OID and
    checks the tombstone header; then ticks an epoch so GC can run and
    asserts each object is reported as already removed.

    Args:
        storage_objects: list of objects to delete
        shell: executor for cli command
        cluster: cluster whose default RPC endpoint is used for all requests
    """

    with allure.step("Delete objects"):
        for storage_object in storage_objects:
            storage_object.tombstone = delete_object(
                storage_object.wallet_file_path,
                storage_object.cid,
                storage_object.oid,
                shell=shell,
                endpoint=cluster.default_rpc_endpoint,
            )
            verify_head_tombstone(
                wallet_path=storage_object.wallet_file_path,
                cid=storage_object.cid,
                oid_ts=storage_object.tombstone,
                oid=storage_object.oid,
                shell=shell,
                endpoint=cluster.default_rpc_endpoint,
            )

    # Give the nodes an epoch plus a grace period to actually purge the data.
    tick_epoch(shell, cluster)
    sleep(CLEANUP_TIMEOUT)

    with allure.step("Get objects and check errors"):
        for storage_object in storage_objects:
            with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
                get_object(
                    storage_object.wallet_file_path,
                    storage_object.cid,
                    storage_object.oid,
                    shell=shell,
                    endpoint=cluster.default_rpc_endpoint,
                )
import Cluster 14 | from pytest_tests.helpers.container import create_container 15 | from pytest_tests.helpers.file_helper import generate_file 16 | from pytest_tests.helpers.frostfs_verbs import put_object_to_random_node 17 | from pytest_tests.resources.common import WALLET_CONFIG, WALLET_PASS 18 | 19 | OBJECT_COUNT = 5 20 | 21 | 22 | @dataclass 23 | class Wallet: 24 | wallet_path: Optional[str] = None 25 | config_path: Optional[str] = None 26 | 27 | 28 | @dataclass 29 | class Wallets: 30 | wallets: dict[EACLRole, list[Wallet]] 31 | 32 | def get_wallet(self, role: EACLRole = EACLRole.USER) -> Wallet: 33 | return self.wallets[role][0] 34 | 35 | def get_wallets_list(self, role: EACLRole = EACLRole.USER) -> list[Wallet]: 36 | return self.wallets[role] 37 | 38 | 39 | @pytest.fixture(scope="module") 40 | def wallets(default_wallet, temp_directory, cluster: Cluster) -> Wallets: 41 | other_wallets_paths = [ 42 | os.path.join(temp_directory, f"{str(uuid.uuid4())}.json") for _ in range(2) 43 | ] 44 | for other_wallet_path in other_wallets_paths: 45 | wallet_utils.init_wallet(other_wallet_path, WALLET_PASS) 46 | 47 | ir_node = cluster.ir_nodes[0] 48 | storage_node = cluster.storage_nodes[0] 49 | 50 | ir_wallet_path = ir_node.get_wallet_path() 51 | ir_wallet_config = ir_node.get_wallet_config_path() 52 | 53 | storage_wallet_path = storage_node.get_wallet_path() 54 | storage_wallet_config = storage_node.get_wallet_config_path() 55 | 56 | yield Wallets( 57 | wallets={ 58 | EACLRole.USER: [Wallet(wallet_path=default_wallet, config_path=WALLET_CONFIG)], 59 | EACLRole.OTHERS: [ 60 | Wallet(wallet_path=other_wallet_path, config_path=WALLET_CONFIG) 61 | for other_wallet_path in other_wallets_paths 62 | ], 63 | EACLRole.SYSTEM: [ 64 | Wallet(wallet_path=ir_wallet_path, config_path=ir_wallet_config), 65 | Wallet(wallet_path=storage_wallet_path, config_path=storage_wallet_config), 66 | ], 67 | } 68 | ) 69 | 70 | 71 | @pytest.fixture(scope="module") 72 | def 
file_path(simple_object_size): 73 | yield generate_file(simple_object_size) 74 | 75 | 76 | @pytest.fixture(scope="function") 77 | def eacl_container_with_objects( 78 | wallets: Wallets, client_shell: Shell, cluster: Cluster, file_path: str 79 | ): 80 | user_wallet = wallets.get_wallet() 81 | with allure.step("Create eACL public container"): 82 | cid = create_container( 83 | user_wallet.wallet_path, 84 | basic_acl=PUBLIC_ACL, 85 | shell=client_shell, 86 | endpoint=cluster.default_rpc_endpoint, 87 | ) 88 | 89 | with allure.step("Add test objects to container"): 90 | objects_oids = [ 91 | put_object_to_random_node( 92 | user_wallet.wallet_path, 93 | file_path, 94 | cid, 95 | attributes={"key1": "val1", "key": val, "key2": "abc"}, 96 | shell=client_shell, 97 | cluster=cluster, 98 | ) 99 | for val in range(OBJECT_COUNT) 100 | ] 101 | 102 | yield cid, objects_oids, file_path 103 | 104 | # with allure.step('Delete eACL public container'): 105 | # delete_container(user_wallet, cid) 106 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/acl/test_acl.py: -------------------------------------------------------------------------------- 1 | import allure 2 | import pytest 3 | from frostfs_testlib.resources.common import PRIVATE_ACL_F, PUBLIC_ACL_F, READONLY_ACL_F 4 | 5 | from pytest_tests.helpers.acl import EACLRole 6 | from pytest_tests.helpers.container import create_container 7 | from pytest_tests.helpers.container_access import ( 8 | check_full_access_to_container, 9 | check_no_access_to_container, 10 | check_read_only_container, 11 | ) 12 | from pytest_tests.helpers.frostfs_verbs import put_object_to_random_node 13 | from pytest_tests.steps.cluster_test_base import ClusterTestBase 14 | 15 | 16 | @pytest.mark.sanity 17 | @pytest.mark.smoke 18 | @pytest.mark.acl 19 | @pytest.mark.acl_basic 20 | class TestACLBasic(ClusterTestBase): 21 | @pytest.fixture(scope="function") 22 | def public_container(self, wallets): 23 | 
user_wallet = wallets.get_wallet() 24 | with allure.step("Create public container"): 25 | cid_public = create_container( 26 | user_wallet.wallet_path, 27 | basic_acl=PUBLIC_ACL_F, 28 | shell=self.shell, 29 | endpoint=self.cluster.default_rpc_endpoint, 30 | ) 31 | 32 | yield cid_public 33 | 34 | # with allure.step('Delete public container'): 35 | # delete_container(user_wallet.wallet_path, cid_public) 36 | 37 | @pytest.fixture(scope="function") 38 | def private_container(self, wallets): 39 | user_wallet = wallets.get_wallet() 40 | with allure.step("Create private container"): 41 | cid_private = create_container( 42 | user_wallet.wallet_path, 43 | basic_acl=PRIVATE_ACL_F, 44 | shell=self.shell, 45 | endpoint=self.cluster.default_rpc_endpoint, 46 | ) 47 | 48 | yield cid_private 49 | 50 | # with allure.step('Delete private container'): 51 | # delete_container(user_wallet.wallet_path, cid_private) 52 | 53 | @pytest.fixture(scope="function") 54 | def read_only_container(self, wallets): 55 | user_wallet = wallets.get_wallet() 56 | with allure.step("Create public readonly container"): 57 | cid_read_only = create_container( 58 | user_wallet.wallet_path, 59 | basic_acl=READONLY_ACL_F, 60 | shell=self.shell, 61 | endpoint=self.cluster.default_rpc_endpoint, 62 | ) 63 | 64 | yield cid_read_only 65 | 66 | # with allure.step('Delete public readonly container'): 67 | # delete_container(user_wallet.wallet_path, cid_read_only) 68 | 69 | @allure.title("Test basic ACL on public container") 70 | def test_basic_acl_public(self, wallets, public_container, file_path): 71 | """ 72 | Test basic ACL set during public container creation. 
73 | """ 74 | user_wallet = wallets.get_wallet() 75 | other_wallet = wallets.get_wallet(role=EACLRole.OTHERS) 76 | cid = public_container 77 | for wallet, desc in ((user_wallet, "owner"), (other_wallet, "other users")): 78 | with allure.step("Add test objects to container"): 79 | # We create new objects for each wallet because check_full_access_to_container 80 | # deletes the object 81 | owner_object_oid = put_object_to_random_node( 82 | user_wallet.wallet_path, 83 | file_path, 84 | cid, 85 | shell=self.shell, 86 | cluster=self.cluster, 87 | attributes={"created": "owner"}, 88 | ) 89 | other_object_oid = put_object_to_random_node( 90 | other_wallet.wallet_path, 91 | file_path, 92 | cid, 93 | shell=self.shell, 94 | cluster=self.cluster, 95 | attributes={"created": "other"}, 96 | ) 97 | with allure.step(f"Check {desc} has full access to public container"): 98 | check_full_access_to_container( 99 | wallet.wallet_path, 100 | cid, 101 | owner_object_oid, 102 | file_path, 103 | shell=self.shell, 104 | cluster=self.cluster, 105 | ) 106 | check_full_access_to_container( 107 | wallet.wallet_path, 108 | cid, 109 | other_object_oid, 110 | file_path, 111 | shell=self.shell, 112 | cluster=self.cluster, 113 | ) 114 | 115 | @allure.title("Test basic ACL on private container") 116 | def test_basic_acl_private(self, wallets, private_container, file_path): 117 | """ 118 | Test basic ACL set during private container creation. 
119 | """ 120 | user_wallet = wallets.get_wallet() 121 | other_wallet = wallets.get_wallet(role=EACLRole.OTHERS) 122 | cid = private_container 123 | with allure.step("Add test objects to container"): 124 | owner_object_oid = put_object_to_random_node( 125 | user_wallet.wallet_path, file_path, cid, shell=self.shell, cluster=self.cluster 126 | ) 127 | 128 | with allure.step("Check only owner has full access to private container"): 129 | with allure.step("Check no one except owner has access to operations with container"): 130 | check_no_access_to_container( 131 | other_wallet.wallet_path, 132 | cid, 133 | owner_object_oid, 134 | file_path, 135 | shell=self.shell, 136 | cluster=self.cluster, 137 | ) 138 | 139 | with allure.step("Check owner has full access to private container"): 140 | check_full_access_to_container( 141 | user_wallet.wallet_path, 142 | cid, 143 | owner_object_oid, 144 | file_path, 145 | shell=self.shell, 146 | cluster=self.cluster, 147 | ) 148 | 149 | @allure.title("Test basic ACL on readonly container") 150 | def test_basic_acl_readonly(self, wallets, client_shell, read_only_container, file_path): 151 | """ 152 | Test basic ACL Operations for Read-Only Container. 
153 | """ 154 | user_wallet = wallets.get_wallet() 155 | other_wallet = wallets.get_wallet(role=EACLRole.OTHERS) 156 | cid = read_only_container 157 | 158 | with allure.step("Add test objects to container"): 159 | object_oid = put_object_to_random_node( 160 | user_wallet.wallet_path, file_path, cid, shell=client_shell, cluster=self.cluster 161 | ) 162 | 163 | with allure.step("Check other has read-only access to operations with container"): 164 | check_read_only_container( 165 | other_wallet.wallet_path, 166 | cid, 167 | object_oid, 168 | file_path, 169 | shell=client_shell, 170 | cluster=self.cluster, 171 | ) 172 | 173 | with allure.step("Check owner has full access to public container"): 174 | check_full_access_to_container( 175 | user_wallet.wallet_path, 176 | cid, 177 | object_oid, 178 | file_path, 179 | shell=client_shell, 180 | cluster=self.cluster, 181 | ) 182 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/container/test_container.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import allure 4 | import pytest 5 | from frostfs_testlib.resources.common import PRIVATE_ACL_F 6 | 7 | from pytest_tests.helpers.container import ( 8 | create_container, 9 | delete_container, 10 | get_container, 11 | list_containers, 12 | wait_for_container_creation, 13 | wait_for_container_deletion, 14 | ) 15 | from pytest_tests.helpers.utility import placement_policy_from_container 16 | from pytest_tests.steps.cluster_test_base import ClusterTestBase 17 | 18 | 19 | @pytest.mark.container 20 | @pytest.mark.sanity 21 | @pytest.mark.container 22 | class TestContainer(ClusterTestBase): 23 | @pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"]) 24 | @pytest.mark.smoke 25 | def test_container_creation(self, default_wallet, name): 26 | scenario_title = f"with name {name}" if name else "without name" 27 | 
allure.dynamic.title(f"User can create container {scenario_title}") 28 | 29 | wallet = default_wallet 30 | with open(wallet) as file: 31 | json_wallet = json.load(file) 32 | 33 | placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X" 34 | cid = create_container( 35 | wallet, 36 | rule=placement_rule, 37 | name=name, 38 | shell=self.shell, 39 | endpoint=self.cluster.default_rpc_endpoint, 40 | ) 41 | 42 | containers = list_containers( 43 | wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint 44 | ) 45 | assert cid in containers, f"Expected container {cid} in containers: {containers}" 46 | 47 | container_info: str = get_container( 48 | wallet, 49 | cid, 50 | json_mode=False, 51 | shell=self.shell, 52 | endpoint=self.cluster.default_rpc_endpoint, 53 | ) 54 | container_info = ( 55 | container_info.casefold() 56 | ) # To ignore case when comparing with expected values 57 | 58 | info_to_check = { 59 | f"basic ACL: {PRIVATE_ACL_F} (private)", 60 | f"owner ID: {json_wallet.get('accounts')[0].get('address')}", 61 | f"container ID: {cid}", 62 | } 63 | if name: 64 | info_to_check.add(f"Name={name}") 65 | 66 | with allure.step("Check container has correct information"): 67 | expected_policy = placement_rule.casefold() 68 | actual_policy = placement_policy_from_container(container_info) 69 | assert ( 70 | actual_policy == expected_policy 71 | ), f"Expected policy\n{expected_policy} but got policy\n{actual_policy}" 72 | 73 | for info in info_to_check: 74 | expected_info = info.casefold() 75 | assert ( 76 | expected_info in container_info 77 | ), f"Expected {expected_info} in container info:\n{container_info}" 78 | 79 | with allure.step("Delete container and check it was deleted"): 80 | delete_container( 81 | wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint 82 | ) 83 | self.tick_epoch() 84 | wait_for_container_deletion( 85 | wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint 86 | ) 87 | 88 | @allure.title("Parallel 
container creation and deletion") 89 | def test_container_creation_deletion_parallel(self, default_wallet): 90 | containers_count = 3 91 | wallet = default_wallet 92 | placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X" 93 | 94 | cids: list[str] = [] 95 | with allure.step(f"Create {containers_count} containers"): 96 | for _ in range(containers_count): 97 | cids.append( 98 | create_container( 99 | wallet, 100 | rule=placement_rule, 101 | await_mode=False, 102 | shell=self.shell, 103 | endpoint=self.cluster.default_rpc_endpoint, 104 | wait_for_creation=False, 105 | ) 106 | ) 107 | 108 | with allure.step(f"Wait for containers occur in container list"): 109 | for cid in cids: 110 | wait_for_container_creation( 111 | wallet, 112 | cid, 113 | sleep_interval=containers_count, 114 | shell=self.shell, 115 | endpoint=self.cluster.default_rpc_endpoint, 116 | ) 117 | 118 | with allure.step("Delete containers and check they were deleted"): 119 | for cid in cids: 120 | delete_container( 121 | wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint 122 | ) 123 | self.tick_epoch() 124 | wait_for_container_deletion( 125 | wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint 126 | ) 127 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/failovers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TrueCloudLab/frostfs-testcases/565d740239dc07f53434433d4035b5933866acd3/pytest_tests/testsuites/failovers/__init__.py -------------------------------------------------------------------------------- /pytest_tests/testsuites/failovers/test_failover_network.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from random import choices 3 | from time import sleep 4 | 5 | import allure 6 | import pytest 7 | from frostfs_testlib.resources.common import PUBLIC_ACL 8 | 
9 | from pytest_tests.helpers.cluster import StorageNode 10 | from pytest_tests.helpers.container import create_container 11 | from pytest_tests.helpers.failover_utils import ( 12 | wait_all_storage_nodes_returned, 13 | wait_object_replication, 14 | ) 15 | from pytest_tests.helpers.file_helper import generate_file, get_file_hash 16 | from pytest_tests.helpers.frostfs_verbs import get_object, put_object_to_random_node 17 | from pytest_tests.helpers.iptables_helper import IpTablesHelper 18 | from pytest_tests.steps.cluster_test_base import ClusterTestBase 19 | 20 | logger = logging.getLogger("NeoLogger") 21 | STORAGE_NODE_COMMUNICATION_PORT = "8080" 22 | STORAGE_NODE_COMMUNICATION_PORT_TLS = "8082" 23 | PORTS_TO_BLOCK = [STORAGE_NODE_COMMUNICATION_PORT, STORAGE_NODE_COMMUNICATION_PORT_TLS] 24 | blocked_nodes: list[StorageNode] = [] 25 | 26 | 27 | @pytest.mark.failover 28 | @pytest.mark.failover_network 29 | class TestFailoverNetwork(ClusterTestBase): 30 | @pytest.fixture(autouse=True) 31 | @allure.step("Restore network") 32 | def restore_network(self): 33 | yield 34 | 35 | not_empty = len(blocked_nodes) != 0 36 | for node in list(blocked_nodes): 37 | with allure.step(f"Restore network at host for {node.label}"): 38 | IpTablesHelper.restore_input_traffic_to_port(node.host.get_shell(), PORTS_TO_BLOCK) 39 | blocked_nodes.remove(node) 40 | if not_empty: 41 | wait_all_storage_nodes_returned(self.cluster) 42 | 43 | @allure.title("Block Storage node traffic") 44 | def test_block_storage_node_traffic( 45 | self, default_wallet, require_multiple_hosts, simple_object_size 46 | ): 47 | """ 48 | Block storage nodes traffic using iptables and wait for replication for objects. 
49 | """ 50 | wallet = default_wallet 51 | placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" 52 | wakeup_node_timeout = 10 # timeout to let nodes detect that traffic has blocked 53 | nodes_to_block_count = 2 54 | 55 | source_file_path = generate_file(simple_object_size) 56 | cid = create_container( 57 | wallet, 58 | shell=self.shell, 59 | endpoint=self.cluster.default_rpc_endpoint, 60 | rule=placement_rule, 61 | basic_acl=PUBLIC_ACL, 62 | ) 63 | oid = put_object_to_random_node( 64 | wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster 65 | ) 66 | 67 | nodes = wait_object_replication( 68 | cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes 69 | ) 70 | 71 | logger.info(f"Nodes are {nodes}") 72 | nodes_to_block = nodes 73 | if nodes_to_block_count > len(nodes): 74 | # TODO: the intent of this logic is not clear, need to revisit 75 | nodes_to_block = choices(nodes, k=2) 76 | 77 | excluded_nodes = [] 78 | for node in nodes_to_block: 79 | with allure.step(f"Block incoming traffic at node {node} on port {PORTS_TO_BLOCK}"): 80 | blocked_nodes.append(node) 81 | excluded_nodes.append(node) 82 | IpTablesHelper.drop_input_traffic_to_port(node.host.get_shell(), PORTS_TO_BLOCK) 83 | sleep(wakeup_node_timeout) 84 | 85 | with allure.step(f"Check object is not stored on node {node}"): 86 | new_nodes = wait_object_replication( 87 | cid, 88 | oid, 89 | 2, 90 | shell=self.shell, 91 | nodes=list(set(self.cluster.storage_nodes) - set(excluded_nodes)), 92 | ) 93 | assert node not in new_nodes 94 | 95 | with allure.step(f"Check object data is not corrupted"): 96 | got_file_path = get_object( 97 | wallet, cid, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell 98 | ) 99 | assert get_file_hash(source_file_path) == get_file_hash(got_file_path) 100 | 101 | for node in nodes_to_block: 102 | with allure.step(f"Unblock incoming traffic at host {node} on port {PORTS_TO_BLOCK}"): 103 | 
IpTablesHelper.restore_input_traffic_to_port(node.host.get_shell(), PORTS_TO_BLOCK) 104 | blocked_nodes.remove(node) 105 | sleep(wakeup_node_timeout) 106 | 107 | with allure.step(f"Check object data is not corrupted"): 108 | new_nodes = wait_object_replication( 109 | cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes 110 | ) 111 | 112 | got_file_path = get_object( 113 | wallet, cid, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint() 114 | ) 115 | assert get_file_hash(source_file_path) == get_file_hash(got_file_path) 116 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/failovers/test_failover_storage.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import allure 4 | import pytest 5 | from frostfs_testlib.hosting import Host 6 | from frostfs_testlib.resources.common import PUBLIC_ACL 7 | from frostfs_testlib.shell import CommandOptions 8 | 9 | from pytest_tests.helpers.cluster import Cluster, StorageNode 10 | from pytest_tests.helpers.container import create_container 11 | from pytest_tests.helpers.failover_utils import ( 12 | wait_all_storage_nodes_returned, 13 | wait_object_replication, 14 | ) 15 | from pytest_tests.helpers.file_helper import generate_file, get_file_hash 16 | from pytest_tests.helpers.frostfs_verbs import get_object, put_object_to_random_node 17 | from pytest_tests.steps.cluster_test_base import ClusterTestBase 18 | 19 | logger = logging.getLogger("NeoLogger") 20 | stopped_nodes: list[StorageNode] = [] 21 | 22 | 23 | @pytest.fixture(scope="function", autouse=True) 24 | @allure.step("Return all stopped hosts") 25 | def after_run_return_all_stopped_hosts(cluster: Cluster): 26 | yield 27 | return_stopped_hosts(cluster) 28 | 29 | 30 | def panic_reboot_host(host: Host) -> None: 31 | shell = host.get_shell() 32 | shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"') 33 | 34 | options = 
CommandOptions(close_stdin=True, timeout=1, check=False) 35 | shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options) 36 | 37 | 38 | def return_stopped_hosts(cluster: Cluster) -> None: 39 | for node in list(stopped_nodes): 40 | with allure.step(f"Start host {node}"): 41 | node.host.start_host() 42 | stopped_nodes.remove(node) 43 | 44 | wait_all_storage_nodes_returned(cluster) 45 | 46 | 47 | @pytest.mark.failover 48 | class TestFailoverStorage(ClusterTestBase): 49 | @allure.title("Lose and return storage node's host") 50 | @pytest.mark.parametrize("hard_reboot", [True, False]) 51 | @pytest.mark.failover_reboot 52 | def test_lose_storage_node_host( 53 | self, default_wallet, hard_reboot: bool, require_multiple_hosts, simple_object_size 54 | ): 55 | wallet = default_wallet 56 | placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" 57 | source_file_path = generate_file(simple_object_size) 58 | cid = create_container( 59 | wallet, 60 | shell=self.shell, 61 | endpoint=self.cluster.default_rpc_endpoint, 62 | rule=placement_rule, 63 | basic_acl=PUBLIC_ACL, 64 | ) 65 | oid = put_object_to_random_node( 66 | wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster 67 | ) 68 | nodes = wait_object_replication( 69 | cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes 70 | ) 71 | 72 | for node in nodes: 73 | stopped_nodes.append(node) 74 | 75 | with allure.step(f"Stop host {node}"): 76 | node.host.stop_host("hard" if hard_reboot else "soft") 77 | 78 | new_nodes = wait_object_replication( 79 | cid, 80 | oid, 81 | 2, 82 | shell=self.shell, 83 | nodes=list(set(self.cluster.storage_nodes) - {node}), 84 | ) 85 | assert all(old_node not in new_nodes for old_node in nodes) 86 | 87 | with allure.step("Check object data is not corrupted"): 88 | got_file_path = get_object( 89 | wallet, cid, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell 90 | ) 91 | assert get_file_hash(source_file_path) == get_file_hash(got_file_path) 92 | 93 | with 
allure.step("Return all hosts"): 94 | return_stopped_hosts(self.cluster) 95 | 96 | with allure.step("Check object data is not corrupted"): 97 | new_nodes = wait_object_replication( 98 | cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes 99 | ) 100 | got_file_path = get_object( 101 | wallet, cid, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint() 102 | ) 103 | assert get_file_hash(source_file_path) == get_file_hash(got_file_path) 104 | 105 | @allure.title("Panic storage node's host") 106 | @pytest.mark.parametrize("sequence", [True, False]) 107 | @pytest.mark.failover_panic 108 | def test_panic_storage_node_host( 109 | self, default_wallet, require_multiple_hosts, sequence: bool, simple_object_size 110 | ): 111 | wallet = default_wallet 112 | placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X" 113 | source_file_path = generate_file(simple_object_size) 114 | cid = create_container( 115 | wallet, 116 | shell=self.shell, 117 | endpoint=self.cluster.default_rpc_endpoint, 118 | rule=placement_rule, 119 | basic_acl=PUBLIC_ACL, 120 | ) 121 | oid = put_object_to_random_node( 122 | wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster 123 | ) 124 | 125 | nodes = wait_object_replication( 126 | cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes 127 | ) 128 | allure.attach( 129 | "\n".join([str(node) for node in nodes]), 130 | "Current nodes with object", 131 | allure.attachment_type.TEXT, 132 | ) 133 | 134 | new_nodes: list[StorageNode] = [] 135 | for node in nodes: 136 | with allure.step(f"Hard reboot host {node} via magic SysRq option"): 137 | panic_reboot_host(node.host) 138 | if sequence: 139 | try: 140 | new_nodes = wait_object_replication( 141 | cid, 142 | oid, 143 | 2, 144 | shell=self.shell, 145 | nodes=list(set(self.cluster.storage_nodes) - {node}), 146 | ) 147 | except AssertionError: 148 | new_nodes = wait_object_replication( 149 | cid, 150 | oid, 151 | 2, 152 | shell=self.shell, 153 | 
nodes=self.cluster.storage_nodes, 154 | ) 155 | 156 | allure.attach( 157 | "\n".join([str(new_node) for new_node in new_nodes]), 158 | f"Nodes with object after {node} fail", 159 | allure.attachment_type.TEXT, 160 | ) 161 | 162 | if not sequence: 163 | new_nodes = wait_object_replication( 164 | cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes 165 | ) 166 | allure.attach( 167 | "\n".join([str(new_node) for new_node in new_nodes]), 168 | "Nodes with object after nodes fail", 169 | allure.attachment_type.TEXT, 170 | ) 171 | 172 | got_file_path = get_object( 173 | wallet, cid, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint() 174 | ) 175 | assert get_file_hash(source_file_path) == get_file_hash(got_file_path) 176 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/load/test_load.py: -------------------------------------------------------------------------------- 1 | import allure 2 | import pytest 3 | from frostfs_testlib.hosting import Hosting 4 | 5 | from pytest_tests.helpers.k6 import LoadParams 6 | from pytest_tests.resources.common import ( 7 | HTTP_GATE_SERVICE_NAME_REGEX, 8 | S3_GATE_SERVICE_NAME_REGEX, 9 | STORAGE_NODE_SERVICE_NAME_REGEX, 10 | ) 11 | from pytest_tests.resources.load_params import ( 12 | CONTAINER_PLACEMENT_POLICY, 13 | CONTAINERS_COUNT, 14 | DELETERS, 15 | LOAD_NODE_SSH_PRIVATE_KEY_PATH, 16 | LOAD_NODE_SSH_USER, 17 | LOAD_NODES, 18 | LOAD_NODES_COUNT, 19 | LOAD_TIME, 20 | LOAD_TYPE, 21 | OBJ_COUNT, 22 | OBJ_SIZE, 23 | OUT_FILE, 24 | READERS, 25 | STORAGE_NODE_COUNT, 26 | WRITERS, 27 | ) 28 | from pytest_tests.steps.cluster_test_base import ClusterTestBase 29 | from pytest_tests.steps.load import ( 30 | clear_cache_and_data, 31 | get_services_endpoints, 32 | init_s3_client, 33 | multi_node_k6_run, 34 | prepare_k6_instances, 35 | start_stopped_nodes, 36 | stop_unused_nodes, 37 | ) 38 | 39 | ENDPOINTS_ATTRIBUTES = { 40 | "http": {"regex": 
HTTP_GATE_SERVICE_NAME_REGEX, "endpoint_attribute": "endpoint"}, 41 | "grpc": {"regex": STORAGE_NODE_SERVICE_NAME_REGEX, "endpoint_attribute": "rpc_endpoint"}, 42 | "s3": {"regex": S3_GATE_SERVICE_NAME_REGEX, "endpoint_attribute": "endpoint"}, 43 | } 44 | 45 | 46 | @pytest.mark.load 47 | class TestLoad(ClusterTestBase): 48 | @pytest.fixture(autouse=True) 49 | def clear_cache_and_data(self, hosting: Hosting): 50 | clear_cache_and_data(hosting=hosting) 51 | yield 52 | start_stopped_nodes() 53 | 54 | @pytest.fixture(scope="session", autouse=True) 55 | def init_s3_client(self, hosting: Hosting): 56 | if "s3" in list(map(lambda x: x.lower(), LOAD_TYPE)): 57 | init_s3_client( 58 | load_nodes=LOAD_NODES, 59 | login=LOAD_NODE_SSH_USER, 60 | pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH, 61 | hosting=hosting, 62 | container_placement_policy=CONTAINER_PLACEMENT_POLICY, 63 | ) 64 | 65 | @pytest.mark.parametrize("obj_size, out_file", list(zip(OBJ_SIZE, OUT_FILE))) 66 | @pytest.mark.parametrize("writers, readers, deleters", list(zip(WRITERS, READERS, DELETERS))) 67 | @pytest.mark.parametrize("load_time", LOAD_TIME) 68 | @pytest.mark.parametrize("node_count", STORAGE_NODE_COUNT) 69 | @pytest.mark.parametrize("containers_count", CONTAINERS_COUNT) 70 | @pytest.mark.parametrize("load_type", LOAD_TYPE) 71 | @pytest.mark.parametrize("obj_count", OBJ_COUNT) 72 | @pytest.mark.parametrize("load_nodes_count", LOAD_NODES_COUNT) 73 | @pytest.mark.benchmark 74 | @pytest.mark.grpc 75 | def test_custom_load( 76 | self, 77 | obj_size, 78 | out_file, 79 | writers, 80 | readers, 81 | deleters, 82 | load_time, 83 | node_count, 84 | obj_count, 85 | load_type, 86 | load_nodes_count, 87 | containers_count, 88 | hosting: Hosting, 89 | ): 90 | allure.dynamic.title( 91 | f"Load test - node_count = {node_count}, " 92 | f"writers = {writers} readers = {readers}, " 93 | f"deleters = {deleters}, obj_size = {obj_size}, " 94 | f"load_time = {load_time}" 95 | ) 96 | stop_unused_nodes(self.cluster.storage_nodes, 
node_count) 97 | with allure.step("Get endpoints"): 98 | endpoints_list = get_services_endpoints( 99 | hosting=hosting, 100 | service_name_regex=ENDPOINTS_ATTRIBUTES[LOAD_TYPE]["regex"], 101 | endpoint_attribute=ENDPOINTS_ATTRIBUTES[LOAD_TYPE]["endpoint_attribute"], 102 | ) 103 | endpoints = ",".join(endpoints_list[:node_count]) 104 | load_params = LoadParams( 105 | endpoint=endpoints, 106 | obj_size=obj_size, 107 | containers_count=containers_count, 108 | out_file=out_file, 109 | obj_count=obj_count, 110 | writers=writers, 111 | readers=readers, 112 | deleters=deleters, 113 | load_time=load_time, 114 | load_type=load_type, 115 | ) 116 | load_nodes_list = LOAD_NODES[:load_nodes_count] 117 | k6_load_instances = prepare_k6_instances( 118 | load_nodes=load_nodes_list, 119 | login=LOAD_NODE_SSH_USER, 120 | pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH, 121 | load_params=load_params, 122 | ) 123 | with allure.step("Run load"): 124 | multi_node_k6_run(k6_load_instances) 125 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/object/test_object_api_bearer.py: -------------------------------------------------------------------------------- 1 | import allure 2 | import pytest 3 | from frostfs_testlib.resources.common import EACL_PUBLIC_READ_WRITE 4 | from frostfs_testlib.shell import Shell 5 | from pytest import FixtureRequest 6 | 7 | from pytest_tests.helpers.acl import ( 8 | EACLAccess, 9 | EACLOperation, 10 | EACLRole, 11 | EACLRule, 12 | form_bearertoken_file, 13 | ) 14 | from pytest_tests.helpers.cluster import Cluster 15 | from pytest_tests.helpers.container import ( 16 | REP_2_FOR_3_NODES_PLACEMENT_RULE, 17 | SINGLE_PLACEMENT_RULE, 18 | StorageContainer, 19 | StorageContainerInfo, 20 | create_container, 21 | ) 22 | from pytest_tests.helpers.epoch import get_epoch 23 | from pytest_tests.helpers.frostfs_verbs import delete_object, get_object 24 | from pytest_tests.helpers.test_control import expect_not_raises 25 | from 
pytest_tests.helpers.wallet import WalletFile 26 | from pytest_tests.steps.cluster_test_base import ClusterTestBase 27 | from pytest_tests.steps.storage_object import StorageObjectInfo 28 | 29 | 30 | @pytest.fixture(scope="module") 31 | @allure.title("Create bearer token for OTHERS with all operations allowed for all containers") 32 | def bearer_token_file_all_allow(default_wallet: str, client_shell: Shell, cluster: Cluster) -> str: 33 | bearer = form_bearertoken_file( 34 | default_wallet, 35 | "", 36 | [ 37 | EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS) 38 | for op in EACLOperation 39 | ], 40 | shell=client_shell, 41 | endpoint=cluster.default_rpc_endpoint, 42 | ) 43 | 44 | return bearer 45 | 46 | 47 | @pytest.fixture(scope="module") 48 | @allure.title("Create user container for bearer token usage") 49 | def user_container( 50 | default_wallet: str, client_shell: Shell, cluster: Cluster, request: FixtureRequest 51 | ) -> StorageContainer: 52 | container_id = create_container( 53 | default_wallet, 54 | shell=client_shell, 55 | rule=request.param, 56 | basic_acl=EACL_PUBLIC_READ_WRITE, 57 | endpoint=cluster.default_rpc_endpoint, 58 | ) 59 | # Deliberately using s3gate wallet here to test bearer token 60 | s3gate = cluster.s3gates[0] 61 | return StorageContainer( 62 | StorageContainerInfo(container_id, WalletFile.from_node(s3gate)), 63 | client_shell, 64 | cluster, 65 | ) 66 | 67 | 68 | @pytest.fixture() 69 | def storage_objects( 70 | user_container: StorageContainer, 71 | bearer_token_file_all_allow: str, 72 | request: FixtureRequest, 73 | client_shell: Shell, 74 | cluster: Cluster, 75 | ) -> list[StorageObjectInfo]: 76 | epoch = get_epoch(client_shell, cluster) 77 | storage_objects: list[StorageObjectInfo] = [] 78 | for node in cluster.storage_nodes: 79 | storage_objects.append( 80 | user_container.generate_object( 81 | request.param, 82 | epoch + 3, 83 | bearer_token=bearer_token_file_all_allow, 84 | endpoint=node.get_rpc_endpoint(), 85 | 
            )
        )
    # NOTE(review): tail of a fixture whose definition starts above this chunk.
    return storage_objects


@pytest.mark.smoke
@pytest.mark.bearer
class TestObjectApiWithBearerToken(ClusterTestBase):
    @pytest.mark.parametrize(
        "user_container",
        [SINGLE_PLACEMENT_RULE],
        ids=["single replica for all nodes placement rule"],
        indirect=True,
    )
    @pytest.mark.parametrize(
        "storage_objects",
        [pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
        ids=["simple object", "complex object"],
        indirect=True,
    )
    def test_delete_object_with_s3_wallet_bearer(
        self,
        storage_objects: list[StorageObjectInfo],
        bearer_token_file_all_allow: str,
        request: FixtureRequest,
    ):
        """Delete pre-created objects using the s3gate wallet plus an all-allow bearer token.

        Expects every delete to succeed even though the wallet is not the
        container owner, because the bearer token grants the operations.
        """
        allure.dynamic.title(
            f"Object can be deleted from any node using s3gate wallet with bearer token for {request.node.callspec.id}"
        )

        s3_gate_wallet = self.cluster.s3gates[0]
        with allure.step("Try to delete each object from first storage node"):
            for storage_object in storage_objects:
                with expect_not_raises():
                    delete_object(
                        s3_gate_wallet.get_wallet_path(),
                        storage_object.cid,
                        storage_object.oid,
                        self.shell,
                        endpoint=self.cluster.default_rpc_endpoint,
                        bearer=bearer_token_file_all_allow,
                        wallet_config=s3_gate_wallet.get_wallet_config_path(),
                    )

    @pytest.mark.parametrize(
        "user_container",
        [REP_2_FOR_3_NODES_PLACEMENT_RULE],
        ids=["2 replicas for 3 nodes placement rule"],
        indirect=True,
    )
    @pytest.mark.parametrize(
        "file_size",
        [pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
        ids=["simple object", "complex object"],
    )
    def test_get_object_with_s3_wallet_bearer_from_all_nodes(
        self,
        user_container: StorageContainer,
        file_size: int,
        bearer_token_file_all_allow: str,
        request: FixtureRequest,
    ):
        """Fetch one object from every storage node using the s3gate wallet plus bearer token.

        With only 2 replicas on 3 nodes, some nodes serve the object remotely,
        so this exercises both local and redirected GET paths.
        """
        allure.dynamic.title(
            f"Object can be fetched from any node using s3gate wallet with bearer token for {request.node.callspec.id}"
        )

        s3_gate_wallet = self.cluster.s3gates[0]
        with allure.step("Put one object to container"):
            epoch = self.get_epoch()
            # expire_at is epoch + 3 so the object outlives the whole loop below.
            storage_object = user_container.generate_object(
                file_size, epoch + 3, bearer_token=bearer_token_file_all_allow
            )

        with allure.step("Try to fetch object from each storage node"):
            for node in self.cluster.storage_nodes:
                with expect_not_raises():
                    get_object(
                        s3_gate_wallet.get_wallet_path(),
                        storage_object.cid,
                        storage_object.oid,
                        self.shell,
                        endpoint=node.get_rpc_endpoint(),
                        bearer=bearer_token_file_all_allow,
                        wallet_config=s3_gate_wallet.get_wallet_config_path(),
                    )
--------------------------------------------------------------------------------
/pytest_tests/testsuites/object/test_object_lifetime.py:
--------------------------------------------------------------------------------
import logging

import allure
import pytest
from frostfs_testlib.resources.common import OBJECT_NOT_FOUND
from pytest import FixtureRequest

from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.epoch import get_epoch
from pytest_tests.helpers.file_helper import generate_file, get_file_hash
from pytest_tests.helpers.frostfs_verbs import (
    get_object_from_random_node,
    put_object_to_random_node,
)
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
from pytest_tests.steps.cluster_test_base import ClusterTestBase

logger = logging.getLogger("NeoLogger")


@pytest.mark.sanity
@pytest.mark.grpc_api
class TestObjectApiLifetime(ClusterTestBase):
    @allure.title("Test object life time")
    @pytest.mark.parametrize(
        "object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")], 28 | ids=["simple object", "complex object"], 29 | ) 30 | def test_object_api_lifetime( 31 | self, default_wallet: str, request: FixtureRequest, object_size: int 32 | ): 33 | """ 34 | Test object deleted after expiration epoch. 35 | """ 36 | 37 | allure.dynamic.title(f"Test object life time for {request.node.callspec.id}") 38 | 39 | wallet = default_wallet 40 | endpoint = self.cluster.default_rpc_endpoint 41 | cid = create_container(wallet, self.shell, endpoint) 42 | 43 | file_path = generate_file(object_size) 44 | file_hash = get_file_hash(file_path) 45 | epoch = get_epoch(self.shell, self.cluster) 46 | 47 | oid = put_object_to_random_node( 48 | wallet, file_path, cid, self.shell, self.cluster, expire_at=epoch + 1 49 | ) 50 | got_file = get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster) 51 | assert get_file_hash(got_file) == file_hash 52 | 53 | with allure.step("Tick two epochs"): 54 | for _ in range(2): 55 | self.tick_epoch() 56 | 57 | # Wait for GC, because object with expiration is counted as alive until GC removes it 58 | wait_for_gc_pass_on_storage_nodes() 59 | 60 | with allure.step("Check object deleted because it expires-on epoch"): 61 | with pytest.raises(Exception, match=OBJECT_NOT_FOUND): 62 | get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster) 63 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/payment/test_balance.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | import allure 5 | import pytest 6 | import yaml 7 | from frostfs_testlib.cli import FrostfsCli 8 | from frostfs_testlib.shell import CommandResult, Shell 9 | 10 | from pytest_tests.helpers.wallet import WalletFactory, WalletFile 11 | from pytest_tests.resources.common import FREE_STORAGE, FROSTFS_CLI_EXEC, WALLET_CONFIG 12 | from 
pytest_tests.steps.cluster_test_base import ClusterTestBase 13 | 14 | logger = logging.getLogger("NeoLogger") 15 | DEPOSIT_AMOUNT = 30 16 | 17 | 18 | @pytest.mark.sanity 19 | @pytest.mark.payments 20 | @pytest.mark.skipif(FREE_STORAGE, reason="Test only works on public network with paid storage") 21 | class TestBalanceAccounting(ClusterTestBase): 22 | @pytest.fixture(scope="class") 23 | def main_wallet(self, wallet_factory: WalletFactory) -> WalletFile: 24 | return wallet_factory.create_wallet() 25 | 26 | @pytest.fixture(scope="class") 27 | def other_wallet(self, wallet_factory: WalletFactory) -> WalletFile: 28 | return wallet_factory.create_wallet() 29 | 30 | @pytest.fixture(scope="class") 31 | def cli(self, client_shell: Shell) -> FrostfsCli: 32 | return FrostfsCli(client_shell, FROSTFS_CLI_EXEC, WALLET_CONFIG) 33 | 34 | @allure.step("Check deposit amount") 35 | def check_amount(self, result: CommandResult) -> None: 36 | amount_str = result.stdout.rstrip() 37 | 38 | try: 39 | amount = int(amount_str) 40 | except Exception as ex: 41 | pytest.fail( 42 | f"Amount parse error, should be parsable as int({DEPOSIT_AMOUNT}), but given {amount_str}: {ex}" 43 | ) 44 | 45 | assert amount == DEPOSIT_AMOUNT 46 | 47 | @staticmethod 48 | @allure.step("Write config with API endpoint") 49 | def write_api_config(config_dir: str, endpoint: str, wallet: str) -> str: 50 | with open(WALLET_CONFIG, "r") as file: 51 | wallet_config = yaml.full_load(file) 52 | api_config = { 53 | **wallet_config, 54 | "rpc-endpoint": endpoint, 55 | "wallet": wallet, 56 | } 57 | api_config_file = os.path.join(config_dir, "frostfs-cli-api-config.yaml") 58 | with open(api_config_file, "w") as file: 59 | yaml.dump(api_config, file) 60 | return api_config_file 61 | 62 | @allure.title("Test balance request with wallet and address") 63 | def test_balance_wallet_address(self, main_wallet: WalletFile, cli: FrostfsCli): 64 | result = cli.accounting.balance( 65 | wallet=main_wallet.path, 66 | 
rpc_endpoint=self.cluster.default_rpc_endpoint, 67 | address=main_wallet.get_address(), 68 | ) 69 | 70 | self.check_amount(result) 71 | 72 | @allure.title("Test balance request with wallet only") 73 | def test_balance_wallet(self, main_wallet: WalletFile, cli: FrostfsCli): 74 | result = cli.accounting.balance( 75 | wallet=main_wallet.path, rpc_endpoint=self.cluster.default_rpc_endpoint 76 | ) 77 | self.check_amount(result) 78 | 79 | @allure.title("Test balance request with wallet and wrong address") 80 | def test_balance_wrong_address( 81 | self, main_wallet: WalletFile, other_wallet: WalletFile, cli: FrostfsCli 82 | ): 83 | with pytest.raises(Exception, match="address option must be specified and valid"): 84 | cli.accounting.balance( 85 | wallet=main_wallet.path, 86 | rpc_endpoint=self.cluster.default_rpc_endpoint, 87 | address=other_wallet.get_address(), 88 | ) 89 | 90 | @allure.title("Test balance request with config file") 91 | def test_balance_api(self, temp_directory: str, main_wallet: WalletFile, client_shell: Shell): 92 | config_file = self.write_api_config( 93 | config_dir=temp_directory, 94 | endpoint=self.cluster.default_rpc_endpoint, 95 | wallet=main_wallet.path, 96 | ) 97 | logger.info(f"Config with API endpoint: {config_file}") 98 | 99 | cli = FrostfsCli(client_shell, FROSTFS_CLI_EXEC, config_file=config_file) 100 | result = cli.accounting.balance() 101 | 102 | self.check_amount(result) 103 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/services/http_gate/test_http_bearer.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import allure 4 | import pytest 5 | from frostfs_testlib.resources.common import PUBLIC_ACL 6 | 7 | from pytest_tests.helpers.acl import ( 8 | EACLAccess, 9 | EACLOperation, 10 | EACLRole, 11 | EACLRule, 12 | bearer_token_base64_from_file, 13 | create_eacl, 14 | form_bearertoken_file, 15 | set_eacl, 16 | 
sign_bearer, 17 | wait_for_cache_expired, 18 | ) 19 | from pytest_tests.helpers.container import create_container 20 | from pytest_tests.helpers.file_helper import generate_file 21 | from pytest_tests.helpers.http_gate import get_object_and_verify_hashes, upload_via_http_gate_curl 22 | from pytest_tests.steps.cluster_test_base import ClusterTestBase 23 | 24 | logger = logging.getLogger("NeoLogger") 25 | 26 | 27 | @pytest.mark.sanity 28 | @pytest.mark.http_gate 29 | class Test_http_bearer(ClusterTestBase): 30 | PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X" 31 | 32 | @pytest.fixture(scope="class", autouse=True) 33 | @allure.title("[Class/Autouse]: Prepare wallet and deposit") 34 | def prepare_wallet(self, default_wallet): 35 | Test_http_bearer.wallet = default_wallet 36 | 37 | @pytest.fixture(scope="class") 38 | def user_container(self) -> str: 39 | return create_container( 40 | wallet=self.wallet, 41 | shell=self.shell, 42 | endpoint=self.cluster.default_rpc_endpoint, 43 | rule=self.PLACEMENT_RULE, 44 | basic_acl=PUBLIC_ACL, 45 | ) 46 | 47 | @pytest.fixture(scope="class") 48 | def eacl_deny_for_others(self, user_container: str) -> None: 49 | with allure.step(f"Set deny all operations for {EACLRole.OTHERS} via eACL"): 50 | eacl = EACLRule( 51 | access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=EACLOperation.PUT 52 | ) 53 | set_eacl( 54 | self.wallet, 55 | user_container, 56 | create_eacl(user_container, eacl, shell=self.shell), 57 | shell=self.shell, 58 | endpoint=self.cluster.default_rpc_endpoint, 59 | ) 60 | wait_for_cache_expired() 61 | 62 | @pytest.fixture(scope="class") 63 | def bearer_token_no_limit_for_others(self, user_container: str) -> str: 64 | with allure.step(f"Create bearer token for {EACLRole.OTHERS} with all operations allowed"): 65 | bearer = form_bearertoken_file( 66 | self.wallet, 67 | user_container, 68 | [ 69 | EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS) 70 | for op in EACLOperation 71 | ], 72 | 
shell=self.shell, 73 | endpoint=self.cluster.default_rpc_endpoint, 74 | sign=False, 75 | ) 76 | bearer_signed = f"{bearer}_signed" 77 | sign_bearer( 78 | shell=self.shell, 79 | wallet_path=self.wallet, 80 | eacl_rules_file_from=bearer, 81 | eacl_rules_file_to=bearer_signed, 82 | json=False, 83 | ) 84 | return bearer_token_base64_from_file(bearer_signed) 85 | 86 | @allure.title(f"[negative] Put object without bearer token for {EACLRole.OTHERS}") 87 | def test_unable_put_without_bearer_token( 88 | self, simple_object_size: int, user_container: str, eacl_deny_for_others 89 | ): 90 | eacl_deny_for_others 91 | upload_via_http_gate_curl( 92 | cid=user_container, 93 | filepath=generate_file(simple_object_size), 94 | endpoint=self.cluster.default_http_gate_endpoint, 95 | error_pattern="access to object operation denied", 96 | ) 97 | 98 | @pytest.mark.parametrize( 99 | "object_size", 100 | [pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")], 101 | ids=["simple object", "complex object"], 102 | ) 103 | def test_put_with_bearer_when_eacl_restrict( 104 | self, 105 | object_size: int, 106 | user_container: str, 107 | eacl_deny_for_others, 108 | bearer_token_no_limit_for_others: str, 109 | ): 110 | eacl_deny_for_others 111 | bearer = bearer_token_no_limit_for_others 112 | file_path = generate_file(object_size) 113 | with allure.step( 114 | f"Put object with bearer token for {EACLRole.OTHERS}, then get and verify hashes" 115 | ): 116 | headers = [f" -H 'Authorization: Bearer {bearer}'"] 117 | oid = upload_via_http_gate_curl( 118 | cid=user_container, 119 | filepath=file_path, 120 | endpoint=self.cluster.default_http_gate_endpoint, 121 | headers=headers, 122 | ) 123 | get_object_and_verify_hashes( 124 | oid=oid, 125 | file_name=file_path, 126 | wallet=self.wallet, 127 | cid=user_container, 128 | shell=self.shell, 129 | nodes=self.cluster.storage_nodes, 130 | endpoint=self.cluster.default_http_gate_endpoint, 131 | ) 132 | 
--------------------------------------------------------------------------------
/pytest_tests/testsuites/services/http_gate/test_http_object.py:
--------------------------------------------------------------------------------
import logging

import allure
import pytest
from frostfs_testlib.resources.common import PUBLIC_ACL

from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.file_helper import generate_file
from pytest_tests.helpers.frostfs_verbs import put_object_to_random_node
from pytest_tests.helpers.http_gate import (
    get_object_and_verify_hashes,
    get_object_by_attr_and_verify_hashes,
    try_to_get_object_via_passed_request_and_expect_error,
)
from pytest_tests.steps.cluster_test_base import ClusterTestBase

logger = logging.getLogger("NeoLogger")


@pytest.mark.sanity
@pytest.mark.http_gate
class Test_http_object(ClusterTestBase):
    PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"

    @pytest.fixture(scope="class", autouse=True)
    @allure.title("[Class/Autouse]: Prepare wallet and deposit")
    def prepare_wallet(self, default_wallet):
        # Stored on the class so tests can reach it via self.wallet.
        Test_http_object.wallet = default_wallet

    @allure.title("Test Put over gRPC, Get over HTTP")
    @pytest.mark.parametrize(
        "object_size",
        [pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
        ids=["simple object", "complex object"],
    )
    def test_object_put_get_attributes(self, object_size: int):
        """
        Test that object can be put using gRPC interface and get using HTTP.

        Steps:
        1. Create object;
        2. Put objects using gRPC (frostfs-cli) with attributes [--attributes chapter1=peace,chapter2=war];
        3. Download object using HTTP gate (https://github.com/TrueCloudLab/frostfs-http-gw#downloading);
        4. Compare hashes between original and downloaded object;
        5. [Negative] Try to the get object with specified attributes and `get` request: [get/$CID/chapter1/peace];
        6. Download the object with specified attributes and `get_by_attribute` request: [get_by_attribute/$CID/chapter1/peace];
        7. Compare hashes between original and downloaded object;
        8. [Negative] Try to the get object via `get_by_attribute` request: [get_by_attribute/$CID/$OID];


        Expected result:
        Hashes must be the same.
        """
        with allure.step("Create public container"):
            cid = create_container(
                self.wallet,
                shell=self.shell,
                endpoint=self.cluster.default_rpc_endpoint,
                rule=self.PLACEMENT_RULE,
                basic_acl=PUBLIC_ACL,
            )

        # Generate file
        file_path = generate_file(object_size)

        # List of Key=Value attributes
        obj_key1 = "chapter1"
        obj_value1 = "peace"
        obj_key2 = "chapter2"
        obj_value2 = "war"

        # Prepare for grpc PUT request
        key_value1 = obj_key1 + "=" + obj_value1
        key_value2 = obj_key2 + "=" + obj_value2

        with allure.step("Put objects using gRPC [--attributes chapter1=peace,chapter2=war]"):
            oid = put_object_to_random_node(
                wallet=self.wallet,
                path=file_path,
                cid=cid,
                shell=self.shell,
                cluster=self.cluster,
                attributes=f"{key_value1},{key_value2}",
            )
        with allure.step("Get object and verify hashes [ get/$CID/$OID ]"):
            get_object_and_verify_hashes(
                oid=oid,
                file_name=file_path,
                wallet=self.wallet,
                cid=cid,
                shell=self.shell,
                nodes=self.cluster.storage_nodes,
                endpoint=self.cluster.default_http_gate_endpoint,
            )
        with allure.step("[Negative] try to get object: [get/$CID/chapter1/peace]"):
            # attrs and expected_err_msg are reused by the later steps below.
            attrs = {obj_key1: obj_value1, obj_key2: obj_value2}
            request = f"/get/{cid}/{obj_key1}/{obj_value1}"
            expected_err_msg = "Failed to get object via HTTP gate:"
            try_to_get_object_via_passed_request_and_expect_error(
                cid=cid,
oid=oid, 102 | error_pattern=expected_err_msg, 103 | http_request_path=request, 104 | attrs=attrs, 105 | endpoint=self.cluster.default_http_gate_endpoint, 106 | ) 107 | 108 | with allure.step( 109 | "Download the object with attribute [get_by_attribute/$CID/chapter1/peace]" 110 | ): 111 | get_object_by_attr_and_verify_hashes( 112 | oid=oid, 113 | file_name=file_path, 114 | cid=cid, 115 | attrs=attrs, 116 | endpoint=self.cluster.default_http_gate_endpoint, 117 | ) 118 | with allure.step("[Negative] try to get object: get_by_attribute/$CID/$OID"): 119 | request = f"/get_by_attribute/{cid}/{oid}" 120 | try_to_get_object_via_passed_request_and_expect_error( 121 | cid=cid, 122 | oid=oid, 123 | error_pattern=expected_err_msg, 124 | http_request_path=request, 125 | endpoint=self.cluster.default_http_gate_endpoint, 126 | ) 127 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/services/http_gate/test_http_streaming.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import allure 4 | import pytest 5 | from frostfs_testlib.resources.common import PUBLIC_ACL 6 | 7 | from pytest_tests.helpers.container import create_container 8 | from pytest_tests.helpers.file_helper import generate_file 9 | from pytest_tests.helpers.http_gate import get_object_and_verify_hashes, upload_via_http_gate_curl 10 | from pytest_tests.steps.cluster_test_base import ClusterTestBase 11 | 12 | logger = logging.getLogger("NeoLogger") 13 | 14 | 15 | @pytest.mark.sanity 16 | @pytest.mark.http_gate 17 | class Test_http_streaming(ClusterTestBase): 18 | PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" 19 | 20 | @pytest.fixture(scope="class", autouse=True) 21 | @allure.title("[Class/Autouse]: Prepare wallet and deposit") 22 | def prepare_wallet(self, default_wallet): 23 | Test_http_streaming.wallet = default_wallet 24 | 25 | @allure.title("Test Put via pipe (steaming), Get over HTTP and 
verify hashes") 26 | @pytest.mark.parametrize( 27 | "object_size", 28 | [pytest.lazy_fixture("complex_object_size")], 29 | ids=["complex object"], 30 | ) 31 | def test_object_can_be_put_get_by_streaming(self, object_size: int): 32 | """ 33 | Test that object can be put using gRPC interface and get using HTTP. 34 | 35 | Steps: 36 | 1. Create big object; 37 | 2. Put object using curl with pipe (streaming); 38 | 3. Download object using HTTP gate (https://github.com/TrueCloudLab/frostfs-http-gw#downloading); 39 | 4. Compare hashes between original and downloaded object; 40 | 41 | Expected result: 42 | Hashes must be the same. 43 | """ 44 | with allure.step("Create public container and verify container creation"): 45 | cid = create_container( 46 | self.wallet, 47 | shell=self.shell, 48 | endpoint=self.cluster.default_rpc_endpoint, 49 | rule=self.PLACEMENT_RULE, 50 | basic_acl=PUBLIC_ACL, 51 | ) 52 | with allure.step("Allocate big object"): 53 | # Generate file 54 | file_path = generate_file(object_size) 55 | 56 | with allure.step( 57 | "Put objects using curl utility and Get object and verify hashes [ get/$CID/$OID ]" 58 | ): 59 | oid = upload_via_http_gate_curl( 60 | cid=cid, filepath=file_path, endpoint=self.cluster.default_http_gate_endpoint 61 | ) 62 | get_object_and_verify_hashes( 63 | oid=oid, 64 | file_name=file_path, 65 | wallet=self.wallet, 66 | cid=cid, 67 | shell=self.shell, 68 | nodes=self.cluster.storage_nodes, 69 | endpoint=self.cluster.default_http_gate_endpoint, 70 | ) 71 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/services/s3_gate/test_s3_ACL.py: -------------------------------------------------------------------------------- 1 | import allure 2 | import pytest 3 | 4 | from pytest_tests.helpers.file_helper import generate_file 5 | from pytest_tests.helpers.s3_helper import assert_s3_acl, object_key_from_file_path 6 | from pytest_tests.steps import s3_gate_bucket, s3_gate_object 7 | from 
pytest_tests.steps.s3_gate_base import TestS3GateBase 8 | 9 | 10 | def pytest_generate_tests(metafunc): 11 | if "s3_client" in metafunc.fixturenames: 12 | metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True) 13 | 14 | 15 | @pytest.mark.sanity 16 | @pytest.mark.acl 17 | @pytest.mark.s3_gate 18 | class TestS3GateACL(TestS3GateBase): 19 | @allure.title("Test S3: Object ACL") 20 | def test_s3_object_ACL(self, bucket, simple_object_size): 21 | file_path = generate_file(simple_object_size) 22 | file_name = object_key_from_file_path(file_path) 23 | 24 | with allure.step("Put object into bucket, Check ACL is empty"): 25 | s3_gate_object.put_object_s3(self.s3_client, bucket, file_path) 26 | obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name) 27 | assert obj_acl == [], f"Expected ACL is empty, got {obj_acl}" 28 | 29 | with allure.step("Put object ACL = public-read"): 30 | s3_gate_object.put_object_acl_s3(self.s3_client, bucket, file_name, "public-read") 31 | obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name) 32 | assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers") 33 | 34 | with allure.step("Put object ACL = private"): 35 | s3_gate_object.put_object_acl_s3(self.s3_client, bucket, file_name, "private") 36 | obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name) 37 | assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser") 38 | 39 | with allure.step( 40 | "Put object with grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers" 41 | ): 42 | s3_gate_object.put_object_acl_s3( 43 | self.s3_client, 44 | bucket, 45 | file_name, 46 | grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers", 47 | ) 48 | obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name) 49 | assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers") 50 | 51 | @allure.title("Test S3: Bucket ACL") 52 | def test_s3_bucket_ACL(self): 53 | with allure.step("Create 
bucket with ACL = public-read-write"): 54 | bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, True, acl="public-read-write") 55 | bucket_acl = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket) 56 | assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers") 57 | 58 | with allure.step("Change bucket ACL to private"): 59 | s3_gate_bucket.put_bucket_acl_s3(self.s3_client, bucket, acl="private") 60 | bucket_acl = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket) 61 | assert_s3_acl(acl_grants=bucket_acl, permitted_users="CanonicalUser") 62 | 63 | with allure.step( 64 | "Change bucket acl to --grant-write uri=http://acs.amazonaws.com/groups/global/AllUsers" 65 | ): 66 | s3_gate_bucket.put_bucket_acl_s3( 67 | self.s3_client, 68 | bucket, 69 | grant_write="uri=http://acs.amazonaws.com/groups/global/AllUsers", 70 | ) 71 | bucket_acl = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket) 72 | assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers") 73 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/services/s3_gate/test_s3_bucket.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta 2 | 3 | import allure 4 | import pytest 5 | 6 | from pytest_tests.helpers.file_helper import generate_file 7 | from pytest_tests.helpers.s3_helper import ( 8 | assert_object_lock_mode, 9 | assert_s3_acl, 10 | check_objects_in_bucket, 11 | object_key_from_file_path, 12 | ) 13 | from pytest_tests.steps import s3_gate_bucket, s3_gate_object 14 | from pytest_tests.steps.s3_gate_base import TestS3GateBase 15 | 16 | 17 | def pytest_generate_tests(metafunc): 18 | if "s3_client" in metafunc.fixturenames: 19 | metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True) 20 | 21 | 22 | @pytest.mark.sanity 23 | @pytest.mark.s3_gate 24 | @pytest.mark.s3_gate_bucket 25 | class TestS3GateBucket(TestS3GateBase): 26 | @allure.title("Test 
S3: Create Bucket with different ACL") 27 | def test_s3_create_bucket_with_ACL(self): 28 | 29 | with allure.step("Create bucket with ACL private"): 30 | bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, True, acl="private") 31 | bucket_acl = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket) 32 | assert_s3_acl(acl_grants=bucket_acl, permitted_users="CanonicalUser") 33 | 34 | with allure.step("Create bucket with ACL = public-read"): 35 | bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client, True, acl="public-read") 36 | bucket_acl_1 = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket_1) 37 | assert_s3_acl(acl_grants=bucket_acl_1, permitted_users="AllUsers") 38 | 39 | with allure.step("Create bucket with ACL public-read-write"): 40 | bucket_2 = s3_gate_bucket.create_bucket_s3( 41 | self.s3_client, True, acl="public-read-write" 42 | ) 43 | bucket_acl_2 = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket_2) 44 | assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers") 45 | 46 | with allure.step("Create bucket with ACL = authenticated-read"): 47 | bucket_3 = s3_gate_bucket.create_bucket_s3( 48 | self.s3_client, True, acl="authenticated-read" 49 | ) 50 | bucket_acl_3 = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket_3) 51 | assert_s3_acl(acl_grants=bucket_acl_3, permitted_users="AllUsers") 52 | 53 | @allure.title("Test S3: Create Bucket with different ACL by grand") 54 | def test_s3_create_bucket_with_grands(self): 55 | 56 | with allure.step("Create bucket with --grant-read"): 57 | bucket = s3_gate_bucket.create_bucket_s3( 58 | self.s3_client, 59 | True, 60 | grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers", 61 | ) 62 | bucket_acl = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket) 63 | assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers") 64 | 65 | with allure.step("Create bucket with --grant-wtite"): 66 | bucket_1 = s3_gate_bucket.create_bucket_s3( 67 | self.s3_client, 68 | True, 69 | 
grant_write="uri=http://acs.amazonaws.com/groups/global/AllUsers", 70 | ) 71 | bucket_acl_1 = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket_1) 72 | assert_s3_acl(acl_grants=bucket_acl_1, permitted_users="AllUsers") 73 | 74 | with allure.step("Create bucket with --grant-full-control"): 75 | bucket_2 = s3_gate_bucket.create_bucket_s3( 76 | self.s3_client, 77 | True, 78 | grant_full_control="uri=http://acs.amazonaws.com/groups/global/AllUsers", 79 | ) 80 | bucket_acl_2 = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket_2) 81 | assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers") 82 | 83 | @allure.title("Test S3: create bucket with object lock") 84 | def test_s3_bucket_object_lock(self, simple_object_size): 85 | file_path = generate_file(simple_object_size) 86 | file_name = object_key_from_file_path(file_path) 87 | 88 | with allure.step("Create bucket with --no-object-lock-enabled-for-bucket"): 89 | bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, False) 90 | date_obj = datetime.utcnow() + timedelta(days=1) 91 | with pytest.raises( 92 | Exception, match=r".*Object Lock configuration does not exist for this bucket.*" 93 | ): 94 | # An error occurred (ObjectLockConfigurationNotFoundError) when calling the PutObject operation (reached max retries: 0): 95 | # Object Lock configuration does not exist for this bucket 96 | s3_gate_object.put_object_s3( 97 | self.s3_client, 98 | bucket, 99 | file_path, 100 | ObjectLockMode="COMPLIANCE", 101 | ObjectLockRetainUntilDate=date_obj.strftime("%Y-%m-%dT%H:%M:%S"), 102 | ) 103 | with allure.step("Create bucket with --object-lock-enabled-for-bucket"): 104 | bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client, True) 105 | date_obj_1 = datetime.utcnow() + timedelta(days=1) 106 | s3_gate_object.put_object_s3( 107 | self.s3_client, 108 | bucket_1, 109 | file_path, 110 | ObjectLockMode="COMPLIANCE", 111 | ObjectLockRetainUntilDate=date_obj_1.strftime("%Y-%m-%dT%H:%M:%S"), 112 | 
ObjectLockLegalHoldStatus="ON", 113 | ) 114 | assert_object_lock_mode( 115 | self.s3_client, bucket_1, file_name, "COMPLIANCE", date_obj_1, "ON" 116 | ) 117 | 118 | @allure.title("Test S3: delete bucket") 119 | def test_s3_delete_bucket(self, simple_object_size): 120 | file_path_1 = generate_file(simple_object_size) 121 | file_name_1 = object_key_from_file_path(file_path_1) 122 | file_path_2 = generate_file(simple_object_size) 123 | file_name_2 = object_key_from_file_path(file_path_2) 124 | bucket = s3_gate_bucket.create_bucket_s3(self.s3_client) 125 | 126 | with allure.step("Put two objects into bucket"): 127 | s3_gate_object.put_object_s3(self.s3_client, bucket, file_path_1) 128 | s3_gate_object.put_object_s3(self.s3_client, bucket, file_path_2) 129 | check_objects_in_bucket(self.s3_client, bucket, [file_name_1, file_name_2]) 130 | 131 | with allure.step("Try to delete not empty bucket and get error"): 132 | with pytest.raises(Exception, match=r".*The bucket you tried to delete is not empty.*"): 133 | s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket) 134 | 135 | with allure.step("Delete object in bucket"): 136 | s3_gate_object.delete_object_s3(self.s3_client, bucket, file_name_1) 137 | s3_gate_object.delete_object_s3(self.s3_client, bucket, file_name_2) 138 | check_objects_in_bucket(self.s3_client, bucket, []) 139 | 140 | with allure.step(f"Delete empty bucket"): 141 | s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket) 142 | with pytest.raises(Exception, match=r".*Not Found.*"): 143 | s3_gate_bucket.head_bucket(self.s3_client, bucket) 144 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/services/s3_gate/test_s3_multipart.py: -------------------------------------------------------------------------------- 1 | import allure 2 | import pytest 3 | 4 | from pytest_tests.helpers.file_helper import generate_file, get_file_hash, split_file 5 | from pytest_tests.helpers.s3_helper import ( 6 | 
check_objects_in_bucket, 7 | object_key_from_file_path, 8 | set_bucket_versioning, 9 | ) 10 | from pytest_tests.steps import s3_gate_bucket, s3_gate_object 11 | from pytest_tests.steps.s3_gate_base import TestS3GateBase 12 | 13 | PART_SIZE = 5 * 1024 * 1024 14 | 15 | 16 | def pytest_generate_tests(metafunc): 17 | if "s3_client" in metafunc.fixturenames: 18 | metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True) 19 | 20 | 21 | @pytest.mark.sanity 22 | @pytest.mark.s3_gate 23 | @pytest.mark.s3_gate_multipart 24 | class TestS3GateMultipart(TestS3GateBase): 25 | @allure.title("Test S3 Object Multipart API") 26 | def test_s3_object_multipart(self): 27 | bucket = s3_gate_bucket.create_bucket_s3(self.s3_client) 28 | set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED) 29 | parts_count = 5 30 | file_name_large = generate_file(PART_SIZE * parts_count) # 5Mb - min part 31 | object_key = object_key_from_file_path(file_name_large) 32 | part_files = split_file(file_name_large, parts_count) 33 | parts = [] 34 | 35 | with allure.step("Upload first part"): 36 | upload_id = s3_gate_object.create_multipart_upload_s3( 37 | self.s3_client, bucket, object_key 38 | ) 39 | uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket) 40 | etag = s3_gate_object.upload_part_s3( 41 | self.s3_client, bucket, object_key, upload_id, 1, part_files[0] 42 | ) 43 | parts.append((1, etag)) 44 | got_parts = s3_gate_object.list_parts_s3(self.s3_client, bucket, object_key, upload_id) 45 | assert len(got_parts) == 1, f"Expected {1} parts, got\n{got_parts}" 46 | 47 | with allure.step("Upload last parts"): 48 | for part_id, file_path in enumerate(part_files[1:], start=2): 49 | etag = s3_gate_object.upload_part_s3( 50 | self.s3_client, bucket, object_key, upload_id, part_id, file_path 51 | ) 52 | parts.append((part_id, etag)) 53 | got_parts = s3_gate_object.list_parts_s3(self.s3_client, bucket, object_key, upload_id) 54 | 
s3_gate_object.complete_multipart_upload_s3( 55 | self.s3_client, bucket, object_key, upload_id, parts 56 | ) 57 | assert len(got_parts) == len( 58 | part_files 59 | ), f"Expected {parts_count} parts, got\n{got_parts}" 60 | 61 | with allure.step("Check upload list is empty"): 62 | uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket) 63 | assert not uploads, f"Expected there is no uploads in bucket {bucket}" 64 | 65 | with allure.step("Check we can get whole object from bucket"): 66 | got_object = s3_gate_object.get_object_s3(self.s3_client, bucket, object_key) 67 | assert get_file_hash(got_object) == get_file_hash(file_name_large) 68 | 69 | @allure.title("Test S3 Multipart abord") 70 | def test_s3_abort_multipart(self): 71 | bucket = s3_gate_bucket.create_bucket_s3(self.s3_client) 72 | set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED) 73 | parts_count = 5 74 | file_name_large = generate_file(PART_SIZE * parts_count) # 5Mb - min part 75 | object_key = object_key_from_file_path(file_name_large) 76 | part_files = split_file(file_name_large, parts_count) 77 | parts = [] 78 | 79 | with allure.step("Upload first part"): 80 | upload_id = s3_gate_object.create_multipart_upload_s3( 81 | self.s3_client, bucket, object_key 82 | ) 83 | uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket) 84 | etag = s3_gate_object.upload_part_s3( 85 | self.s3_client, bucket, object_key, upload_id, 1, part_files[0] 86 | ) 87 | parts.append((1, etag)) 88 | got_parts = s3_gate_object.list_parts_s3(self.s3_client, bucket, object_key, upload_id) 89 | assert len(got_parts) == 1, f"Expected {1} parts, got\n{got_parts}" 90 | 91 | with allure.step("Abort multipart upload"): 92 | s3_gate_object.abort_multipart_uploads_s3(self.s3_client, bucket, object_key, upload_id) 93 | uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket) 94 | assert not uploads, f"Expected there is no uploads in bucket {bucket}" 95 
| 96 | @allure.title("Test S3 Upload Part Copy") 97 | def test_s3_multipart_copy(self): 98 | bucket = s3_gate_bucket.create_bucket_s3(self.s3_client) 99 | set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED) 100 | parts_count = 3 101 | file_name_large = generate_file(PART_SIZE * parts_count) # 5Mb - min part 102 | object_key = object_key_from_file_path(file_name_large) 103 | part_files = split_file(file_name_large, parts_count) 104 | parts = [] 105 | objs = [] 106 | 107 | with allure.step(f"Put {parts_count} objec in bucket"): 108 | for part in part_files: 109 | s3_gate_object.put_object_s3(self.s3_client, bucket, part) 110 | objs.append(object_key_from_file_path(part)) 111 | check_objects_in_bucket(self.s3_client, bucket, objs) 112 | 113 | with allure.step("Create multipart upload object"): 114 | upload_id = s3_gate_object.create_multipart_upload_s3( 115 | self.s3_client, bucket, object_key 116 | ) 117 | uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket) 118 | assert uploads, f"Expected there are uploads in bucket {bucket}" 119 | 120 | with allure.step("Start multipart upload"): 121 | for part_id, obj_key in enumerate(objs, start=1): 122 | etag = s3_gate_object.upload_part_copy_s3( 123 | self.s3_client, bucket, object_key, upload_id, part_id, f"{bucket}/{obj_key}" 124 | ) 125 | parts.append((part_id, etag)) 126 | got_parts = s3_gate_object.list_parts_s3(self.s3_client, bucket, object_key, upload_id) 127 | s3_gate_object.complete_multipart_upload_s3( 128 | self.s3_client, bucket, object_key, upload_id, parts 129 | ) 130 | assert len(got_parts) == len( 131 | part_files 132 | ), f"Expected {parts_count} parts, got\n{got_parts}" 133 | 134 | with allure.step("Check we can get whole object from bucket"): 135 | got_object = s3_gate_object.get_object_s3(self.s3_client, bucket, object_key) 136 | assert get_file_hash(got_object) == get_file_hash(file_name_large) 137 | 
-------------------------------------------------------------------------------- /pytest_tests/testsuites/services/s3_gate/test_s3_policy.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import allure 4 | import pytest 5 | 6 | from pytest_tests.helpers.container import search_container_by_name 7 | from pytest_tests.helpers.file_helper import generate_file 8 | from pytest_tests.helpers.s3_helper import ( 9 | check_objects_in_bucket, 10 | object_key_from_file_path, 11 | set_bucket_versioning, 12 | ) 13 | from pytest_tests.helpers.storage_policy import get_simple_object_copies 14 | from pytest_tests.steps import s3_gate_bucket, s3_gate_object 15 | from pytest_tests.steps.s3_gate_base import TestS3GateBase 16 | 17 | 18 | def pytest_generate_tests(metafunc): 19 | policy = f"{os.getcwd()}/pytest_tests/resources/files/policy.json" 20 | if "s3_client" in metafunc.fixturenames: 21 | metafunc.parametrize( 22 | "s3_client", 23 | [("aws cli", policy), ("boto3", policy)], 24 | indirect=True, 25 | ids=["aws cli", "boto3"], 26 | ) 27 | 28 | 29 | @pytest.mark.s3_gate 30 | class TestS3GatePolicy(TestS3GateBase): 31 | @allure.title("Test S3: Verify bucket creation with retention policy applied") 32 | def test_s3_bucket_location(self, simple_object_size): 33 | file_path_1 = generate_file(simple_object_size) 34 | file_name_1 = object_key_from_file_path(file_path_1) 35 | file_path_2 = generate_file(simple_object_size) 36 | file_name_2 = object_key_from_file_path(file_path_2) 37 | 38 | with allure.step("Create two buckets with different bucket configuration"): 39 | bucket_1 = s3_gate_bucket.create_bucket_s3( 40 | self.s3_client, bucket_configuration="complex" 41 | ) 42 | set_bucket_versioning(self.s3_client, bucket_1, s3_gate_bucket.VersioningStatus.ENABLED) 43 | bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client, bucket_configuration="rep-3") 44 | set_bucket_versioning(self.s3_client, bucket_2, 
s3_gate_bucket.VersioningStatus.ENABLED) 45 | list_buckets = s3_gate_bucket.list_buckets_s3(self.s3_client) 46 | assert ( 47 | bucket_1 in list_buckets and bucket_2 in list_buckets 48 | ), f"Expected two buckets {bucket_1, bucket_2}, got {list_buckets}" 49 | 50 | # with allure.step("Check head buckets"): 51 | head_1 = s3_gate_bucket.head_bucket(self.s3_client, bucket_1) 52 | head_2 = s3_gate_bucket.head_bucket(self.s3_client, bucket_2) 53 | assert head_1 == {} or head_1.get("HEAD") == None, "Expected head is empty" 54 | assert head_2 == {} or head_2.get("HEAD") == None, "Expected head is empty" 55 | 56 | with allure.step("Put objects into buckets"): 57 | version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket_1, file_path_1) 58 | version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket_2, file_path_2) 59 | check_objects_in_bucket(self.s3_client, bucket_1, [file_name_1]) 60 | check_objects_in_bucket(self.s3_client, bucket_2, [file_name_2]) 61 | 62 | with allure.step("Check bucket location"): 63 | bucket_loc_1 = s3_gate_bucket.get_bucket_location(self.s3_client, bucket_1) 64 | bucket_loc_2 = s3_gate_bucket.get_bucket_location(self.s3_client, bucket_2) 65 | assert bucket_loc_1 == "complex" 66 | assert bucket_loc_2 == "rep-3" 67 | 68 | with allure.step("Check object policy"): 69 | cid_1 = search_container_by_name( 70 | self.wallet, bucket_1, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint 71 | ) 72 | copies_1 = get_simple_object_copies( 73 | wallet=self.wallet, 74 | cid=cid_1, 75 | oid=version_id_1, 76 | shell=self.shell, 77 | nodes=self.cluster.storage_nodes, 78 | ) 79 | assert copies_1 == 1 80 | cid_2 = search_container_by_name( 81 | self.wallet, bucket_2, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint 82 | ) 83 | copies_2 = get_simple_object_copies( 84 | wallet=self.wallet, 85 | cid=cid_2, 86 | oid=version_id_2, 87 | shell=self.shell, 88 | nodes=self.cluster.storage_nodes, 89 | ) 90 | assert copies_2 == 3 91 | 92 | 
@allure.title("Test S3: bucket policy ") 93 | def test_s3_bucket_policy(self): 94 | with allure.step("Create bucket with default policy"): 95 | bucket = s3_gate_bucket.create_bucket_s3(self.s3_client) 96 | set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED) 97 | 98 | with allure.step("GetBucketPolicy"): 99 | s3_gate_bucket.get_bucket_policy(self.s3_client, bucket) 100 | 101 | with allure.step("Put new policy"): 102 | custom_policy = f"file://{os.getcwd()}/pytest_tests/resources/files/bucket_policy.json" 103 | custom_policy = { 104 | "Version": "2008-10-17", 105 | "Id": "aaaa-bbbb-cccc-dddd", 106 | "Statement": [ 107 | { 108 | "Sid": "AddPerm", 109 | "Effect": "Allow", 110 | "Principal": {"AWS": "*"}, 111 | "Action": ["s3:GetObject"], 112 | "Resource": [f"arn:aws:s3:::{bucket}/*"], 113 | } 114 | ], 115 | } 116 | 117 | s3_gate_bucket.put_bucket_policy(self.s3_client, bucket, custom_policy) 118 | with allure.step("GetBucketPolicy"): 119 | policy_1 = s3_gate_bucket.get_bucket_policy(self.s3_client, bucket) 120 | print(policy_1) 121 | 122 | @allure.title("Test S3: bucket policy ") 123 | def test_s3_cors(self): 124 | with allure.step("Create bucket without cors"): 125 | bucket = s3_gate_bucket.create_bucket_s3(self.s3_client) 126 | set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED) 127 | 128 | with pytest.raises(Exception): 129 | bucket_cors = s3_gate_bucket.get_bucket_cors(self.s3_client, bucket) 130 | 131 | with allure.step("Put bucket cors"): 132 | cors = { 133 | "CORSRules": [ 134 | { 135 | "AllowedOrigins": ["http://www.example.com"], 136 | "AllowedHeaders": ["*"], 137 | "AllowedMethods": ["PUT", "POST", "DELETE"], 138 | "MaxAgeSeconds": 3000, 139 | "ExposeHeaders": ["x-amz-server-side-encryption"], 140 | }, 141 | { 142 | "AllowedOrigins": ["*"], 143 | "AllowedHeaders": ["Authorization"], 144 | "AllowedMethods": ["GET"], 145 | "MaxAgeSeconds": 3000, 146 | }, 147 | ] 148 | } 149 | 
s3_gate_bucket.put_bucket_cors(self.s3_client, bucket, cors) 150 | bucket_cors = s3_gate_bucket.get_bucket_cors(self.s3_client, bucket) 151 | assert bucket_cors == cors.get( 152 | "CORSRules" 153 | ), f"Expected corsrules must be {cors.get('CORSRules')}" 154 | 155 | with allure.step("delete bucket cors"): 156 | s3_gate_bucket.delete_bucket_cors(self.s3_client, bucket) 157 | 158 | with pytest.raises(Exception): 159 | bucket_cors = s3_gate_bucket.get_bucket_cors(self.s3_client, bucket) 160 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/services/s3_gate/test_s3_tagging.py: -------------------------------------------------------------------------------- 1 | from random import choice 2 | from string import ascii_letters 3 | from typing import Tuple 4 | 5 | import allure 6 | import pytest 7 | 8 | from pytest_tests.helpers.file_helper import generate_file 9 | from pytest_tests.helpers.s3_helper import ( 10 | check_tags_by_bucket, 11 | check_tags_by_object, 12 | object_key_from_file_path, 13 | ) 14 | from pytest_tests.steps import s3_gate_bucket, s3_gate_object 15 | from pytest_tests.steps.s3_gate_base import TestS3GateBase 16 | 17 | 18 | def pytest_generate_tests(metafunc): 19 | if "s3_client" in metafunc.fixturenames: 20 | metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True) 21 | 22 | 23 | @pytest.mark.sanity 24 | @pytest.mark.s3_gate 25 | @pytest.mark.s3_gate_tagging 26 | class TestS3GateTagging(TestS3GateBase): 27 | @staticmethod 28 | def create_tags(count: int) -> Tuple[list, list]: 29 | tags = [] 30 | for _ in range(count): 31 | tag_key = "".join(choice(ascii_letters) for _ in range(8)) 32 | tag_value = "".join(choice(ascii_letters) for _ in range(12)) 33 | tags.append((tag_key, tag_value)) 34 | return tags 35 | 36 | @allure.title("Test S3: Object tagging") 37 | def test_s3_object_tagging(self, bucket, simple_object_size): 38 | file_path = generate_file(simple_object_size) 39 | file_name = 
object_key_from_file_path(file_path) 40 | 41 | with allure.step("Put with 3 tags object into bucket"): 42 | tag_1 = "Tag1=Value1" 43 | s3_gate_object.put_object_s3(self.s3_client, bucket, file_path, Tagging=tag_1) 44 | got_tags = s3_gate_object.get_object_tagging(self.s3_client, bucket, file_name) 45 | assert got_tags, f"Expected tags, got {got_tags}" 46 | assert got_tags == [{"Key": "Tag1", "Value": "Value1"}], "Tags must be the same" 47 | 48 | with allure.step("Put 10 new tags for object"): 49 | tags_2 = self.create_tags(10) 50 | s3_gate_object.put_object_tagging(self.s3_client, bucket, file_name, tags=tags_2) 51 | check_tags_by_object(self.s3_client, bucket, file_name, tags_2, [("Tag1", "Value1")]) 52 | 53 | with allure.step("Put 10 extra new tags for object"): 54 | tags_3 = self.create_tags(10) 55 | s3_gate_object.put_object_tagging(self.s3_client, bucket, file_name, tags=tags_3) 56 | check_tags_by_object(self.s3_client, bucket, file_name, tags_3, tags_2) 57 | 58 | with allure.step("Copy one object with tag"): 59 | copy_obj_path_1 = s3_gate_object.copy_object_s3( 60 | self.s3_client, bucket, file_name, tagging_directive="COPY" 61 | ) 62 | check_tags_by_object(self.s3_client, bucket, copy_obj_path_1, tags_3, tags_2) 63 | 64 | with allure.step("Put 11 new tags to object and expect an error"): 65 | tags_4 = self.create_tags(11) 66 | with pytest.raises(Exception, match=r".*Object tags cannot be greater than 10*"): 67 | # An error occurred (BadRequest) when calling the PutObjectTagging operation: Object tags cannot be greater than 10 68 | s3_gate_object.put_object_tagging(self.s3_client, bucket, file_name, tags=tags_4) 69 | 70 | with allure.step("Put empty tag"): 71 | tags_5 = [] 72 | s3_gate_object.put_object_tagging(self.s3_client, bucket, file_name, tags=tags_5) 73 | check_tags_by_object(self.s3_client, bucket, file_name, []) 74 | 75 | with allure.step("Put 10 object tags"): 76 | tags_6 = self.create_tags(10) 77 | s3_gate_object.put_object_tagging(self.s3_client, 
bucket, file_name, tags=tags_6) 78 | check_tags_by_object(self.s3_client, bucket, file_name, tags_6) 79 | 80 | with allure.step("Delete tags by delete-object-tagging"): 81 | s3_gate_object.delete_object_tagging(self.s3_client, bucket, file_name) 82 | check_tags_by_object(self.s3_client, bucket, file_name, []) 83 | 84 | @allure.title("Test S3: bucket tagging") 85 | def test_s3_bucket_tagging(self, bucket): 86 | 87 | with allure.step("Put 10 bucket tags"): 88 | tags_1 = self.create_tags(10) 89 | s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, tags_1) 90 | check_tags_by_bucket(self.s3_client, bucket, tags_1) 91 | 92 | with allure.step("Put new 10 bucket tags"): 93 | tags_2 = self.create_tags(10) 94 | s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, tags_2) 95 | check_tags_by_bucket(self.s3_client, bucket, tags_2, tags_1) 96 | 97 | with allure.step("Put 11 new tags to bucket and expect an error"): 98 | tags_3 = self.create_tags(11) 99 | with pytest.raises(Exception, match=r".*Object tags cannot be greater than 10.*"): 100 | # An error occurred (BadRequest) when calling the PutBucketTagging operation (reached max retries: 0): Object tags cannot be greater than 10 101 | s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, tags_3) 102 | 103 | with allure.step("Put empty tag"): 104 | tags_4 = [] 105 | s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, tags_4) 106 | check_tags_by_bucket(self.s3_client, bucket, tags_4) 107 | 108 | with allure.step("Put new 10 bucket tags"): 109 | tags_5 = self.create_tags(10) 110 | s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, tags_5) 111 | check_tags_by_bucket(self.s3_client, bucket, tags_5, tags_2) 112 | 113 | with allure.step("Delete tags by delete-bucket-tagging"): 114 | s3_gate_bucket.delete_bucket_tagging(self.s3_client, bucket) 115 | check_tags_by_bucket(self.s3_client, bucket, []) 116 | -------------------------------------------------------------------------------- 
/pytest_tests/testsuites/services/s3_gate/test_s3_versioning.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import allure 4 | import pytest 5 | 6 | from pytest_tests.helpers.file_helper import generate_file, generate_file_with_content 7 | from pytest_tests.helpers.s3_helper import set_bucket_versioning 8 | from pytest_tests.steps import s3_gate_bucket, s3_gate_object 9 | from pytest_tests.steps.s3_gate_base import TestS3GateBase 10 | 11 | 12 | def pytest_generate_tests(metafunc): 13 | if "s3_client" in metafunc.fixturenames: 14 | metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True) 15 | 16 | 17 | @pytest.mark.sanity 18 | @pytest.mark.s3_gate 19 | @pytest.mark.s3_gate_versioning 20 | class TestS3GateVersioning(TestS3GateBase): 21 | @staticmethod 22 | def object_key_from_file_path(full_path: str) -> str: 23 | return os.path.basename(full_path) 24 | 25 | @allure.title("Test S3: try to disable versioning") 26 | def test_s3_version_off(self): 27 | 28 | bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, True) 29 | with pytest.raises(Exception): 30 | set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.SUSPENDED) 31 | 32 | @allure.title("Test S3: Enable and disable versioning") 33 | def test_s3_version(self, simple_object_size): 34 | file_path = generate_file(simple_object_size) 35 | file_name = self.object_key_from_file_path(file_path) 36 | bucket_objects = [file_name] 37 | bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, False) 38 | set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.SUSPENDED) 39 | 40 | with allure.step("Put object into bucket"): 41 | s3_gate_object.put_object_s3(self.s3_client, bucket, file_path) 42 | objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket) 43 | assert ( 44 | objects_list == bucket_objects 45 | ), f"Expected list with single objects in bucket, got {objects_list}" 46 | 
object_version = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket) 47 | actual_version = [ 48 | version.get("VersionId") 49 | for version in object_version 50 | if version.get("Key") == file_name 51 | ] 52 | assert actual_version == [ 53 | "null" 54 | ], f"Expected version is null in list-object-versions, got {object_version}" 55 | object_0 = s3_gate_object.head_object_s3(self.s3_client, bucket, file_name) 56 | assert ( 57 | object_0.get("VersionId") == "null" 58 | ), f"Expected version is null in head-object, got {object_0.get('VersionId')}" 59 | 60 | set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED) 61 | 62 | with allure.step("Put several versions of object into bucket"): 63 | version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_path) 64 | file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path) 65 | version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1) 66 | 67 | with allure.step("Check bucket shows all versions"): 68 | versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket) 69 | obj_versions = [ 70 | version.get("VersionId") for version in versions if version.get("Key") == file_name 71 | ] 72 | assert ( 73 | obj_versions.sort() == [version_id_1, version_id_2, "null"].sort() 74 | ), f"Expected object has versions: {version_id_1, version_id_2, 'null'}" 75 | 76 | with allure.step("Get object"): 77 | object_1 = s3_gate_object.get_object_s3( 78 | self.s3_client, bucket, file_name, full_output=True 79 | ) 80 | assert ( 81 | object_1.get("VersionId") == version_id_2 82 | ), f"Get object with version {version_id_2}" 83 | 84 | with allure.step("Get first version of object"): 85 | object_2 = s3_gate_object.get_object_s3( 86 | self.s3_client, bucket, file_name, version_id_1, full_output=True 87 | ) 88 | assert ( 89 | object_2.get("VersionId") == version_id_1 90 | ), f"Get object with version {version_id_1}" 91 | 92 | with 
allure.step("Get second version of object"): 93 | object_3 = s3_gate_object.get_object_s3( 94 | self.s3_client, bucket, file_name, version_id_2, full_output=True 95 | ) 96 | assert ( 97 | object_3.get("VersionId") == version_id_2 98 | ), f"Get object with version {version_id_2}" 99 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/services/test_binaries.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from http import HTTPStatus 3 | from re import match 4 | 5 | import allure 6 | import pytest 7 | import requests 8 | from frostfs_testlib.hosting import Hosting 9 | 10 | from pytest_tests.helpers.binary_version import get_remote_binaries_versions 11 | from pytest_tests.helpers.env_properties import read_env_properties, save_env_properties 12 | from pytest_tests.resources.common import BIN_VERSIONS_FILE 13 | 14 | logger = logging.getLogger("NeoLogger") 15 | 16 | 17 | @allure.title("Check binaries versions") 18 | @pytest.mark.sanity 19 | @pytest.mark.check_binaries 20 | @pytest.mark.skip("Skipped due to https://j.yadro.com/browse/OBJECT-628") 21 | def test_binaries_versions(request, hosting: Hosting): 22 | """ 23 | Compare binaries versions from external source (url) and deployed on servers. 
24 | """ 25 | if not BIN_VERSIONS_FILE: 26 | pytest.skip("File with binaries and versions was not provided") 27 | 28 | binaries_to_check = download_versions_info(BIN_VERSIONS_FILE) 29 | with allure.step("Get binaries versions from servers"): 30 | got_versions = get_remote_binaries_versions(hosting) 31 | 32 | env_properties = read_env_properties(request.config) 33 | 34 | # compare versions from servers and file 35 | failed_versions = {} 36 | additional_env_properties = {} 37 | for binary, version in binaries_to_check.items(): 38 | actual_version = got_versions.get(binary) 39 | if actual_version != version: 40 | failed_versions[binary] = f"Expected version {version}, found version {actual_version}" 41 | 42 | # If some binary was not listed in the env properties file, let's add it 43 | # so that we have full information about versions in allure report 44 | if env_properties and binary not in env_properties: 45 | additional_env_properties[binary] = actual_version 46 | 47 | if env_properties and additional_env_properties: 48 | save_env_properties(request.config, additional_env_properties) 49 | 50 | # create clear beautiful error with aggregation info 51 | if failed_versions: 52 | msg = "\n".join({f"{binary}: {error}" for binary, error in failed_versions.items()}) 53 | raise AssertionError(f"Found binaries with unexpected versions:\n{msg}") 54 | 55 | 56 | @allure.step("Download versions info from {url}") 57 | def download_versions_info(url: str) -> dict: 58 | binaries_to_version = {} 59 | 60 | response = requests.get(url) 61 | 62 | assert ( 63 | response.status_code == HTTPStatus.OK 64 | ), f"Got {response.status_code} code. 
Content {response.json()}" 65 | 66 | content = response.text 67 | assert content, f"Expected file with content, got {response}" 68 | 69 | for line in content.split("\n"): 70 | m = match("(.*)=(.*)", line) 71 | if not m: 72 | logger.warning(f"Could not get binary/version from {line}") 73 | continue 74 | bin_name, bin_version = m.group(1), m.group(2) 75 | binaries_to_version[bin_name] = bin_version 76 | 77 | return binaries_to_version 78 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/session_token/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from pytest_tests.helpers.wallet import WalletFactory, WalletFile 4 | 5 | 6 | @pytest.fixture(scope="module") 7 | def owner_wallet(wallet_factory: WalletFactory) -> WalletFile: 8 | """ 9 | Returns wallet which owns containers and objects 10 | """ 11 | return wallet_factory.create_wallet() 12 | 13 | 14 | @pytest.fixture(scope="module") 15 | def user_wallet(wallet_factory: WalletFactory) -> WalletFile: 16 | """ 17 | Returns wallet which will use objects from owner via static session 18 | """ 19 | return wallet_factory.create_wallet() 20 | 21 | 22 | @pytest.fixture(scope="module") 23 | def stranger_wallet(wallet_factory: WalletFactory) -> WalletFile: 24 | """ 25 | Returns stranger wallet which should fail to obtain data 26 | """ 27 | return wallet_factory.create_wallet() 28 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/session_token/test_object_session_token.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import allure 4 | import pytest 5 | from frostfs_testlib.resources.common import SESSION_NOT_FOUND 6 | from frostfs_testlib.utils import wallet_utils 7 | 8 | from pytest_tests.helpers.container import create_container 9 | from pytest_tests.helpers.file_helper import 
generate_file
from pytest_tests.helpers.frostfs_verbs import delete_object, put_object, put_object_to_random_node
from pytest_tests.resources.common import WALLET_PASS
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from pytest_tests.steps.session_token import create_session_token


@pytest.mark.sanity
@pytest.mark.session_token
class TestDynamicObjectSession(ClusterTestBase):
    @allure.title("Test Object Operations with Session Token")
    @pytest.mark.parametrize(
        "object_size",
        [pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
        ids=["simple object", "complex object"],
    )
    def test_object_session_token(self, default_wallet, object_size):
        """
        Test how operations over objects are executed with a session token

        Steps:
        1. Create a private container
        2. Obj operation requests to the node which IS NOT in the container but granted
            with a session token
        3. Obj operation requests to the node which IS in the container and NOT granted
            with a session token
        4. Obj operation requests to the node which IS NOT in the container and NOT granted
            with a session token
        """

        with allure.step("Init wallet"):
            wallet = default_wallet
            address = wallet_utils.get_last_address_from_wallet(wallet, "")

        with allure.step("Nodes Settlements"):
            # Three distinct nodes, one per scenario described in the docstring.
            (
                session_token_node,
                container_node,
                non_container_node,
            ) = random.sample(self.cluster.storage_nodes, 3)

        with allure.step("Create Session Token"):
            # The session token is issued via session_token_node's RPC endpoint.
            session_token = create_session_token(
                shell=self.shell,
                owner=address,
                wallet_path=wallet,
                wallet_password=WALLET_PASS,
                rpc_endpoint=session_token_node.get_rpc_endpoint(),
            )

        with allure.step("Create Private Container"):
            un_locode = container_node.get_un_locode()
            # Derive a short location tag from the UN/LOCODE ("RU LED" -> "SPB",
            # otherwise the second token of the code) for the placement filter.
            locode = "SPB" if un_locode == "RU LED" else un_locode.split()[1]
            # Policy pins the single replica to container_node's location.
            placement_policy = (
                f"REP 1 IN LOC_{locode}_PLACE CBF 1 SELECT 1 FROM LOC_{locode} "
                f'AS LOC_{locode}_PLACE FILTER "UN-LOCODE" '
                f'EQ "{un_locode}" AS LOC_{locode}'
            )
            cid = create_container(
                wallet,
                shell=self.shell,
                endpoint=self.cluster.default_rpc_endpoint,
                rule=placement_policy,
            )

        with allure.step("Put Objects"):
            file_path = generate_file(object_size)
            # `oid` is kept for the negative delete attempts below;
            # `oid_delete` is removed in the positive scenario.
            oid = put_object_to_random_node(
                wallet=wallet,
                path=file_path,
                cid=cid,
                shell=self.shell,
                cluster=self.cluster,
            )
            oid_delete = put_object_to_random_node(
                wallet=wallet,
                path=file_path,
                cid=cid,
                shell=self.shell,
                cluster=self.cluster,
            )

        with allure.step("Node not in container but granted a session token"):
            # Scenario 2: requests must succeed thanks to the session token.
            put_object(
                wallet=wallet,
                path=file_path,
                cid=cid,
                shell=self.shell,
                endpoint=session_token_node.get_rpc_endpoint(),
                session=session_token,
            )
            delete_object(
                wallet=wallet,
                cid=cid,
                oid=oid_delete,
                shell=self.shell,
                endpoint=session_token_node.get_rpc_endpoint(),
                session=session_token,
            )

        with allure.step("Node in container and not granted a session token"):
            # Scenario 3: the token was not issued for this node, so both
            # operations must fail with SESSION_NOT_FOUND.
            with pytest.raises(Exception, match=SESSION_NOT_FOUND):
                put_object(
                    wallet=wallet,
                    path=file_path,
                    cid=cid,
                    shell=self.shell,
                    endpoint=container_node.get_rpc_endpoint(),
                    session=session_token,
                )
            with pytest.raises(Exception, match=SESSION_NOT_FOUND):
                delete_object(
                    wallet=wallet,
                    cid=cid,
                    oid=oid,
                    shell=self.shell,
                    endpoint=container_node.get_rpc_endpoint(),
                    session=session_token,
                )

        with allure.step("Node not in container and not granted a session token"):
            # Scenario 4: same expectation for a node outside the container.
            with pytest.raises(Exception, match=SESSION_NOT_FOUND):
                put_object(
                    wallet=wallet,
                    path=file_path,
                    cid=cid,
                    shell=self.shell,
                    endpoint=non_container_node.get_rpc_endpoint(),
                    session=session_token,
                )
            with pytest.raises(Exception, match=SESSION_NOT_FOUND):
                delete_object(
                    wallet=wallet,
                    cid=cid,
                    oid=oid,
                    shell=self.shell,
                    endpoint=non_container_node.get_rpc_endpoint(),
                    session=session_token,
                )
-------------------------------------------------------------------------------- /pytest_tests/testsuites/session_token/test_static_session_token_container.py: --------------------------------------------------------------------------------
import allure
import pytest
from frostfs_testlib.resources.common import PUBLIC_ACL
from frostfs_testlib.shell import Shell

from pytest_tests.helpers.acl import (
    EACLAccess,
    EACLOperation,
    EACLRole,
    EACLRule,
    create_eacl,
    set_eacl,
    wait_for_cache_expired,
)
from pytest_tests.helpers.container import (
    create_container,
    delete_container,
    get_container,
    list_containers,
)
from
pytest_tests.helpers.file_helper import generate_file 22 | from pytest_tests.helpers.object_access import can_put_object 23 | from pytest_tests.helpers.wallet import WalletFile 24 | from pytest_tests.steps.cluster_test_base import ClusterTestBase 25 | from pytest_tests.steps.session_token import ContainerVerb, get_container_signed_token 26 | 27 | 28 | @pytest.mark.static_session_container 29 | class TestSessionTokenContainer(ClusterTestBase): 30 | @pytest.fixture(scope="module") 31 | def static_sessions( 32 | self, 33 | owner_wallet: WalletFile, 34 | user_wallet: WalletFile, 35 | client_shell: Shell, 36 | temp_directory: str, 37 | ) -> dict[ContainerVerb, str]: 38 | """ 39 | Returns dict with static session token file paths for all verbs with default lifetime 40 | """ 41 | return { 42 | verb: get_container_signed_token( 43 | owner_wallet, user_wallet, verb, client_shell, temp_directory 44 | ) 45 | for verb in ContainerVerb 46 | } 47 | 48 | def test_static_session_token_container_create( 49 | self, 50 | owner_wallet: WalletFile, 51 | user_wallet: WalletFile, 52 | static_sessions: dict[ContainerVerb, str], 53 | ): 54 | """ 55 | Validate static session with create operation 56 | """ 57 | with allure.step("Create container with static session token"): 58 | cid = create_container( 59 | user_wallet.path, 60 | session_token=static_sessions[ContainerVerb.CREATE], 61 | shell=self.shell, 62 | endpoint=self.cluster.default_rpc_endpoint, 63 | wait_for_creation=False, 64 | ) 65 | 66 | container_info: dict[str, str] = get_container( 67 | owner_wallet.path, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint 68 | ) 69 | assert container_info["ownerID"] == owner_wallet.get_address() 70 | 71 | assert cid not in list_containers( 72 | user_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint 73 | ) 74 | assert cid in list_containers( 75 | owner_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint 76 | ) 77 | 78 | def 
test_static_session_token_container_create_with_other_verb( 79 | self, 80 | user_wallet: WalletFile, 81 | static_sessions: dict[ContainerVerb, str], 82 | ): 83 | """ 84 | Validate static session without create operation 85 | """ 86 | with allure.step("Try create container with static session token without PUT rule"): 87 | for verb in [verb for verb in ContainerVerb if verb != ContainerVerb.CREATE]: 88 | with pytest.raises(Exception): 89 | create_container( 90 | user_wallet.path, 91 | session_token=static_sessions[verb], 92 | shell=self.shell, 93 | endpoint=self.cluster.default_rpc_endpoint, 94 | wait_for_creation=False, 95 | ) 96 | 97 | def test_static_session_token_container_create_with_other_wallet( 98 | self, 99 | stranger_wallet: WalletFile, 100 | static_sessions: dict[ContainerVerb, str], 101 | ): 102 | """ 103 | Validate static session with create operation for other wallet 104 | """ 105 | with allure.step("Try create container with static session token without PUT rule"): 106 | with pytest.raises(Exception): 107 | create_container( 108 | stranger_wallet.path, 109 | session_token=static_sessions[ContainerVerb.CREATE], 110 | shell=self.shell, 111 | endpoint=self.cluster.default_rpc_endpoint, 112 | wait_for_creation=False, 113 | ) 114 | 115 | def test_static_session_token_container_delete( 116 | self, 117 | owner_wallet: WalletFile, 118 | user_wallet: WalletFile, 119 | static_sessions: dict[ContainerVerb, str], 120 | ): 121 | """ 122 | Validate static session with delete operation 123 | """ 124 | with allure.step("Create container"): 125 | cid = create_container( 126 | owner_wallet.path, 127 | shell=self.shell, 128 | endpoint=self.cluster.default_rpc_endpoint, 129 | wait_for_creation=False, 130 | ) 131 | with allure.step("Delete container with static session token"): 132 | delete_container( 133 | wallet=user_wallet.path, 134 | cid=cid, 135 | session_token=static_sessions[ContainerVerb.DELETE], 136 | shell=self.shell, 137 | 
endpoint=self.cluster.default_rpc_endpoint, 138 | await_mode=True, 139 | ) 140 | 141 | assert cid not in list_containers( 142 | owner_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint 143 | ) 144 | 145 | def test_static_session_token_container_set_eacl( 146 | self, 147 | owner_wallet: WalletFile, 148 | user_wallet: WalletFile, 149 | stranger_wallet: WalletFile, 150 | static_sessions: dict[ContainerVerb, str], 151 | simple_object_size, 152 | ): 153 | """ 154 | Validate static session with set eacl operation 155 | """ 156 | with allure.step("Create container"): 157 | cid = create_container( 158 | owner_wallet.path, 159 | basic_acl=PUBLIC_ACL, 160 | shell=self.shell, 161 | endpoint=self.cluster.default_rpc_endpoint, 162 | ) 163 | file_path = generate_file(simple_object_size) 164 | assert can_put_object(stranger_wallet.path, cid, file_path, self.shell, self.cluster) 165 | 166 | with allure.step(f"Deny all operations for other via eACL"): 167 | eacl_deny = [ 168 | EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op) 169 | for op in EACLOperation 170 | ] 171 | set_eacl( 172 | user_wallet.path, 173 | cid, 174 | create_eacl(cid, eacl_deny, shell=self.shell), 175 | shell=self.shell, 176 | endpoint=self.cluster.default_rpc_endpoint, 177 | session_token=static_sessions[ContainerVerb.SETEACL], 178 | ) 179 | wait_for_cache_expired() 180 | 181 | assert not can_put_object(stranger_wallet.path, cid, file_path, self.shell, self.cluster) 182 | -------------------------------------------------------------------------------- /pytest_tests/testsuites/shard/test_control_shard.py: -------------------------------------------------------------------------------- 1 | import json 2 | import pathlib 3 | import re 4 | from dataclasses import dataclass 5 | from io import StringIO 6 | 7 | import allure 8 | import pytest 9 | import yaml 10 | from configobj import ConfigObj 11 | from frostfs_testlib.cli import FrostfsCli 12 | 13 | from 
pytest_tests.helpers.cluster import Cluster, StorageNode 14 | from pytest_tests.resources.common import WALLET_CONFIG 15 | 16 | SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_" 17 | BLOBSTOR_PREFIX = "_BLOBSTOR_" 18 | 19 | 20 | @dataclass 21 | class Blobstor: 22 | path: str 23 | path_type: str 24 | 25 | def __eq__(self, other) -> bool: 26 | if not isinstance(other, self.__class__): 27 | raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared") 28 | return self.path == other.path and self.path_type == other.path_type 29 | 30 | def __hash__(self): 31 | return hash((self.path, self.path_type)) 32 | 33 | @staticmethod 34 | def from_config_object(section: ConfigObj, shard_id: str, blobstor_id: str): 35 | var_prefix = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}{blobstor_id}" 36 | return Blobstor(section.get(f"{var_prefix}_PATH"), section.get(f"{var_prefix}_TYPE")) 37 | 38 | 39 | @dataclass 40 | class Shard: 41 | blobstor: list[Blobstor] 42 | metabase: str 43 | writecache: str 44 | 45 | def __eq__(self, other) -> bool: 46 | if not isinstance(other, self.__class__): 47 | raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared") 48 | return ( 49 | set(self.blobstor) == set(other.blobstor) 50 | and self.metabase == other.metabase 51 | and self.writecache == other.writecache 52 | ) 53 | 54 | def __hash__(self): 55 | return hash((self.metabase, self.writecache)) 56 | 57 | @staticmethod 58 | def _get_blobstor_count_from_section(config_object: ConfigObj, shard_id: int): 59 | pattern = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}" 60 | blobstors = {key[: len(pattern) + 2] for key in config_object.keys() if pattern in key} 61 | return len(blobstors) 62 | 63 | @staticmethod 64 | def from_config_object(config_object: ConfigObj, shard_id: int): 65 | var_prefix = f"{SHARD_PREFIX}{shard_id}" 66 | 67 | blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id) 68 | blobstors = [ 69 | 
Blobstor.from_config_object(config_object, shard_id, blobstor_id) 70 | for blobstor_id in range(blobstor_count) 71 | ] 72 | 73 | write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED") 74 | 75 | return Shard( 76 | blobstors, 77 | config_object.get(f"{var_prefix}_METABASE_PATH"), 78 | config_object.get(f"{var_prefix}_WRITECACHE_PATH") if write_cache_enabled else "", 79 | ) 80 | 81 | @staticmethod 82 | def from_object(shard): 83 | metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"] 84 | writecache = ( 85 | shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"] 86 | ) 87 | 88 | return Shard( 89 | blobstor=[ 90 | Blobstor(path=blobstor["path"], path_type=blobstor["type"]) 91 | for blobstor in shard["blobstor"] 92 | ], 93 | metabase=metabase, 94 | writecache=writecache, 95 | ) 96 | 97 | 98 | def shards_from_yaml(contents: str) -> list[Shard]: 99 | config = yaml.safe_load(contents) 100 | config["storage"]["shard"].pop("default") 101 | 102 | return [Shard.from_object(shard) for shard in config["storage"]["shard"].values()] 103 | 104 | 105 | def shards_from_env(contents: str) -> list[Shard]: 106 | configObj = ConfigObj(StringIO(contents)) 107 | 108 | pattern = f"{SHARD_PREFIX}\d*" 109 | num_shards = len(set(re.findall(pattern, contents))) 110 | 111 | return [Shard.from_config_object(configObj, shard_id) for shard_id in range(num_shards)] 112 | 113 | 114 | @pytest.mark.sanity 115 | @pytest.mark.shard 116 | class TestControlShard: 117 | @staticmethod 118 | def get_shards_from_config(node: StorageNode) -> list[Shard]: 119 | config_file = node.get_remote_config_path() 120 | file_type = pathlib.Path(config_file).suffix 121 | contents = node.host.get_shell().exec(f"cat {config_file}").stdout 122 | 123 | parser_method = { 124 | ".env": shards_from_env, 125 | ".yaml": shards_from_yaml, 126 | ".yml": shards_from_yaml, 127 | } 128 | 129 | shards = parser_method[file_type](contents) 130 | 
return shards 131 | 132 | @staticmethod 133 | def get_shards_from_cli(node: StorageNode) -> list[Shard]: 134 | wallet_path = node.get_remote_wallet_path() 135 | wallet_password = node.get_wallet_password() 136 | control_endpoint = node.get_control_endpoint() 137 | 138 | cli_config = node.host.get_cli_config("frostfs-cli") 139 | 140 | cli = FrostfsCli(node.host.get_shell(), cli_config.exec_path, WALLET_CONFIG) 141 | result = cli.shards.list( 142 | endpoint=control_endpoint, 143 | wallet=wallet_path, 144 | wallet_password=wallet_password, 145 | json_mode=True, 146 | ) 147 | return [Shard.from_object(shard) for shard in json.loads(result.stdout.split(">", 1)[1])] 148 | 149 | @allure.title("All shards are available") 150 | def test_control_shard(self, cluster: Cluster): 151 | for storage_node in cluster.storage_nodes: 152 | shards_from_config = self.get_shards_from_config(storage_node) 153 | shards_from_cli = self.get_shards_from_cli(storage_node) 154 | assert set(shards_from_config) == set(shards_from_cli) 155 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | allure-pytest==2.9.45 2 | allure-python-commons==2.9.45 3 | base58==2.1.0 4 | boto3==1.16.33 5 | botocore==1.19.33 6 | configobj==5.0.6 7 | frostfs-testlib==1.3.1 8 | neo-mamba==1.0.0 9 | pexpect==4.8.0 10 | pyyaml==6.0 11 | pytest==7.1.2 12 | pytest-lazy-fixture==0.6.3 13 | python-dateutil==2.8.2 14 | requests==2.28.0 15 | tenacity==8.0.1 16 | urllib3==1.26.9 -------------------------------------------------------------------------------- /requirements_dev.txt: -------------------------------------------------------------------------------- 1 | pre-commit==2.20.0 2 | isort==5.12.0 -------------------------------------------------------------------------------- /venv/local-pytest/environment.sh: -------------------------------------------------------------------------------- 1 | 
# DevEnv variables
export NEOFS_MORPH_DISABLE_CACHE=true
# Default dev-env location is a sibling of the repo containing this venv.
export DEVENV_PATH="${DEVENV_PATH:-${VIRTUAL_ENV}/../../frostfs-dev-env}"
# Export every variable printed by dev-env's `make env` into this shell.
pushd $DEVENV_PATH > /dev/null
export `make env`
popd > /dev/null
-------------------------------------------------------------------------------- /venv_template.mk: --------------------------------------------------------------------------------
# Template rule set for building a named virtualenv; expanded once per venv
# name via $(eval $(call VENV_template,<name>)).
define VENV_template
venv.$(1): venv.$(1)/bin/activate venv.$(1)/bin/environment.sh

# Create the venv, install pinned requirements, then patch the activate
# script. The `patch -R --dry-run` probe checks whether the patch is already
# applied, keeping the rule idempotent on re-runs.
venv.$(1)/bin/activate:
	@echo "Creating $(1) venv in $$@ from $$<"
	virtualenv --python=python3.9 --prompt="($(1))" venv.$(1)
	. venv.$(1)/bin/activate && \
	pip3.9 install -U setuptools==56.0.0 && \
	pip3.9 install -Ur requirements.txt
	@echo "Applying activate script patch"
	patch -R --dry-run -p1 -s -f -d venv.$(1)/bin/ < build_assets/activate.patch || \
	patch -p1 -d venv.$(1)/bin/ < build_assets/activate.patch

# Symlink the matching environment.sh into the venv's bin directory
# (order-only prerequisite: the source file must exist but is never rebuilt).
venv.$(1)/bin/environment.sh: | venv/$(1)/environment.sh
	ln -s ../../venv/$(1)/environment.sh venv.$(1)/bin/environment.sh

endef