├── .env.example ├── .github └── workflows │ └── main.yml ├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── pyproject.toml ├── pytestomatio ├── __init__.py ├── connect │ ├── __init__.py │ ├── connector.py │ └── s3_connector.py ├── decor │ ├── __init__.py │ ├── decorator_updater.py │ ├── default.py │ └── pep8.py ├── main.py ├── testing │ ├── __init__.py │ ├── code_collector.py │ └── testItem.py ├── testomatio │ ├── __init__.py │ ├── filter_plugin.py │ ├── testRunConfig.py │ ├── testomat_item.py │ └── testomatio.py └── utils │ ├── __init__.py │ ├── helper.py │ ├── parser_setup.py │ └── validations.py ├── smoke.py └── tests ├── __init__.py ├── conftest.py ├── sub ├── __init__.py ├── sub_mob │ ├── __init__.py │ ├── sub_sub_class_test.py │ └── sub_sub_test.py ├── test_class_sub.py └── test_sub.py ├── test_class_root.py ├── test_cli_param_test_id.py ├── test_cli_params.py ├── test_decorators.py ├── test_parameters.py ├── test_root.py └── test_sync.py /.env.example: -------------------------------------------------------------------------------- 1 | TESTOMATIO_URL=https://beta.testomat.io 2 | TESTOMATIO= -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Publish Python distributions to PyPI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | build-and-publish: 14 | name: Build and publish Python distributions to PyPI 15 | runs-on: ubuntu-latest 16 | environment: 17 | name: DEV 18 | permissions: 19 | id-token: write 20 | steps: 21 | - name: Check out repository code 22 | uses: actions/checkout@v4 23 | - name: Setup Python 24 | uses: actions/setup-python@v5 25 | with: 26 | python-version: "3.x" 27 | - name: Install dependencies 28 | run: | 29 | python -m pip install --upgrade pip 30 | pip install build 31 | - name: Build package 32 | run: | 33 | python -m build 34 | - name: Publish package distributions to PyPI 35 | uses: pypa/gh-action-pypi-publish@release/v1 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .pytest_cache 2 | */**/__pycache__ 3 | .idea 4 | # file for test and debug 5 | temp_test.py 6 | build 7 | pytest*.egg-info 8 | dist 9 | .venv* 10 | pytest.ini 11 | .env -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## 2.10.0 (2024-12-30) 2 | 3 | ### Fix 4 | - support test parameters that comes from the fixtures 5 | - Fix shared runs 6 | - pytestomatio plugin usage with xdist, add tests, sync tests 7 | - Parallel run must be True all the time so that testomatio doesn't create new test runs when update test status 8 | - enforce artifacts to be returning inline when requested 9 | - add_artifacts depends on the pytest node 10 | - Fix uploading artifacts to the bucket with user defined path 11 | - read S3 creads from env acc to the testomatio docs 12 | 13 | ### Feat 14 | - upload artifacts in bulk 15 | - resolve content type for uploaded artifacts 16 | - support private and public artifact configuration 17 | - Support --test-id parameters that accepts testomatio test id to filter tests 18 | - send labels and tags on the test run update call 19 | - support HTTP_PROXY, HTTPS_PROXY 20 | 21 | ### Refactor 
22 | - Smoke tests 23 | - Use system temp folder when resolving concurrent test run with xdist 24 | 25 | ## 2.8.1 (2024-08-14) 26 | 27 | ## 2.8.1rc2 (2024-08-12) 28 | 29 | ### Feat 30 | 31 | - Provide an option to not update build URL in a test run 32 | 33 | ### Fix 34 | 35 | - process test id that might start with @ 36 | 37 | ## 2.8.1rc1 (2024-08-11) 38 | 39 | ### Fix 40 | 41 | - Accept testid provided by testomatio before test starts 42 | 43 | ## 2.8.1rc0 (2024-08-11) 44 | 45 | ### Feat 46 | 47 | - Pytestomatio sends test executor url to Testomatio test run (on CI) 48 | - Allow user to asign label at test synchronisation 49 | - Allow filtering tests by ids when running test execution 50 | 51 | ### Fix 52 | 53 | - Handle pytest.skip() so that test status on testomatio is valid 54 | - fix dependencies and gitignore 55 | 56 | ## 2.6.1 (2024-08-05) 57 | 58 | ## 2.5.0 (2024-05-08) 59 | 60 | ## 2.2.0 (2024-04-03) 61 | 62 | ### Fix 63 | 64 | - fix regex 65 | - disable label and env parameters when updating test run due to 500 error from API 66 | - Fix dot and space in parameterised test, fix project dependency 67 | 68 | ## 2.3.1 (2024-03-13) 69 | 70 | ### Fix 71 | 72 | - Fix shared run reporting into new test run 73 | - Fix/workarround of the incorreclty processed parameterised test on API 74 | 75 | ## 2.3.0 (2024-03-11) 76 | 77 | ### Feat 78 | 79 | - Add TESTOMATIO_RUN to support test runs created on testomatio 80 | - Add https://www.conventionalcommits.org/en/v1.0.0/ support 81 | 82 | ### Fix 83 | 84 | - Fix to check testomatio session 85 | - Allow all pytest hooks execution when running sync command that run before pytest_runtestloop (actual tests) 86 | - Fix syncing local test with testomatio that are imported in a custom folder (on testomatio end) 87 | 88 | ## 2.1.0 (2024-03-07) 89 | 90 | ## 2.0.0 (2024-03-05) 91 | 92 | ## 1.7.0 (2024-02-26) 93 | 94 | ## 1.6.0 (2024-02-21) 95 | 96 | ## 1.5.0 (2024-02-12) 97 | 98 | ## 1.4.0 (2024-02-06) 99 | 100 | ## 1.3.0 (2023-12-06) 101 | 102 | ## 1.2.8 (2023-12-06) 103 | 104 | ## 1.2.5 (2023-10-21) 105 | 106 | ## 1.2.4 (2023-09-05) 107 | 108 | ## 1.2.3 (2023-09-03) 109 | 110 | ## 1.2.0 (2023-08-20) 111 | 112 | ## 1.1.1 (2023-08-17) 113 | 114 | ## 1.0.9 (2023-07-31) 115 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 pytest-analyzer 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Support Ukraine Badge](https://bit.ly/support-ukraine-now)](https://github.com/support-ukraine/support-ukraine)
2 | 
3 | # testomat.io plugin for pytest
4 | 
5 | ## Uses the testomat.io API:
6 | 
7 | - https://testomatio.github.io/check-tests/
8 | - https://testomatio.github.io/reporter/
9 | 
10 | ## Installation
11 | 
12 | ```bash
13 | pip install pytestomatio
14 | ```
15 | 
16 | ## Usage
17 | 
18 | Synchronize tests with testomat.io and get test ids back.
19 | Tests will not be executed.
20 | 
21 | ```bash
22 | pytest --testomatio sync
23 | ```
24 | 
25 | Remove all test ids from source code. Tests will not be executed.
26 | 
27 | ```bash
28 | pytest --testomatio remove
29 | ```
30 | 
31 | Run pytest and send test results to testomat.io.
32 | Tests can be sent to testomat.io without ids in your test code. If testomat.io fails to match tests by title, it will create
33 | new tests for the run.
34 | 
35 | ```bash
36 | pytest --testomatio report
37 | ```
38 | 
39 | Run pytest with the debug parameter to get the collected test data in a metadata.json file.
40 | 
41 | ```bash
42 | pytest --testomatio debug
43 | ```
44 | 
45 | ## Configuration with environment variables
46 | You can use environment variables to control certain features of testomat.io.
47 | 
48 | | Env variable | What it does | Examples |
49 | |--------|--------|-------|
50 | | TESTOMATIO | Provides the token for pytestomatio to access and push data to testomat.io | TESTOMATIO=tstmt_***** pytest --testomatio sync |
51 | | TESTOMATIO_RUN | Name of a test run to create on testomat.io | TESTOMATIO_RUN="Smoke test" pytest --testomatio report |
52 | | TESTOMATIO_RUN_ID | Id of an existing test run to send test results to | TESTOMATIO_RUN_ID=98dfas0 pytest --testomatio report |
53 | | TESTOMATIO_RUNGROUP_TITLE | Create a group (folder) for a test run | TESTOMATIO_RUNGROUP_TITLE="Release 2.0" pytest --testomatio report |
54 | | TESTOMATIO_ENV | Assign environments to a test run | TESTOMATIO_ENV="linux,chrome,1920x1080" pytest --testomatio report |
55 | | TESTOMATIO_LABELS | Assign labels to a test run | TESTOMATIO_LABELS="smoke,regression" pytest --testomatio report |
56 | | TESTOMATIO_SYNC_LABELS | Assign labels to a test case when you synchronise tests from code with testomat.io | TESTOMATIO_SYNC_LABELS="number:1,list:one,standalone" pytest --testomatio sync |
57 | | TESTOMATIO_CODE_STYLE | If you are not sure, don't set this variable. Default value is 'default' | TESTOMATIO_CODE_STYLE=pep8 pytest --testomatio sync |
58 | | TESTOMATIO_CI_DOWNSTREAM | If set, pytestomatio will not set or update the build URL for a test run. This is useful in scenarios where the build URL is already set by Testomat.io for test runs that are created directly on Testomat.io. | TESTOMATIO_CI_DOWNSTREAM=true pytest --testomatio report |
59 | 
60 | 
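Several of these variables can be combined in a single invocation. Note that `pytestomatio/testomatio/testRunConfig.py` also reads `TESTOMATIO_TITLE` and `TESTOMATIO_SHARED_RUN`, which are not listed in the table above; the token and values below are placeholders:

```bash
TESTOMATIO="tstmt_*****" TESTOMATIO_TITLE="Nightly regression" TESTOMATIO_RUNGROUP_TITLE="Release 2.0" TESTOMATIO_ENV="linux,chrome" pytest --testomatio report
```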
61 | ### Run groups parameter
62 | The `TESTOMATIO_RUNGROUP_TITLE` environment variable can be used to group test runs under a specific group title.
63 | 
64 | ### pytest.ini
65 | In case you are using a private testomat.io service, create a `pytest.ini` file in your project root directory and
66 | specify the testomat.io URL in it:
67 | 
68 | ```ini
69 | [pytest]
70 | testomatio_url = https://app.testomat.io
71 | 
72 | ```
73 | 
74 | ### Submitting Test Run Environment
75 | 
76 | To configure the test run environment, you can use an additional option:
77 | 
78 | ```bash
79 | pytest --testomatio report --testRunEnv "windows11,chrome,1920x1080"
80 | ```
81 | 
82 | Environment values are comma-separated; please use double quotes.
83 | 
84 | 
85 | ### Submitting Test Artifacts
86 | 
87 | testomat.io does not store any screenshots, logs or other artifacts.
88 | 
89 | In order to manage them, it is advised to use S3 buckets (or GCP Storage).
90 | https://docs.testomat.io/usage/test-artifacts/
91 | 
92 | The plugin needs to be aware of the cloud storage credentials.
93 | There are two options:
94 | 1. Enable the **Share credentials with testomat.io Reporter** option in testomat.io Settings -> Artifacts.
95 | 2. Use environment variables `ACCESS_KEY_ID, SECRET_ACCESS_KEY, ENDPOINT, BUCKET, BUCKET_PATH`
96 | 
97 | You would need to decide when you want to upload your test artifacts to cloud storage.
98 | 
99 | 1) Upload a page screenshot when a test fails, using fixtures [reference](https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures)
100 | 
101 | ```python
102 | # content of conftest.py
103 | import pytest
104 | import random, string
105 | import os
106 | from typing import Dict
107 | from pytest import StashKey, CollectReport
108 | from playwright.sync_api import Page
109 | 
110 | phase_report_key = StashKey[Dict[str, CollectReport]]()
111 | 
112 | @pytest.hookimpl(wrapper=True, tryfirst=True)
113 | def pytest_runtest_makereport(item, call):
114 |     rep = yield
115 |     item.stash.setdefault(phase_report_key, {})[rep.when] = rep
116 |     return rep
117 | 
118 | 
119 | @pytest.fixture(scope="function")
120 | def handle_artifacts(page: Page, request):
121 |     yield
122 |     report = request.node.stash[phase_report_key]
123 |     if ("call" not in report) or report["setup"].failed or report["call"].failed:
124 |         random_string = ''.join(random.choices(string.ascii_letters + string.digits, k=8))
125 | 
126 |         filename = f"{random_string}.png"
127 |         screenshot_path = os.path.join(artifacts_dir, filename)  # artifacts_dir is assumed to be defined elsewhere in your conftest
128 |         page.screenshot(path=screenshot_path)
129 |         # file_path - required, path to the file to be uploaded
130 |         # file_bytes - required, bytes of the file to be uploaded
131 |         # key - required, file name in the s3 bucket
132 |         # bucket_name - optional, name of the bucket to upload the file to. Default value is taken from testomat.io
133 |         artifact_url = pytest.testomatio.upload_file(screenshot_path, filename)
134 |         # or
135 |         # artifact_url = pytest.testomatio.upload_file_object(file_bytes, key, bucket_name)
136 |         pytest.testomatio.add_artifacts(request.node, [artifact_url])
137 |     page.close()
138 | ```
139 | 
140 | ⚠️ Please take into account that the s3_connector is available only after the **pytest_collection_modifyitems()** hook is executed.
141 | 
142 | 2) If you prefer to use pytest hooks, add a `pytest_runtest_makereport` hook to your `conftest.py` file.
143 | 
144 | ```python
145 | def pytest_runtest_makereport(item, call):
146 |     artifact_url = pytest.testomatio.upload_file(screenshot_path, filename)  # screenshot_path and filename are assumed to be prepared by your code
147 |     pytest.testomatio.add_artifacts(item, [artifact_url])
148 | ```
149 | 
150 | ### Clarifications
151 | 
152 | - tests can be synced even without the `@pytest.mark.testomatio('@T96c700e6')` decorator.
153 | - test title in testomat.io == test name in pytest
154 | - test suite title in testomat.io == test file name in pytest
155 | 
156 | ## Example of test
157 | 
158 | To make the experience more consistent, the plugin uses standard pytest markers.
159 | A testomat.io test id is a string value that starts with `@T` followed by 8 symbols.
160 | 
161 | ```python
162 | from pytest import mark
163 | 
164 | 
165 | @mark.testomatio('@T96c700e6')
166 | def test_example():
167 |     assert 2 + 2 == 4
168 | ```
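Parametrized tests need no special treatment; based on the title handling in `pytestomatio/testing/testItem.py`, the parameter name is kept as a `${...}` placeholder in the synchronized title. A minimal sketch (the test id shown is illustrative):

```python
import pytest
from pytest import mark


@mark.testomatio('@T96c700e7')
@pytest.mark.parametrize('browser', ['chrome', 'firefox'])
def test_login_with(browser):
    # expected to sync roughly as "Login with ${browser} @T96c700e7"
    assert browser in ('chrome', 'firefox')
```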
169 | 
170 | ### Compatibility table with [Testomatio check-tests](https://github.com/testomatio/check-tests)
171 | 
172 | | Action | Compatibility | Method |
173 | |--------|--------|-------|
174 | | Report tests to testomat.io | complete | `pytest --testomatio report` |
175 | | Filter tests by id to run | complete | `pytest --testomatio report --test-id="T00C73028\|T00C73029"` |
176 | | Importing tests into testomat.io | complete | `pytest --testomatio sync` |
177 | | Exclude hook code of a test | N/A | N/A |
178 | | Include line number code of a test | N/A | N/A |
179 | | Import Parametrized Tests | complete | default behaviour |
180 | | Disable Detached Tests | complete | `pytest --testomatio sync --no-detach` |
181 | | Synchronous Import | complete | default behaviour |
182 | | Auto-assign Test IDs in Source Code | complete | default behaviour |
183 | | Keep Test IDs Between Projects | complete | `pytest --testomatio sync --create` |
184 | | Clean Test IDs | complete | `pytest --testomatio remove` |
185 | | Import Into a Branch | N/A | N/A |
186 | | Keep Structure of Source Code | complete | `pytest --testomatio sync --keep-structure` |
187 | | Delete Empty Suites | complete | `pytest --testomatio sync --no-empty` |
188 | | Import Into a Specific Folder | complete | `pytest --testomatio sync --directory "Windows\smoke"` |
189 | | Debugging | parity | `pytest --testomatio debug` |
190 | 
191 | 
192 | ## Test
193 | - import into an empty project
194 | - updated test - (resync)
195 | - test run
196 | - test run into a folder
197 | - test run labels, tags
198 | 
199 | ## TODO
200 | - retry the test run update with fewer attributes when the API returns a 500 error
201 | - handle the non-configured S3 bucket error
202 | - fix test duration
203 | 
204 | ## Contribution
205 | Use Python 3.12
206 | 
207 | 1. `pip install ".[dev]"`
208 | 1. `python ./smoke.py`
209 | 1. Test things manually
210 | 1. Verify no regression bugs
211 | 1. `cz commit`
212 | 1. `cz bump`
213 | 1. 
`git push remoteName branchName --tags` -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=65.5.1", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [tool.setuptools.packages.find] 6 | exclude = [".github", "tests", "build", "dist", ".venv", "pytestomatio.egg-info", ".env", ".gitignore", "CHANGELOG.md"] 7 | 8 | [tool.commitizen] 9 | name = "cz_conventional_commits" 10 | tag_format = "$version" 11 | version_scheme = "pep440" 12 | version_provider = "pep621" 13 | update_changelog_on_bump = false 14 | [project] 15 | name = "pytestomatio" 16 | version = "2.10.0" 17 | 18 | dependencies = [ 19 | "requests>=2.29.0", 20 | "pytest>7.2.0", 21 | "boto3>=1.28.28", 22 | "libcst==1.1.0", 23 | "commitizen>=3.18.1", 24 | "autopep8>=2.1.0" 25 | ] 26 | 27 | authors = [ 28 | { name = "Oleksii Ostapov" }, 29 | { name = "TikoQA" }, 30 | ] 31 | description = "Pytest plugin to sync test with testomat.io" 32 | readme = "README.md" 33 | requires-python = ">=3.10" 34 | classifiers = [ 35 | "Framework :: Pytest", 36 | "Programming Language :: Python :: 3", 37 | "License :: OSI Approved :: MIT License", 38 | "Operating System :: OS Independent", 39 | ] 40 | 41 | [project.urls] 42 | "Testomat.io" = "https://testomat.io/" 43 | "Homepage" = "https://github.com/testomatio/pytestomatio" 44 | "Bug Tracker" = "https://github.com/testomatio/pytestomatio/issues" 45 | 46 | [project.entry-points.pytest11] 47 | pytestomatio = "pytestomatio.main" 48 | 49 | [project.optional-dependencies] 50 | dev = [ 51 | "pytest>=7.2.0", 52 | "pytest-testdox>=2.0.0", 53 | "pytest-xdist==3.6.1", 54 | "python-dotenv==1.0.1", 55 | "toml==0.10.2" 56 | ] 57 | 58 | [tool.pytest.ini_options] 59 | testpaths = ["tests"] 60 | markers = [ 61 | "smoke: indicates smoke tests" 62 | ] 63 | -------------------------------------------------------------------------------- /pytestomatio/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testomatio/pytestomatio/58db8a2d20b9e577513b80fd675d195b68797ddd/pytestomatio/__init__.py -------------------------------------------------------------------------------- /pytestomatio/connect/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testomatio/pytestomatio/58db8a2d20b9e577513b80fd675d195b68797ddd/pytestomatio/connect/__init__.py -------------------------------------------------------------------------------- /pytestomatio/connect/connector.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from requests.exceptions import HTTPError, ConnectionError 3 | import logging 4 | from os.path import join, normpath 5 | from os import getenv 6 | from pytestomatio.utils.helper import safe_string_list 7 | from pytestomatio.testing.testItem import TestItem 8 | import time 9 | 10 | log = logging.getLogger('pytestomatio') 11 | 12 | 13 | class Connector: 14 | def __init__(self, base_url: str = '', api_key: str = None): 15 | self.base_url = base_url 16 | self._session = requests.Session() 17 | self.jwt: str = '' 18 | self.api_key = api_key 19 | 20 | @property 21 | def session(self): 22 | """Get the session, creating it and applying proxy settings if necessary.""" 23 | self._apply_proxy_settings() 24 | return self._session 25 | 26 | @session.setter 
27 | def session(self, value): 28 | """Allow setting a custom session, while still applying proxy settings.""" 29 | self._session = value 30 | self._apply_proxy_settings() 31 | 32 | def _apply_proxy_settings(self): 33 | """Apply proxy settings based on environment variables, fallback to no proxy if unavailable.""" 34 | http_proxy = getenv("HTTP_PROXY") 35 | log.debug(f"HTTP_PROXY: {http_proxy}") 36 | if http_proxy: 37 | self._session.proxies = {"http": http_proxy, "https": http_proxy} 38 | self._session.verify = False 39 | log.debug(f"Proxy settings applied: {self._session.proxies}") 40 | 41 | if not self._test_proxy_connection(timeout=1): 42 | log.debug("Proxy is unavailable. Falling back to a direct connection.") 43 | self._session.proxies.clear() 44 | self._session.verify = True 45 | else: 46 | log.debug("No proxy settings found. Using a direct connection.") 47 | self._session.proxies.clear() 48 | self._session.verify = True 49 | self._test_proxy_connection() 50 | 51 | def _test_proxy_connection(self, test_url="https://api.ipify.org?format=json", timeout=30, retry_interval=1): 52 | log.debug("Current session: %s", self._session.proxies) 53 | log.debug("Current verify: %s", self._session.verify) 54 | 55 | start_time = time.time() 56 | while time.time() - start_time < timeout: 57 | try: 58 | response = self._session.get(test_url, timeout=5) 59 | response.raise_for_status() 60 | log.debug("Internet connection is available.") 61 | return True 62 | except requests.exceptions.RequestException as e: 63 | log.error("Internet connection is unavailable. Error: %s", e) 64 | time.sleep(retry_interval) 65 | 66 | log.error("Internet connection check timed out after %d seconds.", timeout) 67 | return False 68 | 69 | def load_tests( 70 | self, 71 | tests: list[TestItem], 72 | no_empty: bool = False, 73 | no_detach: bool = False, 74 | structure: bool = False, 75 | create: bool = False, 76 | directory: str = None 77 | ): 78 | request = { 79 | "framework": "pytest", 80 | "language": "python", 81 | "noempty": no_empty, 82 | "no-detach": no_detach, 83 | "structure": structure if not no_empty else False, 84 | "create": create, 85 | "sync": True, 86 | "tests": [] 87 | } 88 | for test in tests: 89 | request['tests'].append({ 90 | "name": test.sync_title, 91 | "suites": [ 92 | test.class_name 93 | ], 94 | "code": test.source_code, 95 | "file": test.file_path if structure else ( 96 | test.file_name if directory is None else normpath(join(directory, test.file_name))), 97 | "labels": safe_string_list(getenv('TESTOMATIO_SYNC_LABELS')), 98 | }) 99 | 100 | try: 101 | response = self.session.post(f'{self.base_url}/api/load?api_key={self.api_key}', json=request) 102 | except ConnectionError as ce: 103 | log.error(f'Failed to connect to {self.base_url}: {ce}') 104 | return 105 | except HTTPError as he: 106 | log.error(f'HTTP error occurred while connecting to {self.base_url}: {he}') 107 | return 108 | except Exception as e: 109 | log.error(f'An unexpected exception occurred. Please report an issue: {e}') 110 | return 111 | 112 | if response.status_code < 400: 113 | log.info(f'Tests loaded to {self.base_url}') 114 | else: 115 | log.error(f'Failed to load tests to {self.base_url}. 
Status code: {response.status_code}') 116 | 117 | def get_tests(self, test_metadata: list[TestItem]) -> dict: 118 | # with safe_request('Failed to get test ids from testomat.io'): 119 | response = self.session.get(f'{self.base_url}/api/test_data?api_key={self.api_key}') 120 | return response.json() 121 | 122 | def create_test_run(self, title: str, group_title, env: str, label: str, shared_run: bool, parallel, ci_build_url: str) -> dict | None: 123 | request = { 124 | "api_key": self.api_key, 125 | "title": title, 126 | "group_title": group_title, 127 | "env": env, 128 | "label": label, 129 | "parallel": parallel, 130 | "ci_build_url": ci_build_url, 131 | "shared_run": shared_run 132 | } 133 | filtered_request = {k: v for k, v in request.items() if v is not None} 134 | try: 135 | response = self.session.post(f'{self.base_url}/api/reporter', json=filtered_request) 136 | except ConnectionError as ce: 137 | log.error(f'Failed to connect to {self.base_url}: {ce}') 138 | return 139 | except HTTPError as he: 140 | log.error(f'HTTP error occurred while connecting to {self.base_url}: {he}') 141 | return 142 | except Exception as e: 143 | log.error(f'An unexpected exception occurred. Please report an issue: {e}') 144 | return 145 | 146 | if response.status_code == 200: 147 | log.info(f'Test run created {response.json()["uid"]}') 148 | return response.json() 149 | 150 | def update_test_run(self, id: str, title: str, group_title, 151 | env: str, label: str, shared_run: bool, parallel, ci_build_url: str) -> dict | None: 152 | request = { 153 | "api_key": self.api_key, 154 | "title": title, 155 | "group_title": group_title, 156 | "env": env, 157 | "label": label, 158 | "parallel": parallel, 159 | "ci_build_url": ci_build_url, 160 | "shared_run": shared_run 161 | } 162 | filtered_request = {k: v for k, v in request.items() if v is not None} 163 | 164 | try: 165 | response = self.session.put(f'{self.base_url}/api/reporter/{id}', json=filtered_request) 166 | except ConnectionError as ce: 167 | log.error(f'Failed to connect to {self.base_url}: {ce}') 168 | return 169 | except HTTPError as he: 170 | log.error(f'HTTP error occurred while connecting to {self.base_url}: {he}') 171 | return 172 | except Exception as e: 173 | log.error(f'An unexpected exception occurred. 
Please report an issue: {e}') 174 | return 175 | 176 | if response.status_code == 200: 177 | log.info(f'Test run updated {response.json()["uid"]}') 178 | return response.json() 179 | 180 | def update_test_status(self, run_id: str, 181 | status: str, 182 | title: str, 183 | suite_title: str, 184 | suite_id: str, 185 | test_id: str, 186 | message: str, 187 | stack: str, 188 | run_time: float, 189 | artifacts: list[str], 190 | steps: str, 191 | code: str, 192 | example: dict) -> None: 193 | 194 | request = { 195 | "status": status, # Enum: "passed" "failed" "skipped" 196 | "title": title, 197 | "suite_title": suite_title, 198 | "suite_id": suite_id, 199 | "test_id": test_id, 200 | "message": message, 201 | "stack": stack, 202 | "run_time": run_time, 203 | "example": example, 204 | "artifacts": artifacts, 205 | "steps": steps, 206 | "code": code 207 | } 208 | filtered_request = {k: v for k, v in request.items() if v is not None} 209 | try: 210 | response = self.session.post(f'{self.base_url}/api/reporter/{run_id}/testrun?api_key={self.api_key}', 211 | json=filtered_request) 212 | except ConnectionError as ce: 213 | log.error(f'Failed to connect to {self.base_url}: {ce}') 214 | return 215 | except HTTPError as he: 216 | log.error(f'HTTP error occurred while connecting to {self.base_url}: {he}') 217 | return 218 | except Exception as e: 219 | log.error(f'An unexpected exception occurred. Please report an issue: {e}') 220 | return 221 | if response.status_code == 200: 222 | log.info('Test status updated') 223 | 224 | # TODO: I guess this class should be just an API client and used within testRun (testRunConfig) 225 | def finish_test_run(self, run_id: str, is_final=False) -> None: 226 | status_event = 'finish_parallel' if is_final else 'finish' 227 | try: 228 | self.session.put(f'{self.base_url}/api/reporter/{run_id}?api_key={self.api_key}', 229 | json={"status_event": status_event}) 230 | except ConnectionError as ce: 231 | log.error(f'Failed to connect to {self.base_url}: {ce}') 232 | return 233 | except HTTPError as he: 234 | log.error(f'HTTP error occurred while connecting to {self.base_url}: {he}') 235 | return 236 | except Exception as e: 237 | log.error(f'An unexpected exception occurred. Please report an issue: {e}') 238 | return 239 | 240 | def disconnect(self): 241 | self.session.close() 242 | -------------------------------------------------------------------------------- /pytestomatio/connect/s3_connector.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | import boto3 3 | import logging 4 | from io import BytesIO 5 | import mimetypes 6 | 7 | log = logging.getLogger(__name__) 8 | log.setLevel('INFO') 9 | 10 | 11 | def parse_endpoint(endpoint: str = None) -> Optional[str]: 12 | if endpoint.startswith('https://'): 13 | return endpoint[8:] 14 | elif endpoint.startswith('http://'): 15 | return endpoint[7:] 16 | return endpoint 17 | 18 | # TODO: review error handling. It should be save, and only create log entries without effecting test execution. 
19 | class S3Connector: 20 | def __init__(self, 21 | aws_region_name: Optional[str], 22 | aws_access_key_id: Optional[str], 23 | aws_secret_access_key: Optional[str], 24 | endpoint: Optional[str], 25 | bucket_name: Optional[str], 26 | bucker_prefix: Optional[str], 27 | acl: Optional[str] = 'public-read' 28 | ): 29 | 30 | self.aws_region_name = aws_region_name 31 | self.endpoint = parse_endpoint(endpoint) 32 | self.bucket_name = bucket_name 33 | self.bucker_prefix = bucker_prefix 34 | self.client = None 35 | self._is_logged_in = False 36 | self.aws_access_key_id = aws_access_key_id 37 | self.aws_secret_access_key = aws_secret_access_key 38 | self.acl = acl 39 | 40 | def login(self): 41 | log.debug('creating s3 session') 42 | self.client = boto3.client( 43 | 's3', 44 | endpoint_url=f'https://{self.endpoint}', 45 | aws_access_key_id=self.aws_access_key_id, 46 | aws_secret_access_key=self.aws_secret_access_key, 47 | region_name=self.aws_region_name 48 | ) 49 | 50 | self._is_logged_in = True 51 | log.info('s3 session created') 52 | 53 | # TODO: upload files async 54 | def upload_files(self, file_list, bucket_name: str = None): 55 | links = [] 56 | for file_path, key in file_list: 57 | link = self.upload_file(file_path=file_path, key=key, bucket_name=bucket_name) 58 | links.append(link) 59 | return [link for link in links if link is not None] 60 | 61 | 62 | def upload_file(self, file_path: str, key: str = None, bucket_name: str = None) -> Optional[str]: 63 | if not self._is_logged_in: 64 | log.warning('s3 session is not created, creating new one') 65 | return 66 | if not key: 67 | key = file_path 68 | key = f"{self.bucker_prefix}/{key}" 69 | if not bucket_name: 70 | bucket_name = self.bucket_name 71 | 72 | content_type, _ = mimetypes.guess_type(key) 73 | if content_type is None: 74 | content_type = 'application/octet-stream' 75 | 76 | try: 77 | log.info(f'uploading artifact {file_path} to s3://{bucket_name}/{key}') 78 | self.client.upload_file( 79 | file_path, 80 | bucket_name, 81 | key, 82 | ExtraArgs={ 83 | 'ACL': self.acl, 84 | 'ContentType': content_type, 85 | 'ContentDisposition': 'inline' 86 | } 87 | ) 88 | log.info(f'artifact {file_path} uploaded to s3://{bucket_name}/{key}') 89 | return f'https://{bucket_name}.{self.endpoint}/{key}' 90 | except Exception as e: 91 | log.error(f'failed to upload file {file_path} to s3://{bucket_name}/{key}: {e}') 92 | 93 | def upload_file_object(self, file_bytes: bytes, key: str, bucket_name: str = None) -> Optional[str]: 94 | if not self._is_logged_in: 95 | log.warning('s3 session is not created, creating new one') 96 | return 97 | file = BytesIO(file_bytes) 98 | if not bucket_name: 99 | bucket_name = self.bucket_name 100 | key = f"{self.bucker_prefix}/{key}" 101 | 102 | content_type, _ = mimetypes.guess_type(key) 103 | if content_type is None: 104 | content_type = 'application/octet-stream' 105 | 106 | try: 107 | log.info(f'uploading artifact {key} to s3://{bucket_name}/{key}') 108 | self.client.upload_fileobj( 109 | file, 110 | bucket_name, 111 | key, 112 | ExtraArgs={ 113 | 'ACL': self.acl, 114 | 'ContentType': content_type, 115 | 'ContentDisposition': 'inline' 116 | } 117 | ) 118 | log.info(f'artifact {key} uploaded to s3://{bucket_name}/{key}') 119 | return f'https://{bucket_name}.{self.endpoint}/{key}' 120 | except Exception as e: 121 | log.error(f'failed to upload file {key} to s3://{bucket_name}/{key}: {e}') 122 | -------------------------------------------------------------------------------- /pytestomatio/decor/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/testomatio/pytestomatio/58db8a2d20b9e577513b80fd675d195b68797ddd/pytestomatio/decor/__init__.py -------------------------------------------------------------------------------- /pytestomatio/decor/decorator_updater.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pytestomatio.decor.pep8 import update_tests as update_tests_pep8 3 | from pytestomatio.decor.default import update_tests as update_tests_default 4 | 5 | 6 | def update_tests(file: str, 7 | mapped_tests: list[tuple[str, int]], 8 | all_tests: list[str], 9 | decorator_name: str, 10 | remove=False): 11 | code_style = os.getenv('TESTOMATIO_CODE_STYLE', 'default') 12 | if code_style == 'pep8': 13 | update_tests_pep8(file, mapped_tests, all_tests, decorator_name, remove) 14 | else: 15 | update_tests_default(file, mapped_tests, all_tests, decorator_name, remove) 16 | -------------------------------------------------------------------------------- /pytestomatio/decor/default.py: -------------------------------------------------------------------------------- 1 | import libcst as cst 2 | from typing import List, Tuple, Union 3 | 4 | 5 | class DecoratorUpdater(cst.CSTTransformer): 6 | def __init__(self, mapped_tests: List[Tuple[str, int]], all_tests: List[str], decorator_name: str): 7 | self.mapped_tests = mapped_tests 8 | self.all_tests = all_tests 9 | self.decorator_name = decorator_name 10 | 11 | def _get_id_by_title(self, title: str): 12 | for pair in self.mapped_tests: 13 | if pair[0] == title: 14 | return pair[1] 15 | 16 | def _remove_decorator(self, node: cst.FunctionDef) -> cst.FunctionDef: 17 | node.decorator_list = [decorator for decorator in node.decorator_list if 18 | not (isinstance(decorator, cst.Call) and decorator.func.attr == self.decorator_name)] 19 | return node 20 | 21 | def remove_decorators(self, tree: cst.Module) -> cst.Module: 22 | for node in cst.walk(tree): 23 | if isinstance(node, cst.FunctionDef): 24 | self.visit_FunctionDef(node, remove=True) 25 | return tree 26 | 27 | def leave_FunctionDef(self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef) -> cst.FunctionDef: 28 | if original_node.name.value in self.all_tests: 29 | test_id = self._get_id_by_title(original_node.name.value) 30 | if test_id is None: 31 | return updated_node 32 | 33 | deco_name = f'pytest.mark.{self.decorator_name}("{test_id}")' 34 | decorator = cst.Decorator(decorator=cst.parse_expression(deco_name)) 35 | 36 | # Check if the decorator already exists 37 | for existing_decorator in original_node.decorators: 38 | if isinstance(existing_decorator.decorator, cst.Call) and \ 39 | isinstance(existing_decorator.decorator.func, cst.Attribute) and \ 40 | existing_decorator.decorator.func.attr.value == self.decorator_name: 41 | # The decorator already exists, so we don't add it 42 | return updated_node 43 | 44 | # The decorator doesn't exist, so we add it 45 | return updated_node.with_changes(decorators=[decorator] + list(updated_node.decorators)) 46 | return updated_node 47 | 48 | 49 | class DecoratorRemover(cst.CSTTransformer): 50 | def __init__(self, decorator_name: str): 51 | self.decorator_name = decorator_name 52 | 53 | def leave_Decorator(self, original_node: cst.Decorator, updated_node: cst.Decorator) -> Union[ 54 | cst.Decorator, cst.RemovalSentinel]: 55 | if isinstance(original_node.decorator, cst.Call) and \ 56 | isinstance(original_node.decorator.func, 
cst.Attribute) and \ 57 | original_node.decorator.func.attr.value == self.decorator_name and \ 58 | isinstance(original_node.decorator.func.value, cst.Attribute) and \ 59 | original_node.decorator.func.value.attr.value == 'mark' and \ 60 | isinstance(original_node.decorator.func.value.value, cst.Name) and \ 61 | original_node.decorator.func.value.value.value == 'pytest': 62 | return cst.RemovalSentinel.REMOVE 63 | return updated_node 64 | 65 | 66 | def update_tests(file: str, 67 | mapped_tests: List[Tuple[str, int]], 68 | all_tests: List[str], 69 | decorator_name: str, 70 | remove=False): 71 | with open(file, 'r') as f: 72 | source_code = f.read() 73 | 74 | tree = cst.parse_module(source_code) 75 | transform = DecoratorUpdater(mapped_tests, all_tests, decorator_name) 76 | if remove: 77 | transform = DecoratorRemover(decorator_name) 78 | tree = tree.visit(transform) 79 | else: 80 | transform = DecoratorUpdater(mapped_tests, all_tests, decorator_name) 81 | tree = tree.visit(transform) 82 | updated_source_code = tree.code 83 | 84 | with open(file, "w") as file: 85 | file.write(updated_source_code) -------------------------------------------------------------------------------- /pytestomatio/decor/pep8.py: -------------------------------------------------------------------------------- 1 | import ast 2 | import autopep8 3 | 4 | pytest_mark = 'pytest', 'mark' 5 | 6 | 7 | class DecoratorUpdater(ast.NodeTransformer): 8 | def __init__(self, mapped_tests: list[tuple[str, int]], all_tests: list[str], decorator_name: str): 9 | self.mapped_tests = mapped_tests 10 | self.all_tests = all_tests 11 | self.decorator_name = decorator_name 12 | 13 | def _get_id_by_title(self, title: str): 14 | for pair in self.mapped_tests: 15 | if pair[0] == title: 16 | return pair[1] 17 | 18 | def _remove_decorator(self, node: ast.FunctionDef) -> ast.FunctionDef: 19 | node.decorator_list = [decorator for decorator in node.decorator_list if 20 | not (isinstance(decorator, ast.Call) and decorator.func.attr == self.decorator_name)] 21 | return node 22 | 23 | def remove_decorators(self, tree: ast.Module) -> ast.Module: 24 | for node in ast.walk(tree): 25 | if isinstance(node, ast.FunctionDef): 26 | self.visit_FunctionDef(node, remove=True) 27 | return tree 28 | 29 | def visit_FunctionDef(self, node: ast.FunctionDef, remove=False) -> ast.FunctionDef: 30 | if remove: 31 | return self._remove_decorator(node) 32 | else: 33 | if node.name in self.all_tests: 34 | if not any(isinstance(decorator, ast.Call) and 35 | decorator.func.attr == self.decorator_name 36 | for decorator in node.decorator_list): 37 | test_id = self._get_id_by_title(node.name) 38 | deco_name = f'mark.{self.decorator_name}(\'{test_id}\')' 39 | decorator = ast.Name(id=deco_name, ctx=ast.Load()) 40 | node.decorator_list = [decorator] + node.decorator_list 41 | return node 42 | 43 | def insert_pytest_mark_import(self, tree: ast.Module, module_name: str, decorator_name: str) -> None: 44 | # Check if the import statement already exists 45 | if not any( 46 | isinstance(node, ast.ImportFrom) and 47 | node.module == module_name and 48 | any(alias.name == decorator_name for alias in node.names) 49 | for node in tree.body 50 | ): 51 | import_node = ast.ImportFrom( 52 | module=module_name, 53 | names=[ast.alias(name=decorator_name, asname=None)], 54 | level=0 55 | ) 56 | tree.body.insert(0, import_node) 57 | 58 | 59 | def update_tests(file: str, 60 | mapped_tests: list[tuple[str, int]], 61 | all_tests: list[str], 62 | decorator_name: str, 63 | remove=False): 64 | with 
open(file, 'r') as f: 65 | source_code = f.read() 66 | 67 | tree = ast.parse(source_code) 68 | transform = DecoratorUpdater(mapped_tests, all_tests, decorator_name) 69 | if remove: 70 | transform.remove_decorators(tree) 71 | else: 72 | tree = transform.visit(tree) 73 | transform.insert_pytest_mark_import(tree, *pytest_mark) 74 | updated_source_code = ast.unparse(tree) 75 | 76 | pep8_source_code = autopep8.fix_code(updated_source_code) 77 | 78 | with open(file, "w") as file: 79 | file.write(pep8_source_code) -------------------------------------------------------------------------------- /pytestomatio/main.py: -------------------------------------------------------------------------------- 1 | import os, pytest, logging, json, time 2 | 3 | from pytest import Parser, Session, Config, Item, CallInfo 4 | from pytestomatio.connect.connector import Connector 5 | from pytestomatio.connect.s3_connector import S3Connector 6 | from pytestomatio.testing.testItem import TestItem 7 | from pytestomatio.decor.decorator_updater import update_tests 8 | 9 | from pytestomatio.utils.helper import add_and_enrich_tests, get_test_mapping, collect_tests, read_env_s3_keys 10 | from pytestomatio.utils.parser_setup import parser_options 11 | from pytestomatio.utils import validations 12 | 13 | from pytestomatio.testomatio.testRunConfig import TestRunConfig 14 | from pytestomatio.testomatio.testomatio import Testomatio 15 | from pytestomatio.testomatio.filter_plugin import TestomatioFilterPlugin 16 | 17 | log = logging.getLogger(__name__) 18 | log.setLevel('INFO') 19 | 20 | metadata_file = 'metadata.json' 21 | decorator_name = 'testomatio' 22 | testomatio = 'testomatio' 23 | TESTOMATIO_URL = 'https://app.testomat.io' 24 | 25 | 26 | def pytest_addoption(parser: Parser) -> None: 27 | parser_options(parser, testomatio) 28 | 29 | 30 | def pytest_collection(session): 31 | """Capture original collected items before any filters are applied.""" 32 | # This hook is called after initial test collection, before other filters. 33 | # We'll store the items in a session attribute for later use. 34 | session._pytestomatio_original_collected_items = [] 35 | 36 | 37 | def pytest_configure(config: Config): 38 | config.addinivalue_line( 39 | "markers", "testomatio(arg): built in marker to connect test case with testomat.io by unique id" 40 | ) 41 | 42 | option = validations.validate_option(config) 43 | if option == 'debug': 44 | return 45 | 46 | pytest.testomatio = Testomatio(TestRunConfig()) 47 | 48 | url = os.environ.get('TESTOMATIO_URL') or config.getini('testomatio_url') or TESTOMATIO_URL 49 | project = os.environ.get('TESTOMATIO') 50 | 51 | pytest.testomatio.connector = Connector(url, project) 52 | run_env = config.getoption('testRunEnv') 53 | if run_env: 54 | pytest.testomatio.test_run_config.set_env(run_env) 55 | 56 | if config.getoption(testomatio) and config.getoption(testomatio).lower() == 'report': 57 | run: TestRunConfig = pytest.testomatio.test_run_config 58 | 59 | # for xdist - main process 60 | if not hasattr(config, 'workerinput'): 61 | run_id = pytest.testomatio.test_run_config.test_run_id 62 | if not run_id: 63 | run_details = pytest.testomatio.connector.create_test_run(**run.to_dict()) 64 | if run_details: 65 | run_id = run_details.get('uid') 66 | run.save_run_id(run_id) 67 | else: 68 | log.error("Failed to create testrun on Testomat.io") 69 | 70 | # Mark our pytest_collection_modifyitems hook to run last, 71 | # so that it sees the effect of all built-in and other filters first. 
72 | # This ensures we only apply our OR logic after other filters have done their job. 73 | config.pluginmanager.register(TestomatioFilterPlugin(), "testomatio_filter_plugin") 74 | 75 | @pytest.hookimpl(tryfirst=True) 76 | def pytest_collection_modifyitems(session: Session, config: Config, items: list[Item]) -> None: 77 | if config.getoption(testomatio) is None: 78 | return 79 | 80 | # Store a copy of all initially collected items (the first time this hook runs) 81 | # The first call to this hook happens before built-in filters like -k, -m fully apply. 82 | # By the time this runs, items might still be unfiltered or only partially filtered. 83 | # To ensure we get the full original list, we use pytest_collection hook above. 84 | if not session._pytestomatio_original_collected_items: 85 | # The initial call here gives us the full collected list of tests 86 | session._pytestomatio_original_collected_items = items[:] 87 | 88 | # At this point, if other plugins or internal filters like -m and -k run, 89 | # they may modify `items` (removing some tests). We run after them by using a hook wrapper 90 | # or a trylast marker to ensure our logic runs after most filters. 91 | 92 | meta, test_files, test_names = collect_tests(items) 93 | match config.getoption(testomatio): 94 | case 'sync': 95 | pytest.testomatio.connector.load_tests( 96 | meta, 97 | no_empty=config.getoption('no_empty'), 98 | no_detach=config.getoption('no_detach'), 99 | structure=config.getoption('keep_structure'), 100 | create=config.getoption('create'), 101 | directory=config.getoption('directory') 102 | ) 103 | testomatio_tests = pytest.testomatio.connector.get_tests(meta) 104 | add_and_enrich_tests(meta, test_files, test_names, testomatio_tests, decorator_name) 105 | pytest.exit('Sync completed without test execution') 106 | case 'remove': 107 | mapping = get_test_mapping(meta) 108 | for test_file in test_files: 109 | update_tests(test_file, mapping, test_names, decorator_name, remove=True) 110 | pytest.exit('Sync completed without test execution') 111 | case 'report': 112 | # for xdist workers - get run id from the main process 113 | run: TestRunConfig = pytest.testomatio.test_run_config 114 | run.get_run_id() 115 | 116 | # send update without status just to get artifact details from the server 117 | run_details = pytest.testomatio.connector.update_test_run(**run.to_dict()) 118 | 119 | if run_details is None: 120 | log.error('Test run failed to create. Reporting skipped') 121 | return 122 | 123 | s3_details = read_env_s3_keys(run_details) 124 | 125 | if all(s3_details): 126 | pytest.testomatio.s3_connector = S3Connector(*s3_details) 127 | pytest.testomatio.s3_connector.login() 128 | 129 | case 'debug': 130 | with open(metadata_file, 'w') as file: 131 | data = json.dumps([i.to_dict() for i in meta], indent=4) 132 | file.write(data) 133 | pytest.exit('Debug file created. Exiting...') 134 | case _: 135 | raise Exception('Unknown pytestomatio parameter. 
Use one of: add, remove, sync, debug') 136 | 137 | def pytest_runtest_makereport(item: Item, call: CallInfo): 138 | pytest.testomatio_config_option = item.config.getoption(testomatio) 139 | if pytest.testomatio_config_option is None or pytest.testomatio_config_option != 'report': 140 | return 141 | elif not pytest.testomatio.test_run_config.test_run_id: 142 | return 143 | 144 | test_item = TestItem(item) 145 | if test_item.id is None: 146 | test_id = None 147 | else: 148 | test_id = test_item.id if not test_item.id.startswith("@T") else test_item.id[2:] 149 | 150 | request = { 151 | 'status': None, 152 | 'title': test_item.exec_title, 153 | 'run_time': call.duration, 154 | 'suite_title': test_item.file_name, 155 | 'suite_id': None, 156 | 'test_id': test_id, 157 | 'message': None, 158 | 'stack': None, 159 | 'example': None, 160 | 'artifacts': test_item.artifacts, 161 | 'steps': None, 162 | 'code': None, 163 | } 164 | 165 | # TODO: refactor it and use TestItem setter to upate those attributes 166 | if call.when in ['setup', 'call']: 167 | if call.excinfo is not None: 168 | if call.excinfo.typename == 'Skipped': 169 | request['status'] = 'skipped' 170 | request['message'] = str(call.excinfo.value) 171 | else: 172 | request['message'] = str(call.excinfo.value) 173 | request['stack'] = '\n'.join((str(tb) for tb in call.excinfo.traceback)) 174 | request['status'] = 'failed' 175 | else: 176 | request['status'] = 'passed' if call.when == 'call' else request['status'] 177 | 178 | if hasattr(item, 'callspec'): 179 | request['example'] = test_item.safe_params(item.callspec.params) 180 | 181 | if item.nodeid not in pytest.testomatio.test_run_config.status_request: 182 | pytest.testomatio.test_run_config.status_request[item.nodeid] = request 183 | else: 184 | for key, value in request.items(): 185 | if key == 'title' and call.when == 'teardown': 186 | continue 187 | if value is not None: 188 | pytest.testomatio.test_run_config.status_request[item.nodeid][key] = value 189 | 190 | 191 | def pytest_runtest_logfinish(nodeid, location): 192 | if not hasattr(pytest, 'testomatio_config_option'): 193 | return 194 | if pytest.testomatio_config_option is None or pytest.testomatio_config_option != 'report': 195 | return 196 | elif not pytest.testomatio.test_run_config.test_run_id: 197 | return 198 | 199 | for nodeid, request in pytest.testomatio.test_run_config.status_request.items(): 200 | if request['status']: 201 | pytest.testomatio.connector.update_test_status(run_id=pytest.testomatio.test_run_config.test_run_id, 202 | **request) 203 | pytest.testomatio.test_run_config.status_request = {} 204 | 205 | 206 | def pytest_unconfigure(config: Config): 207 | if not hasattr(pytest, 'testomatio'): 208 | return 209 | 210 | run: TestRunConfig = pytest.testomatio.test_run_config 211 | # for xdist - main process 212 | if not hasattr(config, 'workerinput'): 213 | time.sleep(1) 214 | pytest.testomatio.connector.finish_test_run(run.test_run_id, True) 215 | run.clear_run_id() 216 | 217 | # for xdist - worker process 218 | else: 219 | pytest.testomatio.connector.finish_test_run(run.test_run_id, False) 220 | -------------------------------------------------------------------------------- /pytestomatio/testing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testomatio/pytestomatio/58db8a2d20b9e577513b80fd675d195b68797ddd/pytestomatio/testing/__init__.py -------------------------------------------------------------------------------- 
/pytestomatio/testing/code_collector.py: -------------------------------------------------------------------------------- 1 | import importlib.util 2 | import inspect 3 | 4 | 5 | def get_functions_source_by_name(abs_file_path: str, all_tests: list[str]): 6 | spec = importlib.util.spec_from_file_location('name', abs_file_path) 7 | module = importlib.util.module_from_spec(spec) 8 | spec.loader.exec_module(module) 9 | functions = inspect.getmembers(module, inspect.isfunction) 10 | classes = inspect.getmembers(module, inspect.isclass) 11 | for class_name, cls in classes: 12 | functions += inspect.getmembers(cls, inspect.isfunction) 13 | for function_name, function in functions: 14 | if function_name in all_tests: 15 | yield function_name, inspect.getsource(function) 16 | -------------------------------------------------------------------------------- /pytestomatio/testing/testItem.py: -------------------------------------------------------------------------------- 1 | from re import sub 2 | from typing import Iterable 3 | import uuid 4 | import json 5 | from pytest import Item 6 | import inspect 7 | 8 | MARKER = 'testomatio' 9 | class TestItem: 10 | def __init__(self, item: Item): 11 | self.uid = uuid.uuid4() 12 | self.id: str = TestItem.get_test_id(item) 13 | self.title = self._get_pytest_title(item.name) 14 | self.sync_title = self._get_sync_test_title(item) 15 | self.resync_title = self._get_resync_test_title(item) 16 | self.exec_title = self._get_execution_test_title(item) 17 | self.parameters = self._get_test_parameter_key(item) 18 | self.file_name = item.path.name 19 | self.abs_path = str(item.path) 20 | self.file_path = item.location[0] 21 | self.module = item.module.__name__ 22 | self.source_code = inspect.getsource(item.function) 23 | self.class_name = item.cls.__name__ if item.cls else None 24 | self.artifacts = item.stash.get("artifact_urls", []) 25 | 26 | def to_dict(self) -> dict: 27 | result = dict() 28 | result['uid'] = str(self.uid) 29 | result['id'] = self.id 30 | result['title'] = self.title 31 | result['fileName'] = self.file_name 32 | result['absolutePath'] = self.abs_path 33 | result['filePath'] = self.file_path 34 | result['module'] = self.module 35 | result['className'] = self.class_name 36 | result['sourceCode'] = self.source_code 37 | result['artifacts'] = self.artifacts 38 | return result 39 | 40 | def json(self) -> str: 41 | return json.dumps(self.to_dict(), indent=4) 42 | 43 | @staticmethod 44 | def get_test_id(item: Item) -> str | None: 45 | for marker in item.iter_markers(MARKER): 46 | if marker.args: 47 | return marker.args[0] 48 | 49 | def __str__(self) -> str: 50 | return f'TestItem: {self.id} - {self.title} - {self.file_path}' 51 | 52 | def __repr__(self): 53 | return f'TestItem: {self.id} - {self.title} - {self.file_path}' 54 | 55 | def _get_pytest_title(self, name: str) -> str: 56 | point = name.find('[') 57 | if point > -1: 58 | return name[0:point] 59 | return name 60 | 61 | # Testomatio resolves test id on BE by parsing test name to find test id 62 | def _get_sync_test_title(self, item: Item) -> str: 63 | test_name = self.pytest_title_to_testomatio_title(item.name) 64 | test_name = self._resolve_parameter_key_in_test_name(item, test_name) 65 | # Test id is present on already synced tests 66 | # New test don't have testomatio test id. 67 | test_id = TestItem.get_test_id(item) 68 | if (test_id): 69 | test_name = f'{test_name} {test_id}' 70 | # ex. "User adds item to cart" 71 | # ex. "User adds item to cart @T1234" 72 | # ex. 
"User adds ${item} to cart @T1234" 73 | # ex. "User adds ${variation} ${item} to cart @T1234" 74 | return test_name 75 | 76 | # Fix such example @pytest.mark.parametrize("version", "1.0.0"), ref. https://github.com/testomatio/check-tests/issues/147 77 | # that doesn't parse value correctly 78 | def _get_execution_test_title(self, item: Item) -> str: 79 | test_name = self.pytest_title_to_testomatio_title(item.name) 80 | test_name = self._resolve_parameter_value_in_test_name(item, test_name) 81 | # ex. "User adds item to cart" 82 | # ex. "User adds item to cart @T1234" 83 | # ex. "User adds phone to cart @T1234" 84 | # ex. "User adds green phone to cart @T1234" 85 | return test_name 86 | 87 | def pytest_title_to_testomatio_title(self, test_name: str) -> str: 88 | return test_name.lower().replace('_', ' ').replace("test", "", 1).strip().capitalize() 89 | 90 | def _get_resync_test_title(self, name: str) -> str: 91 | name = self._get_sync_test_title(name) 92 | tag_at = name.find("@T") 93 | if tag_at > 0: 94 | return name[0:tag_at].strip() 95 | else: 96 | return name 97 | 98 | def _get_test_parameter_key(self, item: Item): 99 | """Return a list of parameter names for a given test item.""" 100 | param_names = set() 101 | 102 | # 1) Look for @pytest.mark.parametrize 103 | for mark in item.iter_markers('parametrize'): 104 | # mark.args[0] is often a string like "param1,param2" 105 | # or just "param1" if there's only one. 106 | if len(mark.args) > 0 and isinstance(mark.args[0], str): 107 | arg_string = mark.args[0] 108 | # If the string has commas, split it into multiple names 109 | if ',' in arg_string: 110 | param_names.update(name.strip() for name in arg_string.split(',')) 111 | else: 112 | param_names.add(arg_string.strip()) 113 | 114 | # 2) Look for fixture parameterization (including dynamically generated) 115 | # via callspec, which holds *all* final parameters for an item. 116 | callspec = getattr(item, 'callspec', None) 117 | if callspec: 118 | # callspec.params is a dict: fixture_name -> parameter_value 119 | # We only want fixture names, not the values. 120 | param_names.update(callspec.params.keys()) 121 | 122 | # Return them as a list, or keep it as a set—whatever you prefer. 
123 | return list(param_names) 124 | 125 | 126 | def _resolve_parameter_key_in_test_name(self, item: Item, test_name: str) -> str: 127 | test_params = self._get_test_parameter_key(item) 128 | if not test_params: 129 | return test_name 130 | # Remove parameters from test name 131 | parameter_at = test_name.find('[') 132 | if parameter_at > -1: 133 | test_name = test_name[0:parameter_at] 134 | # Add parameters to test name 135 | test_name = test_name + " " + " ".join([f"${{{param}}}" for param in test_params]) 136 | return test_name 137 | 138 | def _resolve_parameter_value_in_test_name(self, item: Item, test_name: str) -> str: 139 | param_keys = self._get_test_parameter_key(item) 140 | sync_title = self._get_sync_test_title(item) 141 | 142 | if not param_keys: 143 | return test_name 144 | if not item.callspec: 145 | return test_name 146 | 147 | pattern = r'\$\{(.*?)\}' 148 | 149 | def repl(match): 150 | key = match.group(1) 151 | value = item.callspec.params.get(key, '') 152 | 153 | string_value = self._to_string_value(value) 154 | # TODO: handle "value with space" on testomatio BE https://github.com/testomatio/check-tests/issues/147 155 | return sub(r"[\.\s]", "_", string_value) # Temporary fix for spaces in parameter values 156 | 157 | test_name = sub(pattern, repl, sync_title) 158 | return test_name 159 | 160 | def _to_string_value(self, value): 161 | if callable(value): 162 | return value.__name__ if hasattr(value, "__name__") else "anonymous_function" 163 | elif isinstance(value, bytes): 164 | return value.decode('utf-8') 165 | elif isinstance(value, (str, int, float, bool)) or value is None: 166 | return str(value) 167 | else: 168 | return str(value) # Fallback to a string representation 169 | 170 | # TODO: leverage as an attribute setter 171 | def safe_params(self, params): 172 | return {key: self._to_string_value(value) for key, value in params.items()} 173 | 174 | -------------------------------------------------------------------------------- /pytestomatio/testomatio/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testomatio/pytestomatio/58db8a2d20b9e577513b80fd675d195b68797ddd/pytestomatio/testomatio/__init__.py -------------------------------------------------------------------------------- /pytestomatio/testomatio/filter_plugin.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | class TestomatioFilterPlugin: 4 | @pytest.hookimpl(trylast=True) 5 | def pytest_collection_modifyitems(self, session, config, items): 6 | # By now all other filters (like -m, -k, name-based) have been applied 7 | # and `items` is the filtered set after all their conditions. 8 | test_ids_str = config.getoption("test_id") 9 | if not test_ids_str: 10 | # No custom IDs specified, nothing to do 11 | return 12 | 13 | test_ids = test_ids_str.split("|") 14 | # Remove "@" from the start of test IDs if present 15 | test_ids = [test_id.lstrip("@T") for test_id in test_ids] 16 | if not test_ids: 17 | return 18 | 19 | # Now let's find all tests that match these test IDs from the original full list. 20 | # We use the originally collected tests to avoid losing tests filtered out by others. 
21 | original_items = session._pytestomatio_original_collected_items 22 | testomatio_matched = [] 23 | 24 | for item in original_items: 25 | # Check for testomatio marker 26 | for marker in item.iter_markers(name="testomatio"): 27 | 28 | marker_id = marker.args[0].lstrip("@T") # Strip the leading "@T" from the marker argument 29 | if marker_id in test_ids: 30 | testomatio_matched.append(item) 31 | break 32 | 33 | # We'll check common filters: -k, -m and a few others. 34 | # If they are empty or None, they are not active. 35 | 36 | other_filters_active = bool( 37 | config.option.keyword or # -k 38 | config.option.markexpr or # -m 39 | getattr(config.option, 'last_failed', False) or 40 | getattr(config.option, 'ff', False) or 41 | getattr(config.option, 'lf', False) or 42 | False 43 | ) 44 | 45 | if other_filters_active and "not" in config.option.keyword: 46 | # If a "not" keyword filter exists, it means an exclusion filter is applied. 47 | # In such a scenario we respect the exclusion filters: 48 | # we accept tests with the requested test ids as long as those tests do not fall into the exclusion filter 49 | items[:] = [item for item in testomatio_matched if item in items] 50 | return 51 | 52 | if other_filters_active: 53 | # If other filters are applied, use OR logic: 54 | # the final set is all items that passed previous filters plus those matched by test-ids 55 | # preserving the original order of tests 56 | items[:] = items + [item for item in testomatio_matched if item not in items] 57 | return 58 | 59 | # If no other filters are applied, the test-ids filter acts as an exclusive filter: 60 | # only run tests that match the given test IDs 61 | items[:] = testomatio_matched 62 | -------------------------------------------------------------------------------- /pytestomatio/testomatio/testRunConfig.py: -------------------------------------------------------------------------------- 1 | import os 2 | import datetime as dt 3 | import tempfile 4 | from pytestomatio.utils.helper import safe_string_list 5 | from typing import Optional 6 | 7 | TESTOMATIO_TEST_RUN_LOCK_FILE = ".testomatio_test_run_id_lock" 8 | 9 | class TestRunConfig: 10 | def __init__(self): 11 | run_id = os.environ.get('TESTOMATIO_RUN_ID') or os.environ.get('TESTOMATIO_RUN') 12 | title = os.environ.get('TESTOMATIO_TITLE') if os.environ.get('TESTOMATIO_TITLE') else 'test run at ' + dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S") 13 | shared_run = os.environ.get('TESTOMATIO_SHARED_RUN') in ['True', 'true', '1'] 14 | self.test_run_id = run_id 15 | self.title = title 16 | self.environment = safe_string_list(os.environ.get('TESTOMATIO_ENV')) 17 | self.label = safe_string_list(os.environ.get('TESTOMATIO_LABEL')) 18 | self.group_title = os.environ.get('TESTOMATIO_RUNGROUP_TITLE') 19 | # This allows reporting tests to the test run by its id. https://docs.testomat.io/getting-started/running-automated-tests/#reporting-parallel-tests 20 | self.parallel = False if shared_run else True 21 | # This allows using the test run title to group tests under a single test run. This is needed when running tests in different processes or on different servers.
22 | self.shared_run = shared_run 23 | self.status_request = {} 24 | self.build_url = self.resolve_build_url() 25 | 26 | def to_dict(self) -> dict: 27 | result = dict() 28 | if self.test_run_id: 29 | result['id'] = self.test_run_id 30 | result['title'] = self.title 31 | result['group_title'] = self.group_title 32 | result['env'] = self.environment 33 | result['label'] = self.label 34 | result['parallel'] = self.parallel 35 | result['shared_run'] = self.shared_run 36 | result['ci_build_url'] = self.build_url 37 | return result 38 | 39 | def set_env(self, env: str) -> None: 40 | self.environment = safe_string_list(env) 41 | 42 | def save_run_id(self, run_id: str) -> None: 43 | self.test_run_id = run_id 44 | temp_dir = tempfile.gettempdir() 45 | temp_file_path = os.path.join(temp_dir, TESTOMATIO_TEST_RUN_LOCK_FILE) 46 | with open(temp_file_path, 'w') as f: 47 | f.write(run_id) 48 | 49 | 50 | def get_run_id(self) -> Optional[str]: 51 | if self.test_run_id: 52 | return self.test_run_id 53 | temp_dir = tempfile.gettempdir() 54 | temp_file_path = os.path.join(temp_dir, TESTOMATIO_TEST_RUN_LOCK_FILE) 55 | if os.path.exists(temp_file_path): 56 | with open(temp_file_path, 'r') as f: 57 | self.test_run_id = f.read() 58 | return self.test_run_id 59 | return None 60 | 61 | def clear_run_id(self) -> None: 62 | temp_dir = tempfile.gettempdir() 63 | temp_file_path = os.path.join(temp_dir, TESTOMATIO_TEST_RUN_LOCK_FILE) 64 | if os.path.exists(temp_file_path): 65 | os.remove(temp_file_path) 66 | 67 | def resolve_build_url(self) -> Optional[str]: 68 | # You might not always want the build URL to change in the Testomat.io test run 69 | if os.getenv('TESTOMATIO_CI_DOWNSTREAM'): 70 | return None 71 | build_url = os.getenv('BUILD_URL') or os.getenv('CI_JOB_URL') or os.getenv('CIRCLE_BUILD_URL') 72 | 73 | # GitHub Actions URL 74 | if not build_url and os.getenv('GITHUB_RUN_ID'): 75 | github_server_url = os.getenv('GITHUB_SERVER_URL') 76 | github_repository = os.getenv('GITHUB_REPOSITORY') 77 | github_run_id = os.getenv('GITHUB_RUN_ID') 78 | build_url = f"{github_server_url}/{github_repository}/actions/runs/{github_run_id}" 79 | 80 | # Azure DevOps URL 81 | if not build_url and os.getenv('SYSTEM_TEAMFOUNDATIONCOLLECTIONURI'): 82 | collection_uri = os.getenv('SYSTEM_TEAMFOUNDATIONCOLLECTIONURI') 83 | project = os.getenv('SYSTEM_TEAMPROJECT') 84 | build_id = os.getenv('BUILD_BUILDID') 85 | build_url = f"{collection_uri}/{project}/_build/results?buildId={build_id}" 86 | 87 | if build_url and not build_url.startswith('http'): 88 | build_url = None 89 | 90 | return build_url -------------------------------------------------------------------------------- /pytestomatio/testomatio/testomat_item.py: -------------------------------------------------------------------------------- 1 | class TestomatItem: 2 | id: str 3 | title: str 4 | file_name: str 5 | 6 | def __init__(self, id: str, title: str, file_name: str): 7 | self.id = id 8 | self.title = title 9 | self.file_name = file_name 10 | 11 | def __str__(self) -> str: 12 | return f'TestomatItem: {self.id} - {self.title} - {self.file_name}' 13 | 14 | def __repr__(self): 15 | return f'TestomatItem: {self.id} - {self.title} - {self.file_name}' 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /pytestomatio/testomatio/testomatio.py: -------------------------------------------------------------------------------- 1 | from _pytest.python import Function 2 | from .testRunConfig import TestRunConfig 3 | from 
pytestomatio.connect.s3_connector import S3Connector 4 | from pytestomatio.connect.connector import Connector 5 | import logging 6 | 7 | log = logging.getLogger(__name__) 8 | 9 | 10 | class Testomatio: 11 | def __init__(self, test_run_config: TestRunConfig = None, 12 | s3_connector: S3Connector = None) -> None: 13 | self.s3_connector: S3Connector = s3_connector 14 | self.test_run_config: TestRunConfig = test_run_config 15 | self.connector: Connector = None 16 | 17 | def upload_files(self, files_list, bucket_name: str = None) -> str: 18 | if self.test_run_config.test_run_id is None: 19 | log.debug("Skipping file upload when testomatio test run is not created") 20 | return "" 21 | return self.s3_connector.upload_files(files_list, bucket_name) 22 | 23 | def upload_file(self, file_path: str, key: str = None, bucket_name: str = None) -> str: 24 | if self.test_run_config.test_run_id is None: 25 | log.debug("Skipping file upload when testomatio test run is not created") 26 | return "" 27 | return self.s3_connector.upload_file(file_path, key, bucket_name) 28 | 29 | def upload_file_object(self, file_bytes: bytes, key: str, bucket_name: str = None) -> str: 30 | if self.test_run_config.test_run_id is None: 31 | log.debug("Skipping file upload when testomatio test run is not created") 32 | return "" 33 | return self.s3_connector.upload_file_object(file_bytes, key, bucket_name) 34 | 35 | def add_artifacts(self, node: Function, url_list) -> None: 36 | artifact_urls = node.stash.get("artifact_urls", []) 37 | artifact_urls.extend(url_list) 38 | node.stash["artifact_urls"] = [ url for url in artifact_urls if url is not None] 39 | 40 | -------------------------------------------------------------------------------- /pytestomatio/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testomatio/pytestomatio/58db8a2d20b9e577513b80fd675d195b68797ddd/pytestomatio/utils/__init__.py -------------------------------------------------------------------------------- /pytestomatio/utils/helper.py: -------------------------------------------------------------------------------- 1 | from os import getenv 2 | from os.path import basename 3 | from pytest import Item 4 | from pytestomatio.testomatio.testomat_item import TestomatItem 5 | from pytestomatio.testing.testItem import TestItem 6 | from pytestomatio.decor.decorator_updater import update_tests 7 | from pytestomatio.testing.code_collector import get_functions_source_by_name 8 | from re import sub 9 | 10 | 11 | def collect_tests(items: list[Item]): 12 | meta: list[TestItem] = list() 13 | test_files: set = set() 14 | test_names: list = list() 15 | parameter_filter: set[Item] = set() 16 | for item in items: 17 | if item.function not in parameter_filter: 18 | parameter_filter.add(item.function) 19 | ti = TestItem(item) 20 | test_files.add(ti.abs_path) 21 | test_names.append(ti.title) 22 | meta.append(ti) 23 | 24 | for test_file in test_files: 25 | pairs = [p for p in get_functions_source_by_name(test_file, test_names)] 26 | for ti in meta: 27 | for name, source_code in pairs: 28 | if ti.title == name and ti.abs_path == test_file: 29 | ti.source_code = source_code 30 | break 31 | return meta, test_files, test_names 32 | 33 | 34 | def get_test_mapping(tests: list[TestItem]) -> list[tuple[str, int]]: 35 | return [(test.title, test.id) for test in tests] 36 | 37 | 38 | def parse_test_list(raw_response: dict) -> list[TestomatItem]: 39 | suites = set([suite for suite in 
raw_response['suites'].keys() if '#' not in suite]) 40 | result = dict() 41 | for key, value in raw_response['tests'].items(): 42 | test = result.get(value) 43 | if test is None: 44 | test = { 45 | 'name': None, 46 | 'suite': None, 47 | 'file': None 48 | } 49 | parts = [part for part in key.split('#') if part != ''] 50 | if len(parts) == 1: 51 | test['name'] = parts[0] 52 | elif len(parts) == 2: 53 | if parts[0] in suites: 54 | test['suite'] = parts[0] 55 | test['name'] = parts[1] 56 | elif len(parts) == 3: 57 | test['file'] = parts[0] 58 | test['name'] = parts[-1] 59 | result[value] = test 60 | return [TestomatItem(id, test['name'], test['file']) for id, test in result.items()] 61 | 62 | 63 | def add_and_enrich_tests(meta: list[TestItem], test_files: set, 64 | test_names: list, testomatio_tests: dict, decorator_name: str): 65 | # set test ids from testomatio to test metadata 66 | tcm_test_data = parse_test_list(testomatio_tests) 67 | for test in meta: 68 | for tcm_test in tcm_test_data: 69 | if not tcm_test.file_name: 70 | continue 71 | # Tests that are synced into a user-specified folder might end up with an altered file path in testomatio, 72 | # making the file path not match between the source code and testomatio. 73 | # To mitigate this we compare only file names, skipping the path. 74 | # While it works, it might not be the most reliable approach; 75 | # however, the underlying issue is the ability to alter the file path in testomatio 76 | # https://github.com/testomatio/check-tests?tab=readme-ov-file#import-into-a-specific-suite 77 | if test.resync_title == tcm_test.title and basename(test.file_name) == basename(tcm_test.file_name): 78 | test.id = tcm_test.id 79 | tcm_test_data.remove(tcm_test) 80 | break 81 | 82 | mapping = get_test_mapping(meta) 83 | for test_file in test_files: 84 | update_tests(test_file, mapping, test_names, decorator_name) 85 | 86 | 87 | def read_env_s3_keys(testRunConfig: dict) -> tuple: 88 | artifacts = testRunConfig.get('artifacts', {}) 89 | bucket_path = (getenv('BUCKET_PATH') or getenv('S3_BUCKET_PATH')) 90 | acl = 'private' if (getenv('TESTOMATIO_PRIVATE_ARTIFACTS') or artifacts.get('presign')) else "public-read" 91 | return ( 92 | getenv('REGION') or getenv('S3_REGION') or artifacts.get('REGION'), 93 | getenv('ACCESS_KEY_ID') or getenv('S3_ACCESS_KEY_ID') or artifacts.get('ACCESS_KEY_ID'), 94 | getenv('SECRET_ACCESS_KEY') or getenv('S3_SECRET_ACCESS_KEY') or artifacts.get('SECRET_ACCESS_KEY'), 95 | getenv('ENDPOINT') or getenv('S3_ENDPOINT') or artifacts.get('ENDPOINT'), 96 | getenv('BUCKET') or getenv('S3_BUCKET') or artifacts.get('BUCKET'), 97 | bucket_path + "/" + testRunConfig.get("uid") if bucket_path else testRunConfig.get("uid"), 98 | acl 99 | ) 100 | 101 | def safe_string_list(param: str): 102 | if not param: 103 | return None 104 | return ",".join([sub(r"\s", "", part) for part in param.split(',')]) 105 | -------------------------------------------------------------------------------- /pytestomatio/utils/parser_setup.py: -------------------------------------------------------------------------------- 1 | from pytest import Parser 2 | 3 | help_text = """ 4 | synchronise and connect tests with testomat.io.
Use parameters: 5 | sync - synchronize tests and set test ids in the code 6 | remove - removes testomat.io ids from ALL tests 7 | report - report tests into testomat.io 8 | debug - saves analysed test metadata to a json file in the test project root 9 | """ 10 | 11 | 12 | def parser_options(parser: Parser, testomatio='testomatio') -> None: 13 | group = parser.getgroup(testomatio, 'synchronise and connect tests with testomat.io') 14 | group.addoption(f'--{testomatio}', 15 | action='store', 16 | help=help_text) 17 | group.addoption(f'--testRunEnv', 18 | action='store', 19 | help=f'specify test run environment for testomat.io. Works only with --testomatio sync') 20 | group.addoption(f'--create', 21 | action='store_true', 22 | default=False, 23 | dest="create", 24 | help=""" 25 | To import tests with Test IDs set in source code into a project use the --create option. 26 | In this case a project will be populated with the same Test IDs as in the code. 27 | Use --testomatio sync together with the --create option to enable this behavior. 28 | """ 29 | ) 30 | group.addoption(f'--no-empty', 31 | action='store_true', 32 | default=False, 33 | dest="no_empty", 34 | help=""" 35 | Delete empty suites. 36 | When tests are marked with IDs and imported into already created suites in Testomat.io, newly imported suites may become empty. 37 | Use --testomatio sync together with the --no-empty option to clean them up after import. 38 | """ 39 | ) 40 | group.addoption(f'--no-detach', 41 | action='store_true', 42 | default=False, 43 | dest="no_detach", 44 | help=""" 45 | Disable detaching tests. 46 | If a test from a previous import is not found on the next import, it is marked as "detached". 47 | This is done to ensure that tests deleted from the codebase do not remain in Testomat.io. 48 | To disable this behaviour and not mark anything as detached on import, use sync together with the --no-detach option. 49 | """ 50 | ) 51 | group.addoption(f'--keep-structure', 52 | action='store_true', 53 | default=False, 54 | dest="keep_structure", 55 | help=""" 56 | Keep the structure of the source code. If suites are not created in Testomat.io they will be created based on the file structure. 57 | Use --testomatio sync together with the --keep-structure option to enable this behaviour. 58 | """ 59 | ) 60 | group.addoption('--directory', 61 | default=None, 62 | dest="directory", 63 | help=""" 64 | Specify the directory to use for the test file structure, ex. --directory Windows\\smoke or --directory Linux/e2e 65 | Use --testomatio sync together with the --directory option to enable this behaviour. 66 | Default is the root of the project. 67 | Note: the --keep-structure option takes precedence over the --directory option. If both are used, --keep-structure will be used.
68 | """ 69 | ) 70 | group.addoption('--test-id', 71 | default=None, 72 | dest="test_id", 73 | help=""" 74 | Filter tests by Test IDs (e.g., a single test id 'T00C73028' or multiple ids 'T00C73028|T00C73029') 75 | """ 76 | ) 77 | parser.addini('testomatio_url', 'testomat.io base url') 78 | -------------------------------------------------------------------------------- /pytestomatio/utils/validations.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Literal 3 | from pytest import Config 4 | from _pytest.config.exceptions import UsageError 5 | 6 | 7 | 8 | def validate_option(config: Config) -> Literal['sync', 'report', 'remove', 'debug', None]: 9 | option = config.getoption('testomatio') 10 | option = option.lower() if option else None 11 | if option in ('sync', 'report', 'remove'): 12 | if os.getenv('TESTOMATIO') is None: 13 | raise ValueError('TESTOMATIO env variable is not set') 14 | 15 | xdist_plugin = config.pluginmanager.getplugin('xdist') 16 | if xdist_plugin and option in ('sync', 'debug', 'remove'): 17 | if config.option.numprocesses == 0: 18 | return 19 | 20 | raise UsageError("The 'sync' mode does not support parallel execution! " 21 | "In order to synchronise test run command sync as '--testomatio sync -n 0'") 22 | 23 | return option 24 | -------------------------------------------------------------------------------- /smoke.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import os 3 | import toml 4 | 5 | def get_version_from_pyproject(): 6 | try: 7 | # Load the pyproject.toml file 8 | pyproject_data = toml.load("pyproject.toml") 9 | # Extract the version from the project metadata 10 | return pyproject_data.get("project", {}).get("version", "unknown") 11 | except FileNotFoundError: 12 | print("pyproject.toml not found.
Using default version.") 13 | return "unknown" 14 | except Exception as e: 15 | print(f"An error occurred while reading pyproject.toml: {e}") 16 | return "unknown" 17 | 18 | def run_pytest(): 19 | # Get version from pyproject.toml 20 | version = get_version_from_pyproject() 21 | 22 | # Set environment variables 23 | env = os.environ.copy() 24 | env["TESTOMATIO_SHARED_RUN"] = "1" 25 | env["TESTOMATIO_TITLE"] = f"smoke-{version}" 26 | 27 | # Pytest command 28 | pytest_command = [ 29 | "pytest", 30 | "-p", "pytester", # Load the pytester plugin 31 | "-m", "smoke", # Run only tests with the "smoke" marker 32 | "-vv" # Verbose output 33 | ] 34 | 35 | try: 36 | # Run the pytest command, streaming output to the console 37 | process = subprocess.Popen( 38 | pytest_command, 39 | env=env, 40 | stdout=None, # Allow real-time streaming of stdout 41 | stderr=None, # Allow real-time streaming of stderr 42 | ) 43 | 44 | # Wait for the process to complete 45 | process.wait() 46 | 47 | # Check the exit code 48 | if process.returncode == 0: 49 | print("All tests passed successfully!") 50 | else: 51 | print(f"Some tests failed with exit code: {process.returncode}") 52 | 53 | except Exception as e: 54 | print(f"An error occurred while running pytest: {e}") 55 | 56 | if __name__ == "__main__": 57 | run_pytest() 58 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testomatio/pytestomatio/58db8a2d20b9e577513b80fd675d195b68797ddd/tests/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | from pytest import mark 2 | from pytest import fixture 3 | from dotenv import load_dotenv 4 | 5 | load_dotenv() 6 | -------------------------------------------------------------------------------- /tests/sub/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testomatio/pytestomatio/58db8a2d20b9e577513b80fd675d195b68797ddd/tests/sub/__init__.py -------------------------------------------------------------------------------- /tests/sub/sub_mob/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/testomatio/pytestomatio/58db8a2d20b9e577513b80fd675d195b68797ddd/tests/sub/sub_mob/__init__.py -------------------------------------------------------------------------------- /tests/sub/sub_mob/sub_sub_class_test.py: -------------------------------------------------------------------------------- 1 | from pytest import mark 2 | import pytest 3 | 4 | 5 | class TestClassSubSub: 6 | 7 | @pytest.mark.testomatio("@T7e1cf6d3") 8 | def test_one_pass_sub(self): 9 | x = 'this' 10 | assert 'h' in x 11 | 12 | @pytest.mark.testomatio("@T64c0abec") 13 | def test_two_fail_sub(self): 14 | x = 'hello' 15 | assert hasattr(x, 'check') 16 | 17 | @pytest.mark.testomatio("@Ta488bdcb") 18 | @mark.skip 19 | def test_three_skip_sub(self, dummy_fixture): 20 | x = 'hello' 21 | assert hasattr(x, 'check') 22 | -------------------------------------------------------------------------------- /tests/sub/sub_mob/sub_sub_test.py: -------------------------------------------------------------------------------- 1 | from pytest import mark 2 | import pytest 3 | 4 | @pytest.mark.testomatio("@T761fa328") 5 | def 
test_pass_sub_sub(): 6 | assert 2 + 2 == 4 7 | 8 | 9 | @pytest.mark.testomatio("@T327cdc55") 10 | def test_pass_fix_sub_sub(dummy_fixture): 11 | assert 3 + 3 == 6 12 | 13 | 14 | @pytest.mark.testomatio("@T0c63a54a") 15 | def test_fail_sub_sub(): 16 | assert 2 + 2 == 11 17 | 18 | 19 | @pytest.mark.testomatio("@T3dd906d1") 20 | @mark.parametrize('data', [1, 2, 3, 4, 5, 'a']) 21 | def test_ddt_parametrized_sub_sub(data): 22 | assert str(data).isnumeric() 23 | 24 | 25 | @pytest.mark.testomatio("@T1aec685a") 26 | @mark.skip 27 | def test_skip_sub_sub(): 28 | n = 3 29 | p = 7 30 | assert n * p == 21 31 | -------------------------------------------------------------------------------- /tests/sub/test_class_sub.py: -------------------------------------------------------------------------------- 1 | from pytest import mark 2 | 3 | 4 | class TestClassSub: 5 | 6 | @mark.testomatio("@T7e1cf6d3") 7 | def test_one_pass_sub(self): 8 | x = 'this' 9 | assert 'h' in x 10 | 11 | @mark.testomatio("@T64c0abec") 12 | def test_two_fail_sub(self): 13 | x = 'hello' 14 | assert hasattr(x, 'check') 15 | 16 | @mark.testomatio("@Ta488bdcb") 17 | @mark.skip 18 | def test_three_skip_sub(self): 19 | x = 'hello' 20 | assert hasattr(x, 'check') 21 | -------------------------------------------------------------------------------- /tests/sub/test_sub.py: -------------------------------------------------------------------------------- 1 | from pytest import mark 2 | 3 | 4 | @mark.testomatio("@T9c322c95") 5 | def test_pass_sub(): 6 | assert 2 + 2 == 4 7 | 8 | 9 | @mark.testomatio("@T4e6f250b") 10 | def test_pass_fix_sub(dummy_fixture): 11 | assert 3 + 3 == 6 12 | 13 | 14 | @mark.testomatio("@T0bf7108d") 15 | def test_fail_sub(): 16 | assert 2 + 2 == 11 17 | 18 | 19 | @mark.testomatio("@T7e069711") 20 | @mark.parametrize('data', [1, 2, 3, 4, 5]) 21 | def test_ddt_parametrized_sub(data): 22 | assert str(data).isnumeric() 23 | 24 | 25 | @mark.testomatio("@Tad0d98ed") 26 | @mark.skip 27 | def test_skip_sub(): 28 | n = 3 29 | p = 7 30 | assert n * p == 21 31 | -------------------------------------------------------------------------------- /tests/test_class_root.py: -------------------------------------------------------------------------------- 1 | from pytest import mark 2 | 3 | 4 | class TestClass: 5 | 6 | @mark.testomatio('@T4a0527af') 7 | def test_one_pass(self): 8 | x = 'this' 9 | assert 'h' in x 10 | 11 | @mark.testomatio('@T4bc8a939') 12 | def test_two_fail(self): 13 | x = 'hello' 14 | assert hasattr(x, 'check') 15 | 16 | @mark.testomatio('@T3dd32910') 17 | @mark.skip 18 | def test_three_skip(self): 19 | x = 'hello' 20 | assert hasattr(x, 'check') 21 | -------------------------------------------------------------------------------- /tests/test_cli_param_test_id.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | pytestmark = pytest.mark.smoke 3 | 4 | test_file = """ 5 | import pytest 6 | 7 | def test_smoke(): 8 | pass 9 | 10 | @pytest.mark.testomatio("@T123") 11 | def test_testomatio_only(): 12 | pass 13 | 14 | @pytest.mark.testomatio("@T456") 15 | def test_smoke_and_testomatio(): 16 | pass 17 | 18 | def test_neither_marker(): 19 | pass 20 | """ 21 | 22 | @pytest.mark.testomatio("@T7b058966") 23 | def test_cli_param_test_id_without_filters(pytester): 24 | pytester.makepyfile(test_file) 25 | 26 | result = pytester.runpytest_subprocess("--testomatio", "report", "-vv") 27 | result.assert_outcomes(passed=4, failed=0, skipped=0) 28 | result.stdout.fnmatch_lines([ 29 | 
"*::test_smoke*", 30 | "*::test_testomatio_only*", 31 | "*::test_smoke_and_testomatio*", 32 | "*::test_neither_marker*", 33 | ]) 34 | 35 | @pytest.mark.testomatio("@T3cf626ca") 36 | def test_cli_param_test_id_with_k_filter(pytester): 37 | pytester.makepyfile(test_file) 38 | 39 | result = pytester.runpytest_subprocess("--testomatio" ,"report", "-vv", "-k", "test_neither_marker") 40 | result.assert_outcomes(passed=1, failed=0, skipped=0) 41 | result.stdout.fnmatch_lines([ 42 | "*::test_neither_marker*", 43 | ]) 44 | 45 | @pytest.mark.testomatio("@T709adc8a") 46 | def test_cli_param_test_id_without_k_filter_matching_2_tests(pytester): 47 | pytester.makepyfile(test_file) 48 | 49 | result = pytester.runpytest_subprocess("--testomatio", "report", "-vv", "-k", "test_smoke") 50 | result.assert_outcomes(passed=2, failed=0, skipped=0) 51 | result.stdout.fnmatch_lines([ 52 | "*::test_smoke*", 53 | "*::test_smoke_and_testomatio*", 54 | ]) 55 | 56 | # TODO: troubleshoot pytester env 57 | # The testomatio and test-id parameters are lost in the pytester env. 58 | # Please test it in a semiautomated way with "test_cli_params.py" test 59 | @pytest.mark.testomatio("@T5a965adf") 60 | def test_cli_param_test_id_with_test_id_filter(pytester): 61 | pytest.skip() 62 | pytester.makepyfile(test_file) 63 | 64 | result = pytester.runpytest_subprocess("--testomatio", "report", '--test-id="@T123"', "-vv") 65 | result.assert_outcomes(passed=1, failed=0, skipped=0) 66 | result.stdout.fnmatch_lines([ 67 | "*::test_testomatio_only*", 68 | ]) -------------------------------------------------------------------------------- /tests/test_cli_params.py: -------------------------------------------------------------------------------- 1 | # pytest --testomatio report tests --test-id="@T123" -k test_smoke 2 | # verify 3 test passes 3 | # tests/test_cli_params.py::test_smoke PASSED [ 33%] 4 | # tests/test_cli_params.py::test_smoke_and_testomatio PASSED [ 66%] 5 | # tests/test_cli_params.py::test_testomatio_only PASSED [100%] 6 | # 7 | # ======================================= 3 passed, 50 deselected in 0.89s ======================================= 8 | 9 | import pytest 10 | 11 | @pytest.mark.testomatio("@T55ecbca9") 12 | def test_smoke(): 13 | pass 14 | 15 | @pytest.mark.testomatio("@T123") 16 | def test_testomatio_only(): 17 | pass 18 | 19 | @pytest.mark.testomatio("@T456") 20 | def test_smoke_and_testomatio(): 21 | pass 22 | 23 | @pytest.mark.testomatio("@T06f3da52") 24 | def test_neither_marker(): 25 | pass -------------------------------------------------------------------------------- /tests/test_decorators.py: -------------------------------------------------------------------------------- 1 | from pytest import mark 2 | import os 3 | 4 | 5 | @mark.testomatio('@Ta44e5a34') 6 | def test_something(): 7 | assert 1 == 1 8 | 9 | 10 | @mark.testomatio("@T81850b4b") 11 | def test_no_decorator(): 12 | assert 1 == 1 13 | 14 | 15 | @mark.testomatio("@T9c91e8e7") 16 | def test_some_test(): 17 | x = os.getenv('TESTOMATIO_CODE_STYLE') 18 | assert x == 'pep8' 19 | -------------------------------------------------------------------------------- /tests/test_parameters.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | pytestmark = pytest.mark.smoke 3 | 4 | test_file = """ 5 | import pytest 6 | 7 | # Define some dummy callables 8 | def add(a, b): 9 | return a + b 10 | 11 | def multiply(a, b): 12 | return a * b 13 | 14 | @pytest.mark.testomatio("@Tbca18714") 15 | @pytest.mark.parametrize( 
16 | "operation, a, b, expected", 17 | [ 18 | (add, 2, 3, 5), # Test add function 19 | (multiply, 2, 3, 6), # Test multiply function 20 | ], 21 | ) 22 | def test_operations(operation, a, b, expected): 23 | # Call the provided operation 24 | result = operation(a, b) 25 | assert result == expected, f"Expected {expected}, got {result}" 26 | """ 27 | 28 | @pytest.mark.testomatio("@Tb8930394") 29 | def test_callable_in_params(pytester): 30 | pytester.makepyfile(test_file) 31 | 32 | pytester.runpytest("--testomatio", "sync", "-n", "0", "--no-detach") 33 | result = pytester.runpytest("--testomatio", "report", "-vv") 34 | result.assert_outcomes(passed=2, failed=0, skipped=0) 35 | cleaned_lines = [line.strip() for line in result.stdout.lines if line.strip()] 36 | 37 | assert any("test_callable_in_params.py::test_operations[add-2-3-5]" in line for line in cleaned_lines) 38 | assert any("test_callable_in_params.py::test_operations[multiply-2-3-6]" in line for line in cleaned_lines) 39 | 40 | session_fixture_file = """ 41 | import pytest 42 | 43 | @pytest.fixture(scope="session", params=["db_connection_1", "db_connection_2"]) 44 | def session_fixture(request): 45 | # Simulate setting up a database connection 46 | db_connection = request.param 47 | yield db_connection 48 | # Simulate tearing down the database connection 49 | 50 | def test_session_fixture_usage(session_fixture): 51 | assert session_fixture in ["db_connection_1", "db_connection_2"], ( 52 | f"Unexpected session fixture value: {session_fixture}" 53 | ) 54 | """ 55 | 56 | def test_session_fixture_with_param(pytester): 57 | pytester.makepyfile(session_fixture_file) 58 | 59 | pytester.runpytest("--testomatio", "sync", "-n", "0", "--no-detach") 60 | result = pytester.runpytest("--testomatio", "report", "-vv", "--full-trace") 61 | result.assert_outcomes(passed=2, failed=0, skipped=0) 62 | 63 | cleaned_lines = [line.strip() for line in result.stdout.lines if line.strip()] 64 | 65 | assert any("test_session_fixture_usage[db_connection_1]" in line for line in cleaned_lines) 66 | assert any("test_session_fixture_usage[db_connection_2]" in line for line in cleaned_lines) 67 | 68 | -------------------------------------------------------------------------------- /tests/test_root.py: -------------------------------------------------------------------------------- 1 | from pytest import mark, fixture 2 | 3 | 4 | @fixture 5 | def some_fixture(): 6 | yield 7 | 8 | 9 | @mark.testomatio('@T67a02d9f') 10 | def test_pass(some_fixture): 11 | import time 12 | assert 2 + 2 == 4 13 | 14 | 15 | @mark.testomatio('@T4e2f8df1') 16 | def test_pass_fix(dummy_fixture): 17 | assert 3 + 3 == 6 18 | 19 | 20 | # ------------------------------------------- 21 | @mark.testomatio('@Tefe6c6a2') 22 | def test_fail(): 23 | assert 2 + 2 == 11 24 | 25 | 26 | # ------------------------------------------- 27 | 28 | @mark.testomatio('@Tca8a4366') 29 | @mark.parametrize('data', [8, 1, 2, 3, 4, 5, 'a', b'123', b'asdasd', {'hello': 'world'}, [1, 2, 3]]) 30 | def test_ddt_parametrized(data): 31 | assert str(data).isnumeric() 32 | 33 | 34 | @mark.testomatio('@Tc5045fa6') 35 | @mark.skip 36 | def test_skip(): 37 | n = 3 38 | p = 7 39 | assert n * p == 21 40 | 41 | 42 | class TestClassCom: 43 | 44 | @mark.testomatio('@T7716c8f8') 45 | def test_cls_same_file(self): 46 | assert True 47 | -------------------------------------------------------------------------------- /tests/test_sync.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 
# TODO: verify requests to testomatio 3 | 4 | @pytest.mark.testomatio("@Tfaf4da53") 5 | @pytest.mark.smoke 6 | def test_sync_stop_when_xdist_in_use(pytester): 7 | pytester.makepyfile(""" 8 | def test_example(): 9 | assert True 10 | """) 11 | 12 | # Ensure that the plugin code raises UsageError for this scenario instead of a generic Exception. 13 | # Something like: 14 | # if option == 'sync' and parallel_set: 15 | # raise UsageError("The 'sync' mode does not support parallel execution! In order to synchronise test run command sync as '--testomatio sync -n 0'") 16 | 17 | result = pytester.runpytest('-p', 'xdist', '--testomatio', 'sync', '-vv') 18 | 19 | # Match the entire error line as it appears in stderr 20 | result.stderr.fnmatch_lines([ 21 | "ERROR: The 'sync' mode does not support parallel execution! In order to synchronise test run command sync as '--testomatio sync -n 0'" 22 | ]) 23 | 24 | # Now that it's a usage error, pytest should produce a summary line that we can assert on 25 | assert result.ret != 0 26 | 27 | @pytest.mark.smoke 28 | def test_sync_works_with_xdist_set_to_0(pytester): 29 | pytester.makepyfile(""" 30 | def test_example(): 31 | assert True 32 | """) 33 | 34 | result = pytester.runpytest_subprocess('-p', 'xdist', '--testomatio', 'sync', '-n', '0', '-vv') 35 | 36 | # Assert that the sync completion message is printed to stdout 37 | result.stdout.fnmatch_lines([ 38 | "*Sync completed without test execution*" 39 | ]) 40 | 41 | # Verify the exit code: sync stops the session before any tests run, so pytest exits with code 2 rather than 0 42 | assert result.ret == 2 --------------------------------------------------------------------------------
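
Illustrative note (not part of the repository): the least obvious logic in the sources above is the title handling in pytestomatio/testing/testItem.py. The minimal, standalone sketch below mirrors that behaviour so it can be tried in isolation. pytest_title_to_testomatio_title reuses the exact expression from the source, while resolve_placeholders is a simplified, hypothetical stand-in for _resolve_parameter_value_in_test_name (the real method reads parameter values from item.callspec and handles callables and bytes via _to_string_value).

# Standalone sketch, assuming only the behaviour shown in testItem.py above.
from re import sub

def pytest_title_to_testomatio_title(test_name: str) -> str:
    # Same expression as in testItem.py: underscores become spaces,
    # the first "test" token is dropped, and the result is capitalized.
    return test_name.lower().replace('_', ' ').replace("test", "", 1).strip().capitalize()

def resolve_placeholders(sync_title: str, params: dict) -> str:
    # Simplified stand-in for _resolve_parameter_value_in_test_name:
    # substitute ${param} placeholders with stringified values and replace
    # dots/spaces with underscores, as the temporary fix in the source does.
    return sub(r'\$\{(.*?)\}',
               lambda m: sub(r"[\.\s]", "_", str(params.get(m.group(1), ''))),
               sync_title)

if __name__ == "__main__":
    print(pytest_title_to_testomatio_title("test_user_adds_item_to_cart"))
    # -> "User adds item to cart"
    print(resolve_placeholders("User adds ${item} to cart @T1234", {"item": "green phone"}))
    # -> "User adds green_phone to cart @T1234"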