├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── 1-bug-report.yml
│   │   ├── 2-feature.yml
│   │   └── config.yml
│   ├── PULL_REQUEST_TEMPLATE.md
│   └── workflows
│       ├── end2end-tests.yml
│       ├── python-app.yml
│       └── python-publish.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CHANGELOG.MD
├── CONTRIBUTING.md
├── Jenkinsfile
├── LICENSE.md
├── README.md
├── setup.py
├── tests
│   ├── __init__.py
│   ├── helpers
│   │   ├── __init__.py
│   │   ├── api_client_helpers.py
│   │   ├── cli_helpers.py
│   │   ├── load_data_from_config_helper.py
│   │   └── results_uploader_helper.py
│   ├── pytest.ini
│   ├── requirements-tox.txt
│   ├── requirements-variable-deps.txt
│   ├── requirements.txt
│   ├── test_api_client.py
│   ├── test_api_client_proxy.py
│   ├── test_api_data_provider.py
│   ├── test_api_request_handler.py
│   ├── test_cli.py
│   ├── test_cmd_add_run.py
│   ├── test_data
│   │   ├── XML
│   │   │   ├── custom_automation_id_in_property.xml
│   │   │   ├── empty.xml
│   │   │   ├── invalid.xml
│   │   │   ├── invalid_empty.xml
│   │   │   ├── milliseconds.xml
│   │   │   ├── no_root.xml
│   │   │   ├── required_only.xml
│   │   │   ├── robotframework_id_in_name_RF50.xml
│   │   │   ├── robotframework_id_in_name_RF70.xml
│   │   │   ├── robotframework_id_in_property_RF50.xml
│   │   │   ├── robotframework_id_in_property_RF70.xml
│   │   │   ├── robotframework_simple_RF50.xml
│   │   │   ├── robotframework_simple_RF70.xml
│   │   │   ├── root.xml
│   │   │   ├── root_id_in_name.xml
│   │   │   ├── root_id_in_property.xml
│   │   │   └── sauce.xml
│   │   ├── __init__.py
│   │   ├── api_client_test_data.py
│   │   ├── api_data_provider_test_data.py
│   │   ├── cli_test_data.py
│   │   ├── dataclass_creation.py
│   │   ├── json
│   │   │   ├── api_request_handler.json
│   │   │   ├── api_request_handler_long_testcase.json
│   │   │   ├── custom_automation_id_in_property.json
│   │   │   ├── data_provider.json
│   │   │   ├── data_provider_duplicated_case_names.json
│   │   │   ├── milliseconds.json
│   │   │   ├── no_root.json
│   │   │   ├── required_only.json
│   │   │   ├── robotframework_id_in_name_RF50.json
│   │   │   ├── robotframework_id_in_name_RF70.json
│   │   │   ├── robotframework_id_in_property_RF50.json
│   │   │   ├── robotframework_id_in_property_RF70.json
│   │   │   ├── robotframework_simple_RF50.json
│   │   │   ├── robotframework_simple_RF70.json
│   │   │   ├── root.json
│   │   │   ├── root_id_in_name.json
│   │   │   ├── root_id_in_property.json
│   │   │   ├── sauce1.json
│   │   │   ├── sauce2.json
│   │   │   ├── update_case_result_single_with_id.json
│   │   │   └── update_case_result_single_without_id.json
│   │   ├── load_data_from_config_test_data.py
│   │   ├── project_based_client_test_data.py
│   │   ├── proxy_test_data.py
│   │   ├── results_provider_test_data.py
│   │   └── yaml
│   │       ├── correct_config_file.yaml
│   │       ├── correct_config_file_loop_check.yaml
│   │       ├── correct_config_file_multiple_documents.yaml
│   │       ├── correct_config_file_multiple_documents_with_custom_config_path.yaml
│   │       ├── correct_config_file_with_custom_config_empty.yaml
│   │       ├── correct_config_file_with_custom_config_path.yaml
│   │       ├── corrupted_config_file.yaml
│   │       ├── corrupted_config_file_multiple_documents.yaml
│   │       ├── corrupted_config_file_with_empty_document.yaml
│   │       ├── corrupted_config_file_with_list.yaml
│   │       ├── corrupted_config_file_with_start_indicator_at_the_end.yaml
│   │       ├── corrupted_config_file_with_string.yaml
│   │       ├── custom_config_file.yaml
│   │       └── default_config_file.yaml
│   ├── test_dataclass_creation.py
│   ├── test_junit_parser.py
│   ├── test_load_data_from_config.py
│   ├── test_project_based_client.py
│   ├── test_response_verify.py
│   ├── test_results_uploader.py
│   └── test_robot_parser.py
├── tests_e2e
│   ├── __init__.py
│   ├── attachments
│   │   ├── evidence.json
│   │   └── testrail.jpg
│   ├── openapi_specs
│   │   └── openapi.yml
│   ├── pytest.ini
│   ├── reports_junit
│   │   ├── attachments.xml
│   │   ├── duplicate-names.xml
│   │   ├── generic_ids_auto.xml
│   │   ├── generic_ids_auto_plus_one.xml
│   │   ├── generic_ids_name.xml
│   │   ├── generic_ids_property.xml
│   │   ├── junit_multiple_parts_pt1.xml
│   │   ├── junit_multiple_parts_pt2.xml
│   │   └── saucelabs.xml
│   ├── reports_robot
│   │   └── simple_report_rf50.xml
│   ├── requirements.txt
│   └── test_end2end.py
├── tox.ini
└── trcli
    ├── __init__.py
    ├── api
    │   ├── __init__.py
    │   ├── api_client.py
    │   ├── api_request_handler.py
    │   ├── api_response_verify.py
    │   ├── project_based_client.py
    │   └── results_uploader.py
    ├── backports.py
    ├── cli.py
    ├── commands
    │   ├── __init__.py
    │   ├── cmd_add_run.py
    │   ├── cmd_parse_junit.py
    │   ├── cmd_parse_openapi.py
    │   ├── cmd_parse_robot.py
    │   └── results_parser_helpers.py
    ├── constants.py
    ├── data_classes
    │   ├── __init__.py
    │   ├── data_parsers.py
    │   ├── dataclass_testrail.py
    │   └── validation_exception.py
    ├── data_providers
    │   └── api_data_provider.py
    ├── readers
    │   ├── __init__.py
    │   ├── file_parser.py
    │   ├── junit_xml.py
    │   ├── openapi_yml.py
    │   └── robot_xml.py
    └── settings.py
/.github/ISSUE_TEMPLATE/1-bug-report.yml:
--------------------------------------------------------------------------------
1 | name: "🐞 Bug report"
2 | description: Report a bug found while using the TestRail CLI.
3 | labels: ["bug", "triage"]
4 | body:
5 | - type: input
6 | id: cli-version
7 | attributes:
8 | label: TestRail CLI Version
9 |       description: You can check the version by executing `trcli` or `pip show trcli` on your command line.
10 | placeholder: ex. 1.1.0
11 | validations:
12 | required: true
13 | - type: input
14 | id: cli-system
15 | attributes:
16 | label: CLI Environment
17 |       description: Information about your operating system, Python version, etc.
18 | placeholder: ex. Windows 10 Pro, Python 3.10.2, etc
19 | - type: input
20 | id: testrail-version
21 | attributes:
22 | label: TestRail Version
23 |       description: You can check your TestRail version under `Help & Feedback` > `About TestRail` in the top right corner.
24 | placeholder: ex. 7.3.0.3040
25 | validations:
26 | required: true
27 | - type: dropdown
28 | id: testrail-type
29 | attributes:
30 | label: TestRail Instance Type
31 | description: Your TestRail installation type. This can be found on the Administration page.
32 | options:
33 | - Professional Cloud
34 | - Professional Server
35 | - Enterprise Cloud
36 | - Enterprise Server
37 | - NA
38 | validations:
39 | required: true
40 | - type: textarea
41 | id: current-behavior
42 | attributes:
43 | label: Current behavior
44 | description: Add a short description of the issue, screenshots, logs, command being executed, config files, report samples, etc.
45 | placeholder: Currently...
46 | validations:
47 | required: true
48 | - type: textarea
49 | id: desired-behavior
50 | attributes:
51 | label: Desired behavior
52 | description: Explain what would be the expected behavior for the situation you just described.
53 | placeholder: I would expect that...
54 | - type: textarea
55 | id: more-details
56 | attributes:
57 | label: More Details
58 | placeholder: Other details you would like to include.
59 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/2-feature.yml:
--------------------------------------------------------------------------------
1 | name: "🆕 Feature or improvement"
2 | description: Suggest a feature or improvement for the TestRail CLI
3 | labels: ["enhancement", "triage"]
4 | body:
5 | - type: textarea
6 | id: feature
7 | attributes:
8 | label: What would you like the TestRail CLI to be able to do?
9 | description: A clear description of the feature or improvement, including files and other examples if it applies.
10 | placeholder: I'd like to be able to...
11 | validations:
12 | required: true
13 | - type: textarea
14 | id: reason
15 | attributes:
16 | label: Why is this feature necessary on the TestRail CLI?
17 |       description: Please state your use case so we can assess whether it would be valuable.
18 | placeholder: I want this because...
19 | - type: textarea
20 | id: other
21 | attributes:
22 | label: More details
23 | placeholder: Other details you would like to include.
24 | - type: dropdown
25 | id: will-implement
26 | attributes:
27 | label: Interested in implementing it yourself?
28 | description: Please let us know if you want to contribute by creating the solution yourself.
29 | options:
30 | - "Yes"
31 | - "No"
32 | - "Maybe, let's talk!"
33 | validations:
34 | required: true
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: true
2 | contact_links:
3 | - name: 💬 Community
4 | url: https://discuss.gurock.com/
5 |     about: Want to discuss the TestRail CLI with others? Check out our community forums.
6 | - name: 📃 Documentation
7 | url: https://www.gurock.com/testrail/docs/integrate/testrail-test-automation/testrail-cli/
8 | about: Check the TestRail documentation for more CLI info and real usage examples.
9 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
7 |
8 | ## Issue being resolved: https://github.com/gurock/trcli/issues/N
9 |
10 | ### Solution description
11 | How are we solving the problem?
12 |
13 | ### Changes
14 | What changes were made?
15 |
16 | ### Potential impacts
17 | What could potentially be affected by the implemented changes?
18 |
19 | ### Steps to test
20 | Happy path to test the implemented scenario
21 |
22 | ### PR Tasks
23 | - [ ] PR reference added to issue
24 | - [ ] README updated
25 | - [ ] Unit tests added/updated
26 |
--------------------------------------------------------------------------------
/.github/workflows/end2end-tests.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install the trcli package and run e2e tests
2 |
3 | name: End-To-End Tests
4 |
5 | on:
6 | workflow_dispatch:
7 |
8 | permissions:
9 | contents: read
10 |
11 | jobs:
12 | test:
13 |
14 | runs-on: ubuntu-latest
15 |
16 | steps:
17 | - uses: actions/checkout@v3
18 | - name: Set up Python 3.10
19 | uses: actions/setup-python@v3
20 | with:
21 | python-version: "3.10"
22 | - name: Install dependencies
23 | run: |
24 | python -m pip install --upgrade pip
25 | pip install -r tests_e2e/requirements.txt
26 | - name: Test with pytest
27 | run: |
28 | cd tests_e2e
29 | export TR_CLI_USERNAME="trcli@testrail.com"
30 | export TR_CLI_PASSWORD="${{ secrets.TESTRAIL_USER_KEY }}"
31 | pytest -c ./pytest.ini -W ignore::pytest.PytestCollectionWarning \
32 | --md-report --md-report-output=report.md --md-report-color=never \
33 | . || pytest_exit_code=$?
34 | echo "## :clipboard: Test Results" >> $GITHUB_STEP_SUMMARY
35 | cat report.md >> $GITHUB_STEP_SUMMARY
36 | if [[ $pytest_exit_code -gt 0 ]]
37 | then
38 |           echo "::error::End-to-end tests failed" && exit_code=1
39 | fi
40 | exit $exit_code
41 |
--------------------------------------------------------------------------------
/.github/workflows/python-app.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies and run tests with a single version of Python
2 | # For more information see:
3 | # https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
4 |
5 | name: Build App
6 |
7 | on:
8 | push:
9 | branches:
10 | - main
11 | pull_request:
12 | branches:
13 | - '*'
14 |
15 | permissions:
16 | contents: read
17 |
18 | jobs:
19 | build:
20 |
21 | strategy:
22 | matrix:
23 | python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
24 | os: [ubuntu-latest, windows-latest]
25 | runs-on: ${{ matrix.os }}
26 |
27 | steps:
28 | - uses: actions/checkout@v4
29 | - name: Set up Python ${{ matrix.python_version }}
30 | uses: actions/setup-python@v5
31 | with:
32 | python-version: "${{ matrix.python_version }}"
33 | - name: Install dependencies
34 | run: |
35 | python -m pip install --upgrade pip
36 | pip install -r tests/requirements.txt
37 | - name: Test with pytest (Linux)
38 | if: runner.os == 'Linux'
39 | run: |
40 | coverage run -m pytest \
41 | -c ./tests/pytest.ini \
42 | -W ignore::pytest.PytestCollectionWarning \
43 | --md-report --md-report-output=report.md --md-report-color=never \
44 | tests || pytest_exit_code=$?
45 | echo "## :clipboard: Test Results" >> $GITHUB_STEP_SUMMARY
46 | cat report.md >> $GITHUB_STEP_SUMMARY
47 | echo "## :bar_chart: Code coverage" >> $GITHUB_STEP_SUMMARY
48 | coverage report --format markdown >> $GITHUB_STEP_SUMMARY
49 | if [[ "$(coverage report --format total)" -lt 80 ]]
50 | then
51 | echo "::error::Code coverage is less than 80%" && exit_code=1
52 | fi
53 | if [[ $pytest_exit_code -gt 0 ]]
54 | then
55 | echo "::error::Unit tests failed" && exit_code=1
56 | fi
57 | exit $exit_code
58 | - name: Test with pytest (Windows)
59 | if: runner.os == 'Windows'
60 | run: |
61 |         coverage run -m pytest -c ./tests/pytest.ini -W ignore::pytest.PytestCollectionWarning --md-report --md-report-output=report.md --md-report-color=never tests; $pytest_exit_code = $LASTEXITCODE
62 |         echo "## :clipboard: Test Results" >> $env:GITHUB_STEP_SUMMARY
63 |         cat report.md >> $env:GITHUB_STEP_SUMMARY
64 |         echo "## :bar_chart: Code coverage" >> $env:GITHUB_STEP_SUMMARY
65 |         coverage report --format markdown >> $env:GITHUB_STEP_SUMMARY
66 |         if ( [int]"$(coverage report --format total)" -lt 80 )
67 |         {
68 |           echo "::error::Code coverage is less than 80%"; $exit_code = 1
69 |         }
70 |         if ( $pytest_exit_code -gt 0 )
71 |         {
72 |           echo "::error::Unit tests failed"; $exit_code = 1
73 |         }
74 |         exit $exit_code
75 | - name: Install the TR CLI on Linux (just to make sure it installs)
76 | if: runner.os == 'Linux'
77 | run: |
78 | pip install build
79 | python -m build
80 | pip install $(ls dist/*.tar.gz| head -n 1)
81 |
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3 |
4 | # This workflow uses actions that are not certified by GitHub.
5 | # They are provided by a third-party and are governed by
6 | # separate terms of service, privacy policy, and support
7 | # documentation.
8 |
9 | name: Upload Python Package
10 |
11 | on:
12 | release:
13 | types: [published]
14 |
15 | permissions:
16 | contents: read
17 |
18 | jobs:
19 | deploy:
20 |
21 | runs-on: ubuntu-latest
22 |
23 | steps:
24 | - uses: actions/checkout@v4
25 | - name: Set up Python
26 | uses: actions/setup-python@v5
27 | with:
28 | python-version: '3.x'
29 | - name: Install dependencies
30 | run: |
31 | python -m pip install --upgrade pip
32 | pip install build
33 | - name: Build package
34 | run: python -m build
35 | - name: Publish package to PyPI
36 | uses: pypa/gh-action-pypi-publish@release/v1
37 | with:
38 | user: __token__
39 | password: ${{ secrets.PYPI_API_TOKEN }}
40 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | __pycache__
3 | build
4 | *.egg-info
5 | .venv
6 | .pytest_cache
7 | .coverage
8 | TEMP_*
9 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/psf/black
3 | rev: stable
4 | hooks:
5 | - id: black
6 | language_version: python3.8
--------------------------------------------------------------------------------
/CHANGELOG.MD:
--------------------------------------------------------------------------------
1 | # TRCLI Changelog
2 |
3 | This project adheres to [Semantic Versioning](https://semver.org/). Version numbers are formatted as `MAJOR.MINOR.PATCH`, where:
4 |
5 | - **MAJOR**: Breaking changes, where backward compatibility is broken.
6 | - **MINOR**: New features that are backward-compatible.
7 | - **PATCH**: Bug fixes or minor changes that do not affect backward compatibility.
8 |
9 | ## [1.9.12]
10 |
11 | _released 05-30-2025_
12 |
13 | ### Fixed
14 | - Corrected CLI parameter for --include-all
15 | - Improved help output for the --key parameter
16 | - Fixed an issue where the automation id was not detected correctly during uploads
17 |
18 | ## [1.9.11]
19 |
20 | _released 05-21-2025_
21 |
22 | ### Added
23 | - Application build support for Python 3.13
24 |
25 | ## [1.9.10]
26 |
27 | _released 05-15-2025_
28 |
29 | ### Fixed
30 | - Fixed an issue where the automation id was not mapped correctly when uploading test results
31 |
32 | ## [1.9.9]
33 |
34 | _released 05-06-2025_
35 |
36 | ### Fixed
37 | - Improved security of the data parser
38 |
39 | ## [1.9.8]
40 |
41 | _released 10-04-2024_
42 |
43 | ### Fixed
44 | - Fixed the Add Run description getting wiped; fixes issue #250
45 | ### Added
46 | - New HTTP proxy support
47 |
48 | ## [1.9.7]
49 |
50 | _released 09-02-2024_
51 |
52 | ### Fixed
53 | - Fixed a dependency issue on pyserde by reverting to the previous 0.12.* series, which has less strict type enforcement. Fixes #266 and #267.
54 |
55 | ## [1.9.6]
56 |
57 | _released 08-30-2024_
58 |
59 | ### Fixed
60 | - Updated the requests dependency version; fixes issue #234
61 | - Fixed glob parsing; fixes issue #241
62 | - Cleaned up end-to-end tests
63 | - Fixed unit test assertions when run via tox
64 | ### Added
65 | - Added the add_run command; resolves issue #236
66 | - Made required dependency versions more flexible
67 | - Added tox for testing and dependency automation
68 |
69 | ## [1.9.5]
70 |
71 | _released 03-22-2024_
72 |
73 | ### Fixed
74 | - Fixed #218 by trimming the title to the maximum allowed characters
75 | - Fixed #219 by adding missing elapsed time on test results for Robot Framework tests
76 |
77 | ## [1.9.4]
78 |
79 | _released 03-09-2024_
80 |
81 | ### Fixed
82 |
83 | - [XML] Fixed parsing of testrail_case_field with an empty XML element but a value present
84 |
85 | ## [1.9.3]
86 |
87 | _released 03-07-2024_
88 |
89 | ### Added
90 | - Added test for #212; support for processing properties using CDATA
91 |
92 | ## [1.9.2]
93 |
94 | _released 03-06-2024_
95 |
96 | ### Fixed
97 | - Fixed #211: wrong beartype dependency
98 |
99 | ## [1.9.1]
100 |
101 | _released 03-05-2024_
102 |
103 | ### Fixed
104 |
105 | - Limited test case title length; fixes #207
106 |
107 | ## [1.9.0]
108 |
109 | _released 02-08-2024_
110 |
111 | ### Added
112 | - Improved README.md for easier copy-pasting
113 | - Preliminary support for the new Robot Framework 7.0 output.xml format
114 |
115 | ## [1.8.0]
116 |
117 | _released 12-21-2023_
118 |
119 | ### Fixed
120 | - Updated setup.py with a newer click version
121 | - Verified compatibility on Python 3.8 and 3.9
122 | - Added Python 3.8 support
123 |
124 | ## [1.7.0]
125 |
126 | _released 12-12-2023_
127 |
128 | ### Fixed
129 |
130 | - Made the title argument optional
131 | - Fixed #177 by adding a workaround for a buggy legacy server response
132 |
133 | ## [1.6.3]
134 |
135 | _released 12-07-2023_
136 |
137 | ### Fixed
138 | - Removed unnecessarily created sections
139 | - Fixed issue #181
140 |
141 | ## [1.6.2]
142 |
143 | _released 08-27-2023_
144 |
145 | ### Added
146 | - End-to-end tests
147 | - Keep existing suite on rollback; fixes #166
148 |
149 | ## [1.6.1]
150 |
151 | _released 09-11-2023_
152 |
153 | ### Fixed
154 | - Resolved legacy endpoint call
155 |
156 | ## [1.6.0]
157 |
158 | _released 09-06-2023_
159 |
160 | ### Added
161 | - Native Robot Framework report parser using command parse_robot
162 | - Test run update allows adding new tests
163 | - Add new test run to a test plan using --plan-id option (with configurations support using the --config-ids option)
164 | - Update test run inside test plans using --run-id only
165 | - New JUnit property for result steps
166 | - Select suite or create new suite using name with the --suite-name option
167 | - Report path with wildcard support to merge multiple reports and submit results to one run (i.e.: -f junit-report-*)
168 |
169 | ### Fixed
170 | - Fixed suite lookup by name method
171 |
172 | ## [1.5.0]
173 |
174 | _released 04-12-2023_
175 |
176 | ### Added
177 | - OpenAPI parser
178 |
179 | ## [v1.4.4]
180 | _released 04-07-2023_
181 |
182 | ### Fixed
183 | - Ignore inline ids
184 |
185 | ## [v1.4.3]
186 | _released 02-23-2023_
187 |
188 | ### Fixed
189 | - Ignore suite and section IDs in JUnit report
190 |
191 | ## [v1.4.2]
192 | _released 02-10-2023_
193 |
194 | ### Fixed
195 | - Fix automation_id check to support empty case field configs
196 | ### Added
197 | - Add Content-Type headers to GET requests
198 |
199 | ## [v1.4.1]
200 | _released 02-10-2023_
201 |
202 | ### Fixed
203 | - Fix case and result fields bug
204 |
205 | ## [v1.4.0]
206 | _released 02-09-2023_
207 |
208 | ### Added
209 | - Integration with SauceLabs saucectl reports
210 | - Support for multi-select field values in cmd args and properties (i.e.: "custom_multitype:[1,3]")
211 | - Specify test case IDs in test case name or property
212 | - Create the test run under a milestone using the --milestone-id option
213 | - Elapsed time in milliseconds using the --allow-ms option
214 | - Ability to enrich test cases and test case results through properties with: Case fields, Result fields, Result comments
215 |
216 | ## [v1.3.1]
217 | _released 11-26-2022_
218 |
219 | ### Fixed
220 | - Fix case and result fields bug
221 |
222 | ## [v1.3.0]
223 | _released 08-01-2022_
224 |
225 | ### Added
226 | - Updates to README
227 | - New attachments-on-results functionality using test case properties
228 | - Added attachment parsing
229 |
230 | ## [v1.2.0]
231 | _released 11-09-2022_
232 |
233 | ### Added
234 | - Support for legacy endpoint responses without pagination
235 |
236 | ## [v1.1.0]
237 | _released 10-07-2022_
238 |
239 | ### Fixed
240 | - Fix run description in one line
241 |
242 | ### Added
243 | - Support for --case-fields to add regular and custom fields values to test cases
244 | - Support for --run-description to add your custom run description
245 | - Support for --insecure requests
246 |
247 | ## [1.0]
248 | _released 04-16-2022_
249 | - **Initial Public Release:** The first public release of `trcli`.
--------------------------------------------------------------------------------
/Jenkinsfile:
--------------------------------------------------------------------------------
1 | pipeline {
2 | agent any
3 |
4 | stages {
5 | stage('Checkout code') {
6 | steps {
7 | checkout scm
8 | }
9 | }
10 | stage('Install dependencies') {
11 | steps {
12 | sh "python3 -m pip install -r ./tests/requirements.txt"
13 | }
14 | }
15 | stage('Test') {
16 | steps {
17 | sh "python3 -m pytest -c ./tests/pytest.ini -W ignore::pytest.PytestCollectionWarning --alluredir=./allure-results"
18 | }
19 | }
20 | stage('Allure report') {
21 | steps {
22 | script {
23 | allure([
24 | includeProperties: false,
25 | properties: [],
26 | reportBuildPolicy: 'ALWAYS',
27 | results: [[path: './allure-results']]
28 | ])
29 | }
30 | }
31 | }
32 | }
33 | }
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | from trcli import __version__
3 |
4 | setup(
5 | name="trcli",
6 | long_description="The TR CLI (trcli) is a command line tool for interacting with TestRail and uploading test automation results.",
7 | version=__version__,
8 | packages=[
9 | "trcli",
10 | "trcli.commands",
11 | "trcli.readers",
12 | "trcli.data_providers",
13 | "trcli.data_classes",
14 | "trcli.api",
15 | ],
16 | include_package_data=True,
17 | install_requires=[
18 | "click==8.0.3",
19 | "pyyaml>=6.0.0,<7.0.0",
20 | "junitparser>=3.1.0,<4.0.0",
21 | "pyserde==0.12.*",
22 | "requests>=2.31.0,<3.0.0",
23 | "tqdm>=4.65.0,<5.0.0",
24 | "humanfriendly>=10.0.0,<11.0.0",
25 | "openapi-spec-validator>=0.5.0,<1.0.0",
26 | "beartype>=0.17.0,<1.0.0",
27 | "prance" # Does not use semantic versioning
28 | ],
29 | entry_points="""
30 | [console_scripts]
31 | trcli=trcli.cli:cli
32 | """,
33 | )
34 |
--------------------------------------------------------------------------------
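The console_scripts entry point above maps the `trcli` executable to the click group `cli` in trcli/cli.py. A minimal sketch (not part of the repository) of what the wrapper script generated by pip effectively does:

    import sys

    from trcli.cli import cli

    if __name__ == "__main__":
        # Equivalent of the console_scripts wrapper pip generates for `trcli`.
        sys.exit(cli())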
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gurock/trcli/bf238450b555f28c9cd9db8b77a222ef909415e9/tests/__init__.py
--------------------------------------------------------------------------------
/tests/helpers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gurock/trcli/bf238450b555f28c9cd9db8b77a222ef909415e9/tests/helpers/__init__.py
--------------------------------------------------------------------------------
/tests/helpers/api_client_helpers.py:
--------------------------------------------------------------------------------
1 | from trcli.api.api_client import APIClientResult, APIClient
2 |
3 | TEST_RAIL_URL = "https://FakeTestRail.io/"
4 |
5 |
6 | def create_url(resource: str):
7 | """Create url based on given resource and predefined host and API version suffix"""
8 | return TEST_RAIL_URL + APIClient.SUFFIX_API_V2_VERSION + resource
9 |
10 |
11 | def check_response(
12 | expected_status_code: int,
13 | expected_response_text: str,
14 | expected_error_message: str,
15 | response: APIClientResult,
16 | ):
17 | assert (
18 | response.status_code == expected_status_code
19 | ), f"Status code {expected_status_code} expected. Got {response.status_code} instead."
20 | assert str(response.response_text) == str(
21 | expected_response_text
22 | ), f"response_text not equal to expected: {expected_response_text}. Got: {response.response_text}"
23 | assert str(response.error_message) == str(
24 | expected_error_message
25 | ), f"error_message not equal to expected: {expected_error_message}. Got: {response.error_message}"
26 |
27 |
28 | def check_calls_count(function_mock, expected_call_count=1):
29 | assert (
30 | function_mock.call_count == expected_call_count
31 | ), f"Function expected to be called {expected_call_count} but it was called: {function_mock.call_count}."
32 |
--------------------------------------------------------------------------------
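A minimal usage sketch (not part of the repository) for the helpers above; the concrete suffix is an assumption based on TestRail's standard API v2 URL scheme (`index.php?/api/v2/`), which is what `APIClient.SUFFIX_API_V2_VERSION` is expected to hold:

    from tests.helpers.api_client_helpers import TEST_RAIL_URL, create_url

    # create_url concatenates the fake host, the API suffix, and the resource.
    url = create_url("get_projects")
    # e.g. "https://FakeTestRail.io/index.php?/api/v2/get_projects"
    assert url.startswith(TEST_RAIL_URL)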
/tests/helpers/cli_helpers.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | from typing import List, Tuple
3 |
4 |
5 | class CLIParametersHelper:
6 | def __init__(self):
7 | self.optional_arguments = {}
8 |
9 | self.required_arguments = {
10 | "host": ["--host", "fake host name"],
11 | "project": ["--project", "fake project name"],
12 | "username": ["--username", "fake user name"],
13 | "password": ["--password", "fake password"],
14 | "key": ["--key", "fake API key"],
15 | "parse_junit": ["parse_junit"],
16 | "title": ["--title", "fake run title"],
17 | "file": ["--file", "fake_result_file.xml"],
18 | }
19 |
20 | def get_all_required_parameters(self) -> List[str]:
21 | return list(
22 | itertools.chain(*[value for key, value in self.required_arguments.items()])
23 | )
24 |
25 | def get_all_required_parameters_without_specified(
26 | self, args_to_remove: List[str]
27 | ) -> List[str]:
28 | return list(
29 | itertools.chain(
30 | *[
31 | value
32 | for key, value in self.required_arguments.items()
33 | if key not in args_to_remove
34 | ]
35 | )
36 | )
37 |
38 | def get_all_required_parameters_plus_optional(
39 | self, args_to_add: List[str]
40 | ) -> List[str]:
41 | required_args = self.get_all_required_parameters()
42 | return args_to_add + required_args
43 |
44 | def get_required_parameters_without_command_no_dashes(
45 | self,
46 | ) -> List[Tuple[str, str]]:
47 | return [
48 | (key, value[-1])
49 | for key, value in self.required_arguments.items()
50 | if key != "parse_junit"
51 | ]
52 |
--------------------------------------------------------------------------------
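A minimal sketch (not part of the repository) of how CLIParametersHelper can feed click's test runner; `cli` is the click entry point declared in setup.py (trcli.cli:cli):

    from click.testing import CliRunner

    from tests.helpers.cli_helpers import CLIParametersHelper
    from trcli.cli import cli

    # Build the full fake argument list and invoke the CLI in-process.
    args = CLIParametersHelper().get_all_required_parameters()
    result = CliRunner().invoke(cli, args)
    print(result.exit_code, result.output)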
/tests/helpers/load_data_from_config_helper.py:
--------------------------------------------------------------------------------
1 | def check_parsed_data(expected_result: dict, result_to_compare: dict):
2 | assert (
3 | expected_result == result_to_compare
4 | ), f"Wrong data received after config parsing. Expected: {expected_result} but received: {result_to_compare}."
5 |
6 |
7 | def check_verbose_message(expected_message: str, result_to_compare: str):
8 | assert (
9 |         expected_message in result_to_compare
10 | ), f"Wrong verbose message. Expected: {expected_message} but got {result_to_compare} instead."
11 |
--------------------------------------------------------------------------------
/tests/helpers/results_uploader_helper.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | from trcli.api.api_request_handler import ProjectData
3 | from trcli.api.results_uploader import ResultsUploader
4 |
5 |
6 | def upload_results_inner_functions_mocker(
7 | results_uploader: ResultsUploader, mocker, failing_functions: List[str]
8 | ):
9 | mocker_functions = [
10 | get_suite_id_mocker,
11 | check_for_missing_sections_and_add_mocker,
12 | check_for_missing_test_cases_and_add_mocker,
13 | add_run_mocker,
14 | update_run_mocker,
15 | add_results_mocker,
16 | close_run_mocker,
17 | ]
18 |
19 | for mocker_function in mocker_functions:
20 | failing = (
21 | True
22 | if mocker_function.__name__.replace("_mocker", "") in failing_functions
23 | else False
24 | )
25 | mocker_function(results_uploader, mocker, failing=failing)
26 |
27 |
28 | def api_request_handler_delete_mocker(
29 | results_uploader: ResultsUploader, mocker, failing_functions: List[str]
30 | ):
31 | mocker_functions = [
32 | delete_suite_mocker,
33 | delete_sections_mocker,
34 | delete_cases_mocker,
35 | delete_run_mocker,
36 | ]
37 |
38 | for mocker_function in mocker_functions:
39 | failing = (
40 | True
41 | if mocker_function.__name__.replace("_mocker", "") in failing_functions
42 | else False
43 | )
44 | mocker_function(results_uploader, mocker, failing=failing)
45 |
46 |
47 | def get_project_id_mocker(
48 | results_uploader: ResultsUploader, project_id, error_message: str, failing=False
49 | ):
50 | if failing:
51 | results_uploader.api_request_handler.get_project_data.return_value = ProjectData(
52 | project_id=project_id, suite_mode=-1, error_message=error_message
53 | )
54 | else:
55 | results_uploader.api_request_handler.get_project_data.return_value = ProjectData(
56 | project_id=project_id, suite_mode=1, error_message=""
57 | )
58 |
59 |
60 | def get_suite_id_mocker(results_uploader: ResultsUploader, mocker, failing=False):
61 | suite_id = 10
62 | results_uploader.get_suite_id = mocker.Mock()
63 | if failing:
64 | results_uploader.get_suite_id.return_value = (suite_id, -1, True)
65 | else:
66 | results_uploader.get_suite_id.return_value = (suite_id, 1, False)
67 |
68 |
69 | def check_for_missing_sections_and_add_mocker(
70 | results_uploader: ResultsUploader, mocker, failing=False
71 | ):
72 | results_uploader.add_missing_sections = mocker.Mock()
73 | if failing:
74 | results_uploader.add_missing_sections.return_value = (
75 | [10],
76 | -1,
77 | )
78 | else:
79 | results_uploader.add_missing_sections.return_value = (
80 | [10],
81 | 1,
82 | )
83 |
84 |
85 | def check_for_missing_test_cases_and_add_mocker(
86 | results_uploader: ResultsUploader, mocker, failing=False
87 | ):
88 | results_uploader.add_missing_test_cases = mocker.Mock()
89 | if failing:
90 | results_uploader.add_missing_test_cases.return_value = (
91 | [20, 30],
92 | -1,
93 | )
94 | else:
95 | results_uploader.add_missing_test_cases.return_value = (
96 | [20, 30],
97 | 1,
98 | )
99 |
100 |
101 | def add_run_mocker(results_uploader: ResultsUploader, mocker=None, failing=False):
102 | if failing:
103 | results_uploader.api_request_handler.add_run.return_value = (
104 | [],
105 | "Failed to add run.",
106 | )
107 | else:
108 | results_uploader.api_request_handler.add_run.return_value = (100, "")
109 |
110 |
111 | def update_run_mocker(results_uploader: ResultsUploader, mocker=None, failing=False):
112 | if failing:
113 | results_uploader.api_request_handler.add_run.return_value = (
114 | [],
115 | "Failed to add run.",
116 | )
117 | else:
118 | results_uploader.api_request_handler.update_run.return_value = (101, "")
119 |
120 | def add_results_mocker(results_uploader: ResultsUploader, mocker=None, failing=False):
121 | if failing:
122 | results_uploader.api_request_handler.add_results.return_value = (
123 | [],
124 | "Failed to add results.",
125 | 0,
126 | )
127 | else:
128 | results_uploader.api_request_handler.add_results.return_value = (
129 | [1, 2, 3],
130 | "",
131 | 3,
132 | )
133 |
134 |
135 | def close_run_mocker(results_uploader: ResultsUploader, mocker=None, failing=False):
136 | if failing:
137 | results_uploader.api_request_handler.close_run.return_value = (
138 | [],
139 | "Failed to close run.",
140 | )
141 | else:
142 | results_uploader.api_request_handler.close_run.return_value = ([100], "")
143 |
144 |
145 | def delete_suite_mocker(results_uploader: ResultsUploader, mocker=None, failing=False):
146 | if failing:
147 | results_uploader.api_request_handler.delete_suite.return_value = (
148 | [],
149 | "No permissions to delete suite.",
150 | )
151 | else:
152 | results_uploader.api_request_handler.delete_suite.return_value = ([100], "")
153 |
154 |
155 | def delete_sections_mocker(
156 | results_uploader: ResultsUploader, mocker=None, failing=False
157 | ):
158 | if failing:
159 | results_uploader.api_request_handler.delete_sections.return_value = (
160 | [],
161 | "No permissions to delete sections.",
162 | )
163 | else:
164 | results_uploader.api_request_handler.delete_sections.return_value = ([100], "")
165 |
166 |
167 | def delete_cases_mocker(results_uploader: ResultsUploader, mocker=None, failing=False):
168 | if failing:
169 | results_uploader.api_request_handler.delete_cases.return_value = (
170 | [],
171 | "No permissions to delete cases.",
172 | )
173 | else:
174 | results_uploader.api_request_handler.delete_cases.return_value = ([100], "")
175 |
176 |
177 | def delete_run_mocker(results_uploader: ResultsUploader, mocker=None, failing=False):
178 | if failing:
179 | results_uploader.api_request_handler.delete_run.return_value = (
180 | [],
181 | "No permissions to delete run.",
182 | )
183 | else:
184 | results_uploader.api_request_handler.delete_run.return_value = ([100], "")
185 |
--------------------------------------------------------------------------------
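A minimal sketch (not part of the repository) of how a test might drive the mockers above, assuming a pytest-mock `mocker` fixture and a ResultsUploader whose api_request_handler has already been replaced with a Mock:

    from tests.helpers.results_uploader_helper import (
        upload_results_inner_functions_mocker,
    )

    def test_upload_reports_add_run_failure(results_uploader, mocker):
        # Make add_run fail while every other mocked step succeeds.
        upload_results_inner_functions_mocker(
            results_uploader, mocker, failing_functions=["add_run"]
        )
        assert results_uploader.api_request_handler.add_run.return_value == (
            [],
            "Failed to add run.",
        )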
/tests/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | markers =
3 | cli: mark test as a command line interface test.
4 | results_uploader: tests for result uploader
5 | api_client: tests for api client
6 | load_config: tests for load config
7 | verifier: tests for verifier response
8 | parse_junit: tests for junit parser
9 | parse_robot: tests for Robot Framework parser
10 | dataclass: tests for dataclass
11 | api_handler: tests for api handler
12 | data_provider: tests for data provider
13 | project_based_client: mark a test as a project-based client test.
14 |     proxy: tests for the proxy feature
15 |
--------------------------------------------------------------------------------
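Each marker above gates a group of tests; a minimal sketch (not part of the repository) of how a marker is applied and selected, runnable with `pytest -m proxy`:

    import pytest

    @pytest.mark.proxy
    def test_selected_by_proxy_marker():
        # Collected only when running: pytest -m proxy
        assert True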
/tests/requirements-tox.txt:
--------------------------------------------------------------------------------
1 | pytest==8.0.*
2 | pytest-md-report
3 | coverage
4 | allure-pytest
5 | pytest-freezegun
6 | pytest-mock
7 | requests-mock
8 | deepdiff
9 | numpy
--------------------------------------------------------------------------------
/tests/requirements-variable-deps.txt:
--------------------------------------------------------------------------------
1 | click==8.1.*
2 | pyyaml
3 | junitparser
4 | pyserde==0.12.*
5 | requests
6 | tqdm
7 | humanfriendly
8 | beartype
--------------------------------------------------------------------------------
/tests/requirements.txt:
--------------------------------------------------------------------------------
1 | pytest==8.0.*
2 | pytest-md-report
3 | coverage
4 | allure-pytest
5 | pytest-freezegun
6 | pytest-mock
7 | requests-mock
8 | click==8.0.3
9 | pyyaml
10 | junitparser
11 | pyserde==0.12.*
12 | requests
13 | tqdm
14 | humanfriendly
15 | deepdiff
16 | beartype
--------------------------------------------------------------------------------
/tests/test_api_client_proxy.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from trcli.constants import FAULT_MAPPING
3 | from trcli.cli import Environment
4 | from requests.exceptions import ProxyError
5 | from trcli.api.api_client import APIClient
6 | from tests.helpers.api_client_helpers import (
7 | TEST_RAIL_URL,
8 | create_url,
9 | check_response,
10 | check_calls_count,
11 | )
12 | from tests.test_data.proxy_test_data import FAKE_PROJECT_DATA, FAKE_PROXY, FAKE_PROXY_USER, PROXY_ERROR_MESSAGE
13 |
14 |
15 | @pytest.fixture(scope="class")
16 | def api_resources_maker():
17 | def _make_api_resources(retries=3, environment=None, timeout=30, proxy=None, proxy_user=None, noproxy=None):
18 | if environment is None:
19 | environment = Environment()
20 | environment.verbose = False
21 | api_client = APIClient(
22 | host_name=TEST_RAIL_URL,
23 | verbose_logging_function=environment.vlog,
24 | logging_function=environment.log,
25 | retries=retries,
26 | timeout=timeout,
27 | proxy=proxy,
28 | proxy_user=proxy_user,
29 | noproxy=noproxy, # For bypassing proxy when using --noproxy
30 | )
31 | return api_client
32 |
33 | return _make_api_resources
34 |
35 |
36 | @pytest.fixture(scope="class")
37 | def api_resources(api_resources_maker):
38 | yield api_resources_maker()
39 |
40 |
41 | class TestAPIClientProxy:
42 | @pytest.mark.proxy
43 | def test_send_get_with_proxy(self, api_resources_maker, requests_mock):
44 | """Test send_get works correctly with a proxy."""
45 | requests_mock.get(create_url("get_projects"), status_code=200, json=FAKE_PROJECT_DATA)
46 | api_client = api_resources_maker(proxy=FAKE_PROXY)
47 |
48 | response = api_client.send_get("get_projects")
49 |
50 | check_calls_count(requests_mock)
51 | check_response(200, FAKE_PROJECT_DATA, "", response)
52 |
53 | @pytest.mark.proxy
54 | def test_send_post_with_proxy(self, api_resources_maker, requests_mock):
55 | """Test send_post works correctly with a proxy."""
56 | requests_mock.post(create_url("add_project"), status_code=201, json=FAKE_PROJECT_DATA)
57 | api_client = api_resources_maker(proxy=FAKE_PROXY)
58 |
59 | response = api_client.send_post("add_project", FAKE_PROJECT_DATA)
60 |
61 | check_calls_count(requests_mock)
62 | check_response(201, FAKE_PROJECT_DATA, "", response)
63 |
64 | @pytest.mark.proxy
65 | def test_send_get_with_proxy_authentication(self, api_resources_maker, requests_mock, mocker):
66 | """Test proxy with authentication (proxy_user)."""
67 | requests_mock.get(create_url("get_projects"), status_code=200, json=FAKE_PROJECT_DATA)
68 | basic_auth_mock = mocker.patch("trcli.api.api_client.b64encode")
69 |
70 | api_client = api_resources_maker(proxy=FAKE_PROXY, proxy_user=FAKE_PROXY_USER)
71 | _ = api_client.send_get("get_projects")
72 |
73 | basic_auth_mock.assert_called_once_with(FAKE_PROXY_USER.encode('utf-8'))
74 |
75 | @pytest.mark.proxy
76 | def test_send_get_proxy_error(self, api_resources_maker, requests_mock):
77 | """Test handling a proxy authentication failure."""
78 | requests_mock.get(create_url("get_projects"), exc=ProxyError)
79 |
80 | api_client = api_resources_maker(proxy=FAKE_PROXY)
81 |
82 | response = api_client.send_get("get_projects")
83 |
84 | check_calls_count(requests_mock)
85 | check_response(-1, "", PROXY_ERROR_MESSAGE, response)
86 |
87 | @pytest.mark.proxy
88 | def test_send_get_no_proxy(self, api_resources_maker, requests_mock):
89 | """Test API request without a proxy (no --proxy provided)."""
90 | requests_mock.get(create_url("get_projects"), status_code=200, json=FAKE_PROJECT_DATA)
91 | api_client = api_resources_maker()
92 |
93 | response = api_client.send_get("get_projects")
94 |
95 | check_calls_count(requests_mock)
96 | check_response(200, FAKE_PROJECT_DATA, "", response)
97 |
98 | @pytest.mark.proxy
99 | def test_send_get_bypass_proxy(self, api_resources_maker, requests_mock, mocker):
100 | """Test that proxy is bypassed for certain hosts using --noproxy option."""
101 | requests_mock.get(create_url("get_projects"), status_code=200, json=FAKE_PROJECT_DATA)
102 | proxy_bypass_mock = mocker.patch("trcli.api.api_client.APIClient._get_proxies_for_request", return_value=None)
103 |
104 | api_client = api_resources_maker(proxy=FAKE_PROXY, noproxy="127.0.0.1")
105 | _ = api_client.send_get("get_projects")
106 |
107 | proxy_bypass_mock.assert_called_once()
108 |
109 | @pytest.mark.proxy
110 | def test_send_get_with_invalid_proxy_user(self, api_resources_maker, requests_mock, mocker):
111 | """Test handling invalid proxy authentication."""
112 | requests_mock.get(create_url("get_projects"), exc=ProxyError)
113 |
114 | api_client = api_resources_maker(proxy=FAKE_PROXY, proxy_user="invalid_user:invalid_password")
115 |
116 | response = api_client.send_get("get_projects")
117 |
118 | check_calls_count(requests_mock)
119 | check_response(-1, "", PROXY_ERROR_MESSAGE, response)
--------------------------------------------------------------------------------
/tests/test_api_data_provider.py:
--------------------------------------------------------------------------------
1 | from tests.test_data.api_data_provider_test_data import *
2 | from trcli.data_providers.api_data_provider import ApiDataProvider
3 | import pytest
4 |
5 |
6 | @pytest.fixture(scope="function")
7 | def post_data_provider():
8 | yield ApiDataProvider(test_input)
9 |
10 |
11 | @pytest.fixture(scope="function")
12 | def post_data_provider_single_result_with_id():
13 | yield ApiDataProvider(test_input_single_result_with_id)
14 |
15 |
16 | @pytest.fixture(scope="function")
17 | def post_data_provider_single_result_without_id():
18 | yield ApiDataProvider(test_input_single_result_without_id)
19 |
20 |
21 | @pytest.fixture(scope="function")
22 | def post_data_provider_duplicated_case_names():
23 | yield ApiDataProvider(test_input_duplicated_case_names)
24 |
25 |
26 | class TestApiDataProvider:
27 | @pytest.mark.data_provider
28 | def test_post_suite(self, post_data_provider):
29 | assert (
30 | post_data_provider.add_suites_data() == post_suite_bodies
31 | ), "Adding suite data doesn't match expected"
32 |
33 | @pytest.mark.data_provider
34 | def test_data_provider_returns_items_without_id(self, post_data_provider):
35 | """Check if data providers returns data only for items with missing IDs. Numbers correspond to data in
36 | test_data"""
37 | missing_sections = 2
38 | missing_cases = 1
39 | assert (
40 | len(post_data_provider.add_sections_data()) == missing_sections
41 | ), f"Adding suite data doesn't match expected {missing_sections}"
42 | assert (
43 | len(post_data_provider.add_cases()) == missing_cases
44 | ), f"Adding cases data doesn't match expected {missing_cases}"
45 |
46 | @pytest.mark.data_provider
47 | def test_post_section(self, post_data_provider):
48 | """Check body for adding sections"""
49 | suite_updater = [
50 | {
51 | "suite_id": 123,
52 | }
53 | ]
54 |
55 | post_data_provider.update_data(suite_data=suite_updater)
56 | assert (
57 | post_data_provider.add_sections_data() == post_section_bodies
58 | ), "Adding sections data doesn't match expected body"
59 |
60 | @pytest.mark.data_provider
61 | def test_post_cases(self, post_data_provider):
62 | """Check body for adding cases"""
63 | section_updater = [
64 | {"name": "Passed test", "section_id": 12345},
65 | ]
66 | post_data_provider.update_data(section_data=section_updater)
67 | cases = [case.to_dict() for case in post_data_provider.add_cases()]
68 | assert (
69 | cases == post_cases_bodies
70 | ), "Adding cases data doesn't match expected body"
71 |
72 | @pytest.mark.data_provider
73 | def test_post_run(self, post_data_provider):
74 | """Check body for adding run"""
75 | suite_updater = [
76 | {
77 | "suite_id": 123,
78 | }
79 | ]
80 | post_data_provider.update_data(suite_data=suite_updater)
81 | assert (
82 | post_data_provider.add_run("test run") == post_run_bodies
83 | ), "Adding run data doesn't match expected body"
84 |
85 | @pytest.mark.data_provider
86 | def test_post_run_all_args(self, post_data_provider):
87 | """Check body for adding run"""
88 | suite_updater = [
89 | {
90 | "suite_id": 123,
91 | }
92 | ]
93 | post_data_provider.update_data(suite_data=suite_updater)
94 | assert (
95 | post_data_provider.add_run(
96 | "test run",
97 | assigned_to_id=1,
98 | include_all=True,
99 | refs="SAN-1, SAN-2"
100 | ) == post_run_full_body
101 | ), "Adding run full data doesn't match expected body"
102 |
103 | @pytest.mark.data_provider
104 | def test_post_results_for_cases(self, post_data_provider):
105 | """Check body for adding results"""
106 | case_updater = [
107 | {
108 | "case_id": 1234567,
109 | "section_id": 12345,
110 | "title": "testCase2",
111 | "custom_automation_id": "className.testCase2abc"
112 | }
113 | ]
114 | post_data_provider.update_data(case_data=case_updater)
115 | assert (
116 | post_data_provider.add_results_for_cases(bulk_size=10)
117 | == post_results_for_cases_body
118 | ), "Adding results data doesn't match expected body"
119 |
120 | @pytest.mark.data_provider
121 | def test_return_all_items_flag(self, post_data_provider):
122 | all_sections = 3
123 | all_cases = 3
124 | assert (
125 | len(post_data_provider.add_sections_data(return_all_items=True))
126 | == all_sections
127 | ), f"Adding cases with return_all_items flag should match {all_sections}"
128 | assert (
129 | len(post_data_provider.add_cases(return_all_items=True))
130 | == all_cases
131 | ), f"Adding cases with return_all_items flag should match {all_cases}"
132 |
133 | @pytest.mark.data_provider
134 | @pytest.mark.parametrize(
135 | "list_to_divide, bulk_size, expected_result",
136 | [
137 | ([1, 2, 3, 4, 5, 6], 3, [[1, 2, 3], [4, 5, 6]]),
138 | ([1, 2, 3, 4, 5, 6], 4, [[1, 2, 3, 4], [5, 6]]),
139 | ([1, 2, 3, 4, 5, 6], 6, [[1, 2, 3, 4, 5, 6]]),
140 | ([1, 2, 3, 4, 5, 6], 7, [[1, 2, 3, 4, 5, 6]]),
141 | ([], 2, []),
142 | ],
143 | )
144 | def test_divide_list_into_bulks(self, list_to_divide, bulk_size, expected_result):
145 | result = ApiDataProvider.divide_list_into_bulks(list_to_divide, bulk_size)
146 | assert (
147 | result == expected_result
148 | ), f"Expected: {expected_result} but got {result} instead."
149 |
--------------------------------------------------------------------------------
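The parametrized cases for divide_list_into_bulks above encode simple slice semantics; a reference sketch (not the library's actual implementation) reproducing them:

    def divide_list_into_bulks_reference(items, bulk_size):
        # Consecutive chunks of at most bulk_size elements; empty input -> [].
        return [items[i:i + bulk_size] for i in range(0, len(items), bulk_size)]

    assert divide_list_into_bulks_reference([1, 2, 3, 4, 5, 6], 4) == [[1, 2, 3, 4], [5, 6]]
    assert divide_list_into_bulks_reference([], 2) == []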
/tests/test_cmd_add_run.py:
--------------------------------------------------------------------------------
1 | from unittest import mock
2 |
3 | from trcli.cli import Environment
4 | from trcli.commands import cmd_add_run
5 |
6 |
7 | class TestCmdAddRun:
8 | @mock.patch("builtins.open", new_callable=mock.mock_open)
9 | def test_write_run_to_file(self, mock_open_file):
10 | """The purpose of this test is to check that calling the write_run_to_file method
11 | writes the correct yaml file excluding optional data."""
12 | title = "Test run 1"
13 | run_id = 1
14 | file = "/fake/path/out.yaml"
15 | environment = Environment(cmd="add_run")
16 | environment.title = title
17 | environment.file = file
18 | expected_string = f"run_id: {run_id}\ntitle: {title}\n"
19 |
20 | cmd_add_run.write_run_to_file(environment, run_id)
21 | mock_open_file.assert_called_with(file, "a")
22 | mock_open_file.return_value.__enter__().write.assert_called_once_with(expected_string)
23 |
24 | @mock.patch("builtins.open", new_callable=mock.mock_open)
25 | def test_write_run_to_file_with_refs_and_description(self, mock_open_file):
26 | """The purpose of this test is to check that calling the write_run_to_file method
27 | writes the correct yaml file including optional data."""
28 | title = "Test run 1"
29 | run_id = 1
30 | file = "/fake/path/out.yaml"
31 | description = "test description"
32 | refs = "JIRA-100"
33 | case_ids = "1234"
34 | assigned_to_id = 1
35 | environment = Environment(cmd="add_run")
36 | environment.title = title
37 | environment.file = file
38 | environment.run_refs = refs
39 | environment.run_description = description
40 | environment.run_assigned_to_id = assigned_to_id
41 | environment.run_case_ids = case_ids
42 | environment.run_include_all = True
43 | expected_string = (f"run_assigned_to_id: {assigned_to_id}\nrun_case_ids: '{case_ids}'\n"
44 | f"run_description: {description}\nrun_id: {run_id}\n"
45 | f"run_include_all: true\nrun_refs: {refs}\ntitle: {title}\n")
46 | cmd_add_run.write_run_to_file(environment, run_id)
47 | mock_open_file.assert_called_with(file, "a")
48 | mock_open_file.return_value.__enter__().write.assert_called_once_with(expected_string)
49 |
--------------------------------------------------------------------------------
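The expected_string values above follow PyYAML's default alphabetical key ordering, which suggests write_run_to_file serializes a dict via yaml.dump; a minimal sketch (an assumption about the implementation, not a copy of it) reproducing the second expectation:

    import yaml

    data = {
        "title": "Test run 1",
        "run_id": 1,
        "run_refs": "JIRA-100",
        "run_description": "test description",
        "run_assigned_to_id": 1,
        "run_case_ids": "1234",
        "run_include_all": True,
    }
    # yaml.dump sorts keys alphabetically by default and quotes the
    # numeric-looking string "1234", exactly matching expected_string.
    print(yaml.dump(data), end="")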
/tests/test_data/XML/custom_automation_id_in_property.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
13 |
14 |
15 |
16 |
17 |
18 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/tests/test_data/XML/empty.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/tests/test_data/XML/invalid.xml:
--------------------------------------------------------------------------------
1 | SOME XML INVALID FILE
--------------------------------------------------------------------------------
/tests/test_data/XML/invalid_empty.xml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gurock/trcli/bf238450b555f28c9cd9db8b77a222ef909415e9/tests/test_data/XML/invalid_empty.xml
--------------------------------------------------------------------------------
/tests/test_data/XML/milliseconds.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | FAILURE - No connection
6 |
--------------------------------------------------------------------------------
/tests/test_data/XML/no_root.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | FAILURE - No connection
6 |
--------------------------------------------------------------------------------
/tests/test_data/XML/required_only.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/tests/test_data/XML/robotframework_id_in_name_RF50.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
7 |
8 | SETUP
9 | Logs the given message with the given level.
10 | SETUP
11 |
12 |
13 |
14 |
15 | OK
16 | Logs the given message with the given level.
17 | OK
18 |
19 |
20 |
21 | Custom test message
22 | Sets message for the current test case.
23 | Set test message to:
24 | Custom test message
25 |
26 |
27 |
28 | Some documentation about my test Cases
29 | Nothing to see here
30 |
31 | - testrail_case_id: C123
32 | - testrail_case_field: refs:TR-1
33 | - testrail_case_field: priority_id:2
34 | - testrail_result_field: custom_environment:qa
35 | - testrail_result_field: custom_dropdown_1:3
36 | - testrail_result_comment: Notes for the result
37 | - testrail_attachment: /reports/screenshot.png
38 |
39 | Custom test message
40 |
41 |
42 |
43 |
44 | NOK
45 | Fails the test with the given message and optionally alters its tags.
46 | NOK
47 |
48 |
49 | NOK
50 |
51 |
52 |
53 |
55 |
56 |
57 | OK
58 | Logs the given message with the given level.
59 | OK
60 |
61 |
62 |
63 |
64 |
65 |
66 | OK
67 | Logs the given message with the given level.
68 | OK
69 |
70 |
71 |
72 |
73 | Simple homepage links tests
74 |
75 |
76 |
77 |
78 |
79 |
80 | All Tests
81 |
82 |
83 |
84 |
85 | Sub-Tests
86 | Sub-Tests.Subtests 1
87 | Sub-Tests.Subtests 2
88 |
89 |
90 |
91 |
92 |
93 |
--------------------------------------------------------------------------------
/tests/test_data/XML/robotframework_id_in_name_RF70.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | SETUP
7 | SETUP
8 | Logs the given message with the given level.
9 |
10 |
11 |
12 |
13 | OK
14 | OK
15 | Logs the given message with the given level.
16 |
17 |
18 |
19 | Set test message to:
20 | Custom test message
21 |
22 | Custom test message
23 | Sets message for the current test case.
24 |
25 |
26 | Some documentation about my test Cases
27 | Nothing to see here
28 |
29 | - testrail_case_id: C123
30 | - testrail_case_field: refs:TR-1
31 | - testrail_case_field: priority_id:2
32 | - testrail_result_field: custom_environment:qa
33 | - testrail_result_field: custom_dropdown_1:3
34 | - testrail_result_comment: Notes for the result
35 | - testrail_attachment: /reports/screenshot.png
36 |
37 | Custom test message
38 |
39 |
40 |
41 |
42 | NOK
43 | NOK
44 | Fails the test with the given message and optionally alters its tags.
45 |
46 |
47 | NOK
48 |
49 |
50 |
51 |
52 |
53 |
54 | OK
55 | OK
56 | Logs the given message with the given level.
57 |
58 |
59 |
60 |
61 |
62 |
63 | OK
64 | OK
65 | Logs the given message with the given level.
66 |
67 |
68 |
69 |
70 | Simple homepage links tests
71 |
72 |
73 |
74 |
75 |
76 |
77 | All Tests
78 |
79 |
80 |
81 |
82 | Sub-Tests
83 | Sub-Tests.Subtests 1
84 | Sub-Tests.Subtests 2
85 |
86 |
87 |
88 |
89 |
90 |
--------------------------------------------------------------------------------
/tests/test_data/XML/robotframework_id_in_property_RF50.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
7 |
8 | SETUP
9 | Logs the given message with the given level.
10 | SETUP
11 |
12 |
13 |
14 |
15 | OK
16 | Logs the given message with the given level.
17 | OK
18 |
19 |
20 |
21 | Custom test message
22 | Sets message for the current test case.
23 | Set test message to:
24 | Custom test message
25 |
26 |
27 |
28 | Some documentation about my test Cases
29 | Nothing to see here
30 |
31 | - testrail_case_id: C1
32 | - testrail_case_field: refs:TR-1
33 | - testrail_case_field: priority_id:2
34 | - testrail_result_field: custom_environment:qa
35 | - testrail_result_field: custom_dropdown_1:3
36 | - testrail_result_comment: Notes for the result
37 | - testrail_attachment: /reports/screenshot.png
38 |
39 | Custom test message
40 |
41 |
42 |
43 |
44 | NOK
45 | Fails the test with the given message and optionally alters its tags.
46 | NOK
47 |
48 |
49 | - testrail_case_id: c2
50 | NOK
51 |
52 |
53 |
54 |
56 |
57 |
58 | OK
59 | Logs the given message with the given level.
60 | OK
61 |
62 |
63 | - testrail_case_id: 3
64 |
65 |
66 |
67 |
68 | OK
69 | Logs the given message with the given level.
70 | OK
71 |
72 |
73 | - testrail_case_id: 4
74 |
75 |
76 | Simple homepage links tests
77 |
78 |
79 |
80 |
81 |
82 |
83 | All Tests
84 |
85 |
86 |
87 |
88 | Sub-Tests
89 | Sub-Tests.Subtests 1
90 | Sub-Tests.Subtests 2
91 |
92 |
93 |
94 |
95 |
96 |
--------------------------------------------------------------------------------
/tests/test_data/XML/robotframework_id_in_property_RF70.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | SETUP
7 | SETUP
8 | Logs the given message with the given level.
9 |
10 |
11 |
12 |
13 | OK
14 | OK
15 | Logs the given message with the given level.
16 |
17 |
18 |
19 | Set test message to:
20 | Custom test message
21 |
22 | Custom test message
23 | Sets message for the current test case.
24 |
25 |
26 | Some documentation about my test Cases
27 | Nothing to see here
28 |
29 | - testrail_case_id: C1
30 | - testrail_case_field: refs:TR-1
31 | - testrail_case_field: priority_id:2
32 | - testrail_result_field: custom_environment:qa
33 | - testrail_result_field: custom_dropdown_1:3
34 | - testrail_result_comment: Notes for the result
35 | - testrail_attachment: /reports/screenshot.png
36 |
37 | Custom test message
38 |
39 |
40 |
41 |
42 | NOK
43 | NOK
44 | Fails the test with the given message and optionally alters its tags.
45 |
46 |
47 | - testrail_case_id: c2
48 | NOK
49 |
50 |
51 |
52 |
53 |
54 |
55 | OK
56 | OK
57 | Logs the given message with the given level.
58 |
59 |
60 | - testrail_case_id: 3
61 |
62 |
63 |
64 |
65 | OK
66 | OK
67 | Logs the given message with the given level.
68 |
69 |
70 | - testrail_case_id: 4
71 |
72 |
73 | Simple homepage links tests
74 |
75 |
76 |
77 |
78 |
79 |
80 | All Tests
81 |
82 |
83 |
84 |
85 | Sub-Tests
86 | Sub-Tests.Subtests 1
87 | Sub-Tests.Subtests 2
88 |
89 |
90 |
91 |
92 |
93 |
--------------------------------------------------------------------------------
/tests/test_data/XML/robotframework_simple_RF50.xml:
--------------------------------------------------------------------------------
[Robot Framework 5.0 output XML; the element markup was lost in extraction. The recoverable text content matches robotframework_id_in_property_RF50.xml above, except that only the first test carries TestRail properties, with testrail_case_id: C123 (alongside the same testrail_case_field, testrail_result_field, testrail_result_comment and testrail_attachment values); the remaining tests carry none. Suite statistics: All Tests; Sub-Tests; Sub-Tests.Subtests 1; Sub-Tests.Subtests 2.]
--------------------------------------------------------------------------------
/tests/test_data/XML/robotframework_simple_RF70.xml:
--------------------------------------------------------------------------------
[Robot Framework 7.0 output XML; the element markup was lost in extraction. The recoverable text content matches robotframework_simple_RF50.xml above, with slightly different element ordering: only the first test carries TestRail properties (testrail_case_id: C123 plus the same field, comment and attachment values), and the suite statistics are All Tests; Sub-Tests; Sub-Tests.Subtests 1; Sub-Tests.Subtests 2.]
--------------------------------------------------------------------------------
/tests/test_data/XML/root.xml:
--------------------------------------------------------------------------------
[JUnit-style XML; the element markup was lost in extraction. The only recoverable text is the skip reason "skipped by user"; the parsed structure this file produces is shown in tests/test_data/json/root.json below.]
--------------------------------------------------------------------------------
/tests/test_data/XML/root_id_in_name.xml:
--------------------------------------------------------------------------------
[JUnit-style XML; the element markup was lost in extraction. The only recoverable text is the skip reason "skipped by user"; the parsed structure is shown in tests/test_data/json/root_id_in_name.json below.]
--------------------------------------------------------------------------------
/tests/test_data/XML/root_id_in_property.xml:
--------------------------------------------------------------------------------
[JUnit-style XML; the element markup was lost in extraction. The only recoverable text is the skip reason "skipped by user"; the parsed structure is shown in tests/test_data/json/root_id_in_property.json below.]
--------------------------------------------------------------------------------
/tests/test_data/XML/sauce.xml:
--------------------------------------------------------------------------------
[JUnit-style XML from a Sauce Labs run; the element markup was lost in extraction. The only recoverable text is the failure body "stacktrace..."; the parsed structures are shown in tests/test_data/json/sauce1.json and sauce2.json below.]
--------------------------------------------------------------------------------
/tests/test_data/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gurock/trcli/bf238450b555f28c9cd9db8b77a222ef909415e9/tests/test_data/__init__.py
--------------------------------------------------------------------------------
/tests/test_data/api_client_test_data.py:
--------------------------------------------------------------------------------
1 | from trcli.settings import DEFAULT_API_CALL_TIMEOUT
2 |
3 | FAKE_PROJECT_DATA = {"fake_project_data": "data"}
4 | INVALID_TEST_CASE_ERROR = {"error": "Invalid or unknown test case"}
5 | API_RATE_LIMIT_REACHED_ERROR = {"error": "API rate limit reached"}
6 | NO_PERMISSION_PROJECT_ERROR = {
7 | "error": "No permissions to add projects (requires admin rights)"
8 | }
9 | TIMEOUT_PARSE_ERROR = (
10 | f"Warning. Could not convert provided 'timeout' to float. "
11 | f"Please make sure that timeout format is correct. Setting to default: "
12 | f"{DEFAULT_API_CALL_TIMEOUT}"
13 | )
14 |
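TIMEOUT_PARSE_ERROR above describes a parse-and-fall-back behaviour for the timeout setting. A minimal sketch of that pattern, for illustration only (not trcli's actual implementation):

    from trcli.settings import DEFAULT_API_CALL_TIMEOUT

    def parse_timeout(raw) -> float:
        # Fall back to the default when the provided value is not a float.
        try:
            return float(raw)
        except (TypeError, ValueError):
            print(
                f"Warning. Could not convert provided 'timeout' to float. "
                f"Please make sure that timeout format is correct. Setting to default: "
                f"{DEFAULT_API_CALL_TIMEOUT}"
            )
            return DEFAULT_API_CALL_TIMEOUT

    assert parse_timeout("50") == 50.0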
--------------------------------------------------------------------------------
/tests/test_data/api_data_provider_test_data.py:
--------------------------------------------------------------------------------
1 | import json
2 | from pathlib import Path
3 | from trcli.data_classes.dataclass_testrail import TestRailSuite
4 | from serde.json import from_json
5 |
6 | file_json = open(Path(__file__).parent / "json/data_provider.json")
7 | json_string = json.dumps(json.load(file_json))
8 | test_input = from_json(TestRailSuite, json_string)
9 |
10 | file_json = open(Path(__file__).parent / "json/update_case_result_single_with_id.json")
11 | json_string = json.dumps(json.load(file_json))
12 | test_input_single_result_with_id = from_json(TestRailSuite, json_string)
13 |
14 | file_json = open(
15 | Path(__file__).parent / "json/update_case_result_single_without_id.json"
16 | )
17 | json_string = json.dumps(json.load(file_json))
18 | test_input_single_result_without_id = from_json(TestRailSuite, json_string)
19 |
20 | file_json = open(
21 | Path(__file__).parent / "json/data_provider_duplicated_case_names.json"
22 | )
23 | json_string = json.dumps(json.load(file_json))
24 | test_input_duplicated_case_names = from_json(TestRailSuite, json_string)
25 |
26 |
27 | post_suite_bodies = [{"name": "Suite1"}]
28 |
29 | post_section_bodies = [
30 | {"name": "Skipped test", "suite_id": 123},
31 | {"name": "Passed test", "suite_id": 123},
32 | ]
33 |
34 | post_cases_bodies = [
35 | {"section_id": 12345, "title": "testCase2", "custom_automation_id": "className.testCase2abc"}
36 | ]
37 |
38 | post_run_bodies = {
39 | "description": "logging: True\ndebug: False",
40 | "name": "test run",
41 | "suite_id": 123,
42 | "case_ids": [60, 4],
43 | "milestone_id": None
44 | }
45 |
46 | post_run_full_body = {
47 | "description": "logging: True\ndebug: False",
48 | "name": "test run",
49 | "suite_id": 123,
50 | "case_ids": [60, 4],
51 | "milestone_id": None,
52 | "assignedto_id": 1,
53 | "include_all": True,
54 | "refs": "SAN-1, SAN-2"
55 | }
56 |
57 | post_results_for_cases_body = [
58 | {
59 | "results": [
60 | {
61 | "case_id": 60,
62 | "comment": "Type: pytest.skip\\nMessage: Please skip\\nText: skipped by user",
63 | "attachments": [],
64 | "status_id": 4,
65 | 'custom_step_results': []
66 | },
67 | {"case_id": 1234567, "comment": "", "attachments": [], "status_id": 1, 'custom_step_results': []},
68 | {"case_id": 4, "comment": "", "attachments": [], "status_id": 1, 'custom_step_results': []},
69 | ]
70 | }
71 | ]
72 |
73 | result_for_update_case = {
74 | "case_id": 10,
75 | "comment": "Type: pytest.skip\\nMessage: Please skip\\nText: skipped by user",
76 | "status_id": 4,
77 | "attachments": [],
78 | }
79 |
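The module above calls open() four times and never closes the handles. A consolidated loader doing the same JSON round-trip through pyserde, written as a hedged alternative (the helper name is illustrative, not part of the repository):

    import json
    from pathlib import Path

    from serde.json import from_json
    from trcli.data_classes.dataclass_testrail import TestRailSuite

    def load_suite(fixture_name: str) -> TestRailSuite:
        # Read, parse, and deserialize one fixture, closing the file handle.
        path = Path(__file__).parent / "json" / fixture_name
        with path.open() as fp:
            return from_json(TestRailSuite, json.dumps(json.load(fp)))

    test_input = load_suite("data_provider.json")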
--------------------------------------------------------------------------------
/tests/test_data/cli_test_data.py:
--------------------------------------------------------------------------------
1 | from trcli import __version__
2 |
3 | CHECK_ERROR_MESSAGE_FOR_REQUIRED_PARAMETERS_TEST_DATA = [
4 | (
5 | ["file"],
6 | "Please provide a valid path to your results file with the -f argument.\n",
7 | 1,
8 | ),
9 | (
10 | ["host"],
11 | "Please provide a TestRail server address with the -h argument.\n",
12 | 1,
13 | ),
14 | (
15 | ["project"],
16 | "Please specify the project name using the --project argument.\n",
17 | 1,
18 | ),
19 | (
20 | ["username"],
21 | "Please provide a valid TestRail username using the -u argument.\n",
22 | 1,
23 | ),
24 | (
25 | ["title", "run-id"],
26 | "Please give your Test Run a title using the --title argument.\n",
27 | 1,
28 | ),
29 | (
30 | ["password", "key"],
31 | "Please provide either a password using the -p argument or an API key using the -k argument.\n",
32 | 1,
33 | ),
34 | ]
35 |
36 | CHECK_ERROR_MESSAGE_FOR_REQUIRED_PARAMETERS_TEST_IDS = [
37 | "No file parameter provided",
38 | "No host parameter provided",
39 | "No project parameter provided",
40 | "No username parameter provided",
41 | "No title or run-id parameter provided",
42 | "No password and API key parameter provided",
43 | ]
44 |
45 | ENVIRONMENT_VARIABLES = {
46 | "TR_CLI_HOST": "host_name_from_env",
47 | "TR_CLI_FILE": "file_from_env",
48 | "TR_CLI_PROJECT": "project_from_env",
49 | "TR_CLI_TITLE": "title_from_env",
50 | "TR_CLI_USERNAME": "username_from_env",
51 | "TR_CLI_PASSWORD": "password_from_env",
52 | "TR_CLI_KEY": "key_from_env",
53 | }
54 |
55 | RETURN_VALUE_FROM_CUSTOM_CONFIG_FILE = {
56 | "host": "host_from_custom_config",
57 | "file": "file_from_custom_config",
58 | "project": "project_from_custom_config",
59 | "title": "title_from_custom_config",
60 | "username": "username_from_custom_config",
61 | "password": "password_from_custom_config",
62 | "key": "key_from_custom_config",
63 | }
64 |
65 | trcli_description = ('Supported and loaded modules:\n'
66 | ' - parse_junit: JUnit XML Files (& Similar)\n'
67 | ' - parse_robot: Robot Framework XML Files\n'
68 | ' - parse_openapi: OpenAPI YML Files\n'
69 | ' - add_run: Create a new test run\n')
70 |
71 | trcli_help_description = "TestRail CLI"
72 |
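The *_TEST_DATA / *_TEST_IDS pairs above are shaped for pytest parametrization. A hedged sketch of how tests/test_cli.py would typically consume them (the test body is elided):

    import pytest

    from tests.test_data.cli_test_data import (
        CHECK_ERROR_MESSAGE_FOR_REQUIRED_PARAMETERS_TEST_DATA,
        CHECK_ERROR_MESSAGE_FOR_REQUIRED_PARAMETERS_TEST_IDS,
    )

    @pytest.mark.parametrize(
        "missing_args, expected_message, expected_exit_code",
        CHECK_ERROR_MESSAGE_FOR_REQUIRED_PARAMETERS_TEST_DATA,
        ids=CHECK_ERROR_MESSAGE_FOR_REQUIRED_PARAMETERS_TEST_IDS,
    )
    def test_missing_required_parameter(missing_args, expected_message, expected_exit_code):
        ...  # invoke the CLI with `missing_args` removed, then assert on the message and exit code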
--------------------------------------------------------------------------------
/tests/test_data/dataclass_creation.py:
--------------------------------------------------------------------------------
1 | from junitparser import Skipped, Failure, Error
2 |
3 |
4 | FAILED_RESULT_INPUT = Failure(type_="Fail", message="This test Failed")
5 | FAILED_RESULT_INPUT.text = "Assertion failed"
6 | FAILED_EXPECTED = {
7 | "status_id": 5,
8 | "comment": "Type: Fail\nMessage: This test Failed\nText: Assertion failed",
9 | }
10 |
11 | SKIPPED_RESULT_INPUT = Skipped(type_="Skipped", message="This test Skipped")
12 | SKIPPED_RESULT_INPUT.text = "Skipped by user"
13 | SKIPPED_EXPECTED = {
14 | "status_id": 4,
15 | "comment": "Type: Skipped\nMessage: This test Skipped\nText: Skipped by user",
16 | }
17 |
18 | SKIPPED_RESULT_EMPTY_INPUT = Skipped()
19 | SKIPPED_EMPTY_EXPECTED = {"status_id": 4, "comment": ""}
20 |
21 | ERROR_RESULT_INPUT = Error(type_="Error", message="This test Error")
22 | ERROR_RESULT_INPUT.text = "Error in line 1"
23 | ERROR_EXPECTED = {
24 | "status_id": 5,
25 | "comment": "Type: Error\nMessage: This test Error\nText: Error in line 1",
26 | }
27 |
28 | PASSED_EXPECTED = {"status_id": 1, "comment": ""}
29 |
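The expected comments above follow a fixed "Type/Message/Text" layout. A minimal sketch of composing that comment from a junitparser result (an assumed formatting helper, not necessarily the trcli code path):

    from junitparser import Failure

    def result_comment(result) -> str:
        # Join only the populated parts, matching the *_EXPECTED dicts above.
        parts = []
        if result.type:
            parts.append(f"Type: {result.type}")
        if result.message:
            parts.append(f"Message: {result.message}")
        if result.text:
            parts.append(f"Text: {result.text}")
        return "\n".join(parts)

    failure = Failure(type_="Fail", message="This test Failed")
    failure.text = "Assertion failed"
    assert result_comment(failure) == "Type: Fail\nMessage: This test Failed\nText: Assertion failed"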
--------------------------------------------------------------------------------
/tests/test_data/json/api_request_handler.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": null,
3 | "name": "Suite1",
4 | "suite_id": 4,
5 | "testsections": [
6 | {
7 | "name": "Skipped test",
8 | "suite_id": 4,
9 | "section_id": 1234,
10 | "testcases": [
11 | {
12 | "section_id": 1234,
13 | "title": "testCase1",
14 | "custom_automation_id": "Skipped test.testCase1",
15 | "case_id": 1,
16 | "result": {
17 | "comment": "Type: pytest.skip\\nMessage: Please skip\\nText: skipped by user",
18 | "status_id": 4,
19 | "case_id": 1,
20 | "attachments": [
21 | "./path1",
22 | "./path2"
23 | ]
24 | }
25 | },
26 | {
27 | "section_id": 1234,
28 | "title": "testCase2",
29 | "custom_automation_id": "Skipped test.testCase2",
30 | "estimate": "30s",
31 | "case_id": null,
32 | "result": {
33 | "comment": "Comment testCase2",
34 | "status_id": 1,
35 | "case_id": null
36 | }
37 | }
38 | ],
39 | "properties": [
40 | {
41 | "description": "logging: True",
42 | "name": "logging",
43 | "value": "True"
44 | },
45 | {
46 | "description": "debug: False",
47 | "name": "debug",
48 | "value": "False"
49 | }
50 | ]
51 | },
52 | {
53 | "name": "Passed test",
54 | "suite_id": 4,
55 | "section_id": null,
56 | "testcases": [
57 | {
58 | "section_id": 2,
59 | "title": "testCase3",
60 | "custom_automation_id": "Passed test.testCase3",
61 | "case_id": null,
62 | "result": {
63 | "comment": "Comment testCase3",
64 | "status_id": 1,
65 | "case_id": null
66 | }
67 | }
68 | ]
69 | }
70 | ]
71 | }
72 |
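Note that every custom_automation_id above follows the "<section name>.<test title>" convention. A quick, hedged check of that invariant over the fixture (a standalone script, not part of the suite):

    import json
    from pathlib import Path

    data = json.loads(Path("tests/test_data/json/api_request_handler.json").read_text())
    for section in data["testsections"]:
        for case in section["testcases"]:
            # Every case encodes its section name and title in the automation id.
            assert case["custom_automation_id"] == f'{section["name"]}.{case["title"]}'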
--------------------------------------------------------------------------------
/tests/test_data/json/api_request_handler_long_testcase.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": null,
3 | "name": "Suite1",
4 | "suite_id": 4,
5 | "testsections": [
6 | {
7 | "name": "Skipped test",
8 | "suite_id": 4,
9 | "section_id": 1234,
10 | "testcases": [
11 | {
12 | "section_id": 1234,
13 | "title": "testCase1",
14 | "custom_automation_id": "Skipped test.testCase1",
15 | "case_id": 1,
16 | "result": {
17 | "comment": "Type: pytest.skip\\nMessage: Please skip\\nText: skipped by user",
18 | "status_id": 4,
19 | "case_id": 1,
20 | "attachments": [
21 | "./path1",
22 | "./path2"
23 | ]
24 | }
25 | },
26 | {
27 | "section_id": 1234,
28 | "title": "Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim.",
29 | "custom_automation_id": "Skipped test.testCase2",
30 | "estimate": "30s",
31 | "case_id": null,
32 | "result": {
33 | "comment": "Comment testCase2",
34 | "status_id": 1,
35 | "case_id": null
36 | }
37 | }
38 | ],
39 | "properties": [
40 | {
41 | "description": "logging: True",
42 | "name": "logging",
43 | "value": "True"
44 | },
45 | {
46 | "description": "debug: False",
47 | "name": "debug",
48 | "value": "False"
49 | }
50 | ]
51 | },
52 | {
53 | "name": "Passed test",
54 | "suite_id": 4,
55 | "section_id": null,
56 | "testcases": [
57 | {
58 | "section_id": 2,
59 | "title": "testCase3",
60 | "custom_automation_id": "Passed test.testCase3",
61 | "case_id": null,
62 | "result": {
63 | "comment": "Comment testCase3",
64 | "status_id": 1,
65 | "case_id": null
66 | }
67 | }
68 | ]
69 | }
70 | ]
71 | }
72 |
--------------------------------------------------------------------------------
/tests/test_data/json/custom_automation_id_in_property.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": null,
3 | "name": "test suites root",
4 | "source": "custom_automation_id_in_property.xml",
5 | "suite_id": null,
6 | "testsections": [
7 | {
8 | "description": null,
9 | "name": "custom_automation_id in property",
10 | "suite_id": null,
11 | "parent_id": null,
12 | "section_id": null,
13 | "testcases": [
14 | {
15 | "title": "test_testrail 1",
16 | "section_id": null,
17 | "case_id": null,
18 | "estimate": null,
19 | "template_id": null,
20 | "type_id": null,
21 | "milestone_id": null,
22 | "refs": null,
23 | "case_fields": {
24 | "template_id": "1"
25 | },
26 | "result": {
27 | "case_id": null,
28 | "status_id": 1,
29 | "comment": "",
30 | "version": null,
31 | "elapsed": "159s",
32 | "defects": null,
33 | "assignedto_id": null,
34 | "attachments": [],
35 | "result_fields": {},
36 | "junit_result_unparsed": [],
37 | "custom_step_results": []
38 | },
39 | "custom_automation_id": "automation_id_1"
40 | },
41 | {
42 | "title": "test_testrail 2",
43 | "section_id": null,
44 | "case_id": null,
45 | "estimate": null,
46 | "template_id": null,
47 | "type_id": null,
48 | "milestone_id": null,
49 | "refs": null,
50 | "case_fields": {
51 | "template_id": "1"
52 | },
53 | "result": {
54 | "case_id": null,
55 | "status_id": 1,
56 | "comment": "",
57 | "version": null,
58 | "elapsed": "159s",
59 | "defects": null,
60 | "assignedto_id": null,
61 | "attachments": [],
62 | "result_fields": {},
63 | "junit_result_unparsed": [],
64 | "custom_step_results": []
65 | },
66 | "custom_automation_id": "automation_id_2"
67 | },
68 | {
69 | "title": "test_testrail 3",
70 | "section_id": null,
71 | "case_id": null,
72 | "estimate": null,
73 | "template_id": null,
74 | "type_id": null,
75 | "milestone_id": null,
76 | "refs": null,
77 | "case_fields": {
78 | "template_id": "1"
79 | },
80 | "result": {
81 | "case_id": null,
82 | "status_id": 1,
83 | "comment": "",
84 | "version": null,
85 | "elapsed": "159s",
86 | "defects": null,
87 | "assignedto_id": null,
88 | "attachments": [],
89 | "result_fields": {},
90 | "junit_result_unparsed": [],
91 | "custom_step_results": []
92 | },
93 | "custom_automation_id": "automation_id_3"
94 | }
95 | ],
96 | "properties": []
97 | }
98 | ]
99 | }
--------------------------------------------------------------------------------
/tests/test_data/json/data_provider.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": null,
3 | "name": "Suite1",
4 | "suite_id": null,
5 | "testsections": [
6 | {
7 | "name": "Skipped test",
8 | "suite_id": null,
9 | "section_id": null,
10 | "testcases": [
11 | {
12 | "section_id": null,
13 | "title": "testCase1",
14 | "case_id": 60,
15 | "result": {
16 | "comment": "Type: pytest.skip\\nMessage: Please skip\\nText: skipped by user",
17 | "status_id": 4,
18 | "case_id": 60
19 | }
20 | }
21 | ],
22 | "properties": [
23 | {
24 | "description": "logging: True",
25 | "name": "logging",
26 | "value": "True"
27 | },
28 | {
29 | "description": "debug: False",
30 | "name": "debug",
31 | "value": "False"
32 | }
33 | ]
34 | },
35 | {
36 | "name": "Passed test",
37 | "suite_id": null,
38 | "section_id": null,
39 | "testcases": [
40 | {
41 | "section_id": null,
42 | "title": "testCase2",
43 | "custom_automation_id": "className.testCase2abc",
44 | "case_id": null,
45 | "time": 400,
46 | "result": {
47 | "comment": "",
48 | "status_id": 1,
49 | "case_id": null
50 | }
51 | }
52 | ]
53 | },
54 | {
55 | "name": "Filled test",
56 | "suite_id": 1,
57 | "section_id": 2,
58 | "testcases": [
59 | {
60 | "section_id": 2,
61 | "title": "testCase3",
62 | "case_id": 4,
63 | "result": {
64 | "comment": "",
65 | "status_id": 1,
66 | "case_id": 4
67 | }
68 | }
69 | ]
70 | }
71 | ]
72 | }
73 |
--------------------------------------------------------------------------------
/tests/test_data/json/data_provider_duplicated_case_names.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": null,
3 | "name": "Suite1",
4 | "suite_id": null,
5 | "testsections": [
6 | {
7 | "name": "Skipped test",
8 | "suite_id": null,
9 | "section_id": null,
10 | "testcases": [
11 | {
12 | "section_id": null,
13 | "title": "testCase1",
14 | "case_id": 60,
15 | "result": {
16 | "comment": "Type: pytest.skip\\nMessage: Please skip\\nText: skipped by user",
17 | "status_id": 4,
18 | "case_id": 60
19 | }
20 | }
21 | ],
22 | "properties": [
23 | {
24 | "description": "logging: True",
25 | "name": "logging",
26 | "value": "True"
27 | },
28 | {
29 | "description": "debug: False",
30 | "name": "debug",
31 | "value": "False"
32 | }
33 | ]
34 | },
35 | {
36 | "name": "Passed test",
37 | "suite_id": null,
38 | "section_id": null,
39 | "testcases": [
40 | {
41 | "section_id": null,
42 | "title": "testCase2",
43 | "case_id": null,
44 | "time": 400,
45 | "result": {
46 | "comment": "",
47 | "status_id": 1,
48 | "case_id": null
49 | }
50 | }
51 | ]
52 | },
53 | {
54 | "name": "Filled test",
55 | "suite_id": 1,
56 | "section_id": 2,
57 | "testcases": [
58 | {
59 | "section_id": 2,
60 | "title": "testCase2",
61 | "case_id": 4,
62 | "result": {
63 | "comment": "",
64 | "status_id": 1,
65 | "case_id": 4
66 | }
67 | }
68 | ]
69 | }
70 | ]
71 | }
72 |
--------------------------------------------------------------------------------
/tests/test_data/json/milliseconds.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": null,
3 | "name": "milliseconds.xml 20-05-20 01:00:00",
4 | "source": "milliseconds.xml",
5 | "suite_id": null,
6 | "testsections": [
7 | {
8 | "description": null,
9 | "name": "One passing scenario, one failing scenario",
10 | "parent_id": null,
11 | "properties": [],
12 | "section_id": null,
13 | "suite_id": null,
14 | "testcases": [
15 | {
16 | "case_id": null,
17 | "custom_automation_id": "One passing scenario, one failing scenario.Passing",
18 | "estimate": null,
19 | "milestone_id": null,
20 | "refs": null,
21 | "result": {
22 | "assignedto_id": null,
23 | "comment": "",
24 | "defects": null,
25 | "elapsed": "0.005s",
26 | "junit_result_unparsed": [],
27 | "status_id": 1,
28 | "case_id": null,
29 | "version": null,
30 | "attachments": [],
31 | "result_fields": {},
32 | "custom_step_results": []
33 | },
34 | "section_id": null,
35 | "template_id": null,
36 | "title": "Passing",
37 | "case_fields": {},
38 | "type_id": null
39 | },
40 | {
41 | "case_id": null,
42 | "custom_automation_id": "One passing scenario, one failing scenario.Failing",
43 | "estimate": null,
44 | "milestone_id": null,
45 | "refs": null,
46 | "result": {
47 | "assignedto_id": null,
48 | "comment": "Type: failed\nMessage: failed Failing\nText: FAILURE - No connection",
49 | "defects": null,
50 | "elapsed": "10.001s",
51 | "junit_result_unparsed": [],
52 | "status_id": 5,
53 | "case_id": null,
54 | "version": null,
55 | "attachments": [],
56 | "result_fields": {},
57 | "custom_step_results": []
58 | },
59 | "section_id": null,
60 | "template_id": null,
61 | "title": "Failing",
62 | "case_fields": {},
63 | "type_id": null
64 | }
65 | ]
66 | }
67 | ]
68 | }
--------------------------------------------------------------------------------
/tests/test_data/json/no_root.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": null,
3 | "name": "no_root.xml 20-05-20 01:00:00",
4 | "source": "no_root.xml",
5 | "suite_id": null,
6 | "testsections": [
7 | {
8 | "description": null,
9 | "name": "One passing scenario, one failing scenario",
10 | "parent_id": null,
11 | "properties": [],
12 | "section_id": null,
13 | "suite_id": null,
14 | "testcases": [
15 | {
16 | "case_id": null,
17 | "custom_automation_id": "One passing scenario, one failing scenario.bjbszpjfyfygrqgxsjyvtguoblrfeleouzktjnqgpcwtuvbedykryuvqeyhgopxphlgguultqosfsrfslpfazzvoqkkbixmoataqmiagpyhqxzjwcmpaepmyvagvfcekhvowjuulxkgmgjxsqaqlipszvodwwlohlwzyjsbwukbwjgycqfgygstclocmfzglskntohpnzkylrjswpypwrefwmdvmhekgnklrherxwqhtkhjkjdbqfymemjcjpppu",
18 | "estimate": null,
19 | "milestone_id": null,
20 | "refs": null,
21 | "result": {
22 | "assignedto_id": null,
23 | "comment": "",
24 | "defects": null,
25 | "elapsed": "1s",
26 | "junit_result_unparsed": [],
27 | "status_id": 1,
28 | "case_id": null,
29 | "version": null,
30 | "attachments": [],
31 | "result_fields": {},
32 | "custom_step_results": []
33 | },
34 | "section_id": null,
35 | "template_id": null,
36 | "title": "jfyfygrqgxsjyvtguoblrfeleouzktjnqgpcwtuvbedykryuvqeyhgopxphlgguultqosfsrfslpfazzvoqkkbixmoataqmiagpyhqxzjwcmpaepmyvagvfcekhvowjuulxkgmgjxsqaqlipszvodwwlohlwzyjsbwukbwjgycqfgygstclocmfzglskntohpnzkylrjswpypwrefwmdvmhekgnklrherxwqhtkhjkjdbqfymemjcjpppu",
37 | "case_fields": {},
38 | "type_id": null
39 | },
40 | {
41 | "case_id": null,
42 | "custom_automation_id": "One passing scenario, one failing scenario.Failing",
43 | "estimate": null,
44 | "milestone_id": null,
45 | "refs": null,
46 | "result": {
47 | "assignedto_id": null,
48 | "comment": "Type: failed\nMessage: failed Failing\nText: FAILURE - No connection",
49 | "defects": null,
50 | "elapsed": "10s",
51 | "junit_result_unparsed": [],
52 | "status_id": 5,
53 | "case_id": null,
54 | "version": null,
55 | "attachments": [],
56 | "result_fields": {},
57 | "custom_step_results": []
58 | },
59 | "section_id": null,
60 | "template_id": null,
61 | "title": "Failing",
62 | "case_fields": {},
63 | "type_id": null
64 | }
65 | ]
66 | }
67 | ]
68 | }
--------------------------------------------------------------------------------
/tests/test_data/json/required_only.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": null,
3 | "name": "required_only.xml 20-05-20 01:00:00",
4 | "source": "required_only.xml",
5 | "suite_id": null,
6 | "testsections": [
7 | {
8 | "description": null,
9 | "name": "name must be here1",
10 | "parent_id": null,
11 | "properties": [],
12 | "section_id": null,
13 | "suite_id": null,
14 | "testcases": [
15 | {
16 | "case_id": null,
17 | "custom_automation_id": "None.name must be here2",
18 | "estimate": null,
19 | "milestone_id": null,
20 | "refs": null,
21 | "result": {
22 | "assignedto_id": null,
23 | "case_id": null,
24 | "comment": "",
25 | "defects": null,
26 | "elapsed": null,
27 | "junit_result_unparsed": [],
28 | "status_id": 1,
29 | "version": null,
30 | "attachments": [],
31 | "result_fields": {},
32 | "custom_step_results": []
33 | },
34 | "section_id": null,
35 | "template_id": null,
36 | "title": "name must be here2",
37 | "case_fields": {},
38 | "type_id": null
39 | },
40 | {
41 | "case_id": null,
42 | "custom_automation_id": "None.name must be here3",
43 | "estimate": null,
44 | "milestone_id": null,
45 | "refs": null,
46 | "result": {
47 | "assignedto_id": null,
48 | "case_id": null,
49 | "comment": "",
50 | "defects": null,
51 | "elapsed": null,
52 | "junit_result_unparsed": [],
53 | "status_id": 5,
54 | "version": null,
55 | "attachments": [],
56 | "result_fields": {},
57 | "custom_step_results": []
58 | },
59 | "section_id": null,
60 | "template_id": null,
61 | "title": "name must be here3",
62 | "case_fields": {},
63 | "type_id": null
64 | }
65 | ]
66 | }
67 | ]
68 | }
--------------------------------------------------------------------------------
/tests/test_data/json/robotframework_id_in_name_RF50.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "robotframework_id_in_name_RF50",
3 | "suite_id": null,
4 | "description": null,
5 | "testsections": [
6 | {
7 | "name": "Sub-Tests.Subtests 1",
8 | "suite_id": null,
9 | "parent_id": null,
10 | "description": null,
11 | "section_id": null,
12 | "testcases": [
13 | {
14 | "title": "Subtest 1a",
15 | "section_id": null,
16 | "case_id": 1,
17 | "estimate": null,
18 | "template_id": null,
19 | "type_id": null,
20 | "milestone_id": null,
21 | "refs": null,
22 | "case_fields": {
23 | "refs": "TR-1",
24 | "priority_id": "2"
25 | },
26 | "result": {
27 | "case_id": 1,
28 | "status_id": 1,
29 | "comment": "Notes for the result\n\nCustom test message\n ",
30 | "version": null,
31 | "elapsed": "1s",
32 | "defects": null,
33 | "assignedto_id": null,
34 | "attachments": [
35 | "/reports/screenshot.png"
36 | ],
37 | "result_fields": {
38 | "custom_environment": "qa",
39 | "custom_dropdown_1": "3"
40 | },
41 | "junit_result_unparsed": null,
42 | "custom_step_results": [
43 | {
44 | "content": "Log",
45 | "status_id": 1
46 | },
47 | {
48 | "content": "Set Test Message",
49 | "status_id": 1
50 | }
51 | ]
52 | },
53 | "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1a"
54 | },
55 | {
56 | "title": "Subtest 1b",
57 | "section_id": null,
58 | "case_id": 2,
59 | "estimate": null,
60 | "template_id": null,
61 | "type_id": null,
62 | "milestone_id": null,
63 | "refs": null,
64 | "case_fields": {},
65 | "result": {
66 | "case_id": 2,
67 | "status_id": 5,
68 | "comment": "NOK",
69 | "version": null,
70 | "elapsed": "1s",
71 | "defects": null,
72 | "assignedto_id": null,
73 | "attachments": [],
74 | "result_fields": {},
75 | "junit_result_unparsed": null,
76 | "custom_step_results": [
77 | {
78 | "content": "Fail",
79 | "status_id": 5
80 | }
81 | ]
82 | },
83 | "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1b"
84 | }
85 | ],
86 | "properties": []
87 | },
88 | {
89 | "name": "Sub-Tests.Subtests 2",
90 | "suite_id": null,
91 | "parent_id": null,
92 | "description": null,
93 | "section_id": null,
94 | "testcases": [
95 | {
96 | "title": "Subtest 2a",
97 | "section_id": null,
98 | "case_id": 3,
99 | "estimate": null,
100 | "template_id": null,
101 | "type_id": null,
102 | "milestone_id": null,
103 | "refs": null,
104 | "case_fields": {},
105 | "result": {
106 | "case_id": 3,
107 | "status_id": 1,
108 | "comment": null,
109 | "version": null,
110 | "elapsed": "1s",
111 | "defects": null,
112 | "assignedto_id": null,
113 | "attachments": [],
114 | "result_fields": {},
115 | "junit_result_unparsed": null,
116 | "custom_step_results": [
117 | {
118 | "content": "Log",
119 | "status_id": 1
120 | }
121 | ]
122 | },
123 | "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2a"
124 | },
125 | {
126 | "title": "Subtest 2b",
127 | "section_id": null,
128 | "case_id": 4,
129 | "estimate": null,
130 | "template_id": null,
131 | "type_id": null,
132 | "milestone_id": null,
133 | "refs": null,
134 | "case_fields": {},
135 | "result": {
136 | "case_id": 4,
137 | "status_id": 1,
138 | "comment": null,
139 | "version": null,
140 | "elapsed": "3s",
141 | "defects": null,
142 | "assignedto_id": null,
143 | "attachments": [],
144 | "result_fields": {},
145 | "junit_result_unparsed": null,
146 | "custom_step_results": [
147 | {
148 | "content": "Log",
149 | "status_id": 1
150 | }
151 | ]
152 | },
153 | "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2b"
154 | }
155 | ],
156 | "properties": []
157 | }
158 | ],
159 | "source": "robotframework_id_in_name_RF50.xml"
160 | }
--------------------------------------------------------------------------------
/tests/test_data/json/robotframework_id_in_name_RF70.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "robotframework_id_in_name_RF70",
3 | "suite_id": null,
4 | "description": null,
5 | "testsections": [
6 | {
7 | "name": "Sub-Tests.Subtests 1",
8 | "suite_id": null,
9 | "parent_id": null,
10 | "description": null,
11 | "section_id": null,
12 | "testcases": [
13 | {
14 | "title": "Subtest 1a",
15 | "section_id": null,
16 | "case_id": 1,
17 | "estimate": null,
18 | "template_id": null,
19 | "type_id": null,
20 | "milestone_id": null,
21 | "refs": null,
22 | "case_fields": {
23 | "refs": "TR-1",
24 | "priority_id": "2"
25 | },
26 | "result": {
27 | "case_id": 1,
28 | "status_id": 1,
29 | "comment": "Notes for the result\n\nCustom test message\n ",
30 | "version": null,
31 | "elapsed": "1s",
32 | "defects": null,
33 | "assignedto_id": null,
34 | "attachments": [
35 | "/reports/screenshot.png"
36 | ],
37 | "result_fields": {
38 | "custom_environment": "qa",
39 | "custom_dropdown_1": "3"
40 | },
41 | "junit_result_unparsed": null,
42 | "custom_step_results": [
43 | {
44 | "content": "Log",
45 | "status_id": 1
46 | },
47 | {
48 | "content": "Set Test Message",
49 | "status_id": 1
50 | }
51 | ]
52 | },
53 | "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1a"
54 | },
55 | {
56 | "title": "Subtest 1b",
57 | "section_id": null,
58 | "case_id": 2,
59 | "estimate": null,
60 | "template_id": null,
61 | "type_id": null,
62 | "milestone_id": null,
63 | "refs": null,
64 | "case_fields": {},
65 | "result": {
66 | "case_id": 2,
67 | "status_id": 5,
68 | "comment": "NOK",
69 | "version": null,
70 | "elapsed": "1s",
71 | "defects": null,
72 | "assignedto_id": null,
73 | "attachments": [],
74 | "result_fields": {},
75 | "junit_result_unparsed": null,
76 | "custom_step_results": [
77 | {
78 | "content": "Fail",
79 | "status_id": 5
80 | }
81 | ]
82 | },
83 | "custom_automation_id": "Sub-Tests.Subtests 1.Subtest 1b"
84 | }
85 | ],
86 | "properties": []
87 | },
88 | {
89 | "name": "Sub-Tests.Subtests 2",
90 | "suite_id": null,
91 | "parent_id": null,
92 | "description": null,
93 | "section_id": null,
94 | "testcases": [
95 | {
96 | "title": "Subtest 2a",
97 | "section_id": null,
98 | "case_id": 3,
99 | "estimate": null,
100 | "template_id": null,
101 | "type_id": null,
102 | "milestone_id": null,
103 | "refs": null,
104 | "case_fields": {},
105 | "result": {
106 | "case_id": 3,
107 | "status_id": 1,
108 | "comment": null,
109 | "version": null,
110 | "elapsed": "1s",
111 | "defects": null,
112 | "assignedto_id": null,
113 | "attachments": [],
114 | "result_fields": {},
115 | "junit_result_unparsed": null,
116 | "custom_step_results": [
117 | {
118 | "content": "Log",
119 | "status_id": 1
120 | }
121 | ]
122 | },
123 | "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2a"
124 | },
125 | {
126 | "title": "Subtest 2b",
127 | "section_id": null,
128 | "case_id": 4,
129 | "estimate": null,
130 | "template_id": null,
131 | "type_id": null,
132 | "milestone_id": null,
133 | "refs": null,
134 | "case_fields": {},
135 | "result": {
136 | "case_id": 4,
137 | "status_id": 1,
138 | "comment": null,
139 | "version": null,
140 | "elapsed": "3s",
141 | "defects": null,
142 | "assignedto_id": null,
143 | "attachments": [],
144 | "result_fields": {},
145 | "junit_result_unparsed": null,
146 | "custom_step_results": [
147 | {
148 | "content": "Log",
149 | "status_id": 1
150 | }
151 | ]
152 | },
153 | "custom_automation_id": "Sub-Tests.Subtests 2.Subtest 2b"
154 | }
155 | ],
156 | "properties": []
157 | }
158 | ],
159 | "source": "robotframework_id_in_name_RF70.xml"
160 | }
--------------------------------------------------------------------------------
/tests/test_data/json/root.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": null,
3 | "name": "test suites root",
4 | "source": "root.xml",
5 | "suite_id": null,
6 | "testsections": [
7 | {
8 | "description": null,
9 | "name": "Skipped test",
10 | "parent_id": null,
11 | "properties": [
12 | {
13 | "description": "setting1: True",
14 | "name": "setting1",
15 | "value": "True"
16 | }
17 | ],
18 | "section_id": null,
19 | "suite_id": null,
20 | "testcases": [
21 | {
22 | "case_id": null,
23 | "custom_automation_id": "tests.test_junit_to_dataclass.test_testrail_test_suite",
24 | "estimate": null,
25 | "milestone_id": null,
26 | "refs": null,
27 | "result": {
28 | "assignedto_id": null,
29 | "comment": "Finding 1\n\nFinding 2\n\ntesting:&&qS55!T@\n\nType: pytest.skip\nMessage: Please skip\nText: skipped by user",
30 | "defects": null,
31 | "elapsed": "159s",
32 | "junit_result_unparsed": [],
33 | "status_id": 4,
34 | "case_id": null,
35 | "version": null,
36 | "attachments": [
37 | "/first.file",
38 | "second.file"
39 | ],
40 | "result_fields": {
41 | "version": "1.1",
42 | "custom_field": "custom_val"
43 | },
44 | "custom_step_results": [
45 | {
46 | "content": "Go to homepage",
47 | "status_id": 1
48 | },
49 | {
50 | "content": "Verify content",
51 | "status_id": 5
52 | }
53 | ]
54 | },
55 | "section_id": null,
56 | "template_id": null,
57 | "title": "test_testrail_test_suite",
58 | "case_fields": {
59 | "custom_case_field": "custom_case_val",
60 | "custom_steps": "1. First step\n2. Second step\n3. Third step",
61 | "test_empty_value": "another_case_val"
62 | },
63 | "type_id": null
64 | },
65 | {
66 | "case_id": null,
67 | "custom_automation_id": "The quick, brown fox jumps over a lazy dog. DJs flock by when MTV ax quiz prog. Junk MTV quiz graced by fox whelps. Bawds jog, flick quartz, vex nymphs. Waltz, bad nymph, for quick jigs vex! Fox nymph.Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec.",
68 | "estimate": null,
69 | "milestone_id": null,
70 | "refs": null,
71 | "case_fields": {
72 | },
73 | "result": {
74 | "assignedto_id": null,
75 | "defects": null,
76 | "elapsed": "160s",
77 | "junit_result_unparsed": [],
78 | "status_id": 1,
79 | "case_id": null,
80 | "version": null,
81 | "custom_step_results": [],
82 | "result_fields": {},
83 | "attachments": [],
84 | "comment": ""
85 | },
86 | "section_id": null,
87 | "template_id": null,
88 | "title": "elit Aenean commodo ligula eget dolor Aenean massa Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem Nulla consequat massa quis enim Donec",
89 | "type_id": null
90 | }
91 | ]
92 | }
93 | ]
94 | }
--------------------------------------------------------------------------------
/tests/test_data/json/root_id_in_name.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": null,
3 | "name": "test suites root",
4 | "source": "root_id_in_name.xml",
5 | "suite_id": null,
6 | "testsections": [
7 | {
8 | "description": null,
9 | "name": "Skipped test",
10 | "parent_id": null,
11 | "properties": [
12 | {
13 | "description": "setting1: True",
14 | "name": "setting1",
15 | "value": "True"
16 | }
17 | ],
18 | "section_id": null,
19 | "suite_id": null,
20 | "testcases": [
21 | {
22 | "case_id": 100,
23 | "custom_automation_id": "tests.test_junit_to_dataclass.[C100] test_testrail 1",
24 | "estimate": null,
25 | "milestone_id": null,
26 | "refs": null,
27 | "result": {
28 | "assignedto_id": null,
29 | "comment": "",
30 | "defects": null,
31 | "elapsed": "159s",
32 | "junit_result_unparsed": [],
33 | "status_id": 1,
34 | "case_id": 100,
35 | "version": null,
36 | "attachments": [],
37 | "result_fields": {},
38 | "custom_step_results": []
39 | },
40 | "section_id": null,
41 | "template_id": null,
42 | "title": "test_testrail 1",
43 | "type_id": null,
44 | "case_fields": {}
45 | },
46 | {
47 | "case_id": 101,
48 | "custom_automation_id": "tests.test_junit_to_dataclass.[c101] test_testrail 2",
49 | "estimate": null,
50 | "milestone_id": null,
51 | "refs": null,
52 | "result": {
53 | "assignedto_id": null,
54 | "comment": "Type: pytest.skip\nMessage: Please skip\nText: skipped by user",
55 | "defects": null,
56 | "elapsed": "159s",
57 | "junit_result_unparsed": [],
58 | "status_id": 4,
59 | "case_id": 101,
60 | "version": null,
61 | "attachments": [
62 | "/first.file",
63 | "second.file"
64 | ],
65 | "result_fields": {},
66 | "custom_step_results": []
67 | },
68 | "section_id": null,
69 | "template_id": null,
70 | "title": "test_testrail 2",
71 | "type_id": null,
72 | "case_fields": {}
73 | },
74 | {
75 | "case_id": null,
76 | "custom_automation_id": "tests.test_junit_to_dataclass.test_testrail 3",
77 | "estimate": null,
78 | "milestone_id": null,
79 | "refs": null,
80 | "result": {
81 | "assignedto_id": null,
82 | "comment": "",
83 | "defects": null,
84 | "elapsed": "159s",
85 | "junit_result_unparsed": [],
86 | "status_id": 1,
87 | "case_id": null,
88 | "version": null,
89 | "attachments": [],
90 | "result_fields": {},
91 | "custom_step_results": []
92 | },
93 | "section_id": null,
94 | "template_id": null,
95 | "title": "test_testrail 3",
96 | "type_id": null,
97 | "case_fields": {}
98 | }
99 | ]
100 | }
101 | ]
102 | }
--------------------------------------------------------------------------------
/tests/test_data/json/root_id_in_property.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": null,
3 | "name": "test suites root",
4 | "source": "root_id_in_property.xml",
5 | "suite_id": null,
6 | "testsections": [
7 | {
8 | "description": null,
9 | "name": "Skipped test",
10 | "parent_id": null,
11 | "properties": [
12 | {
13 | "description": "setting1: True",
14 | "name": "setting1",
15 | "value": "True"
16 | }
17 | ],
18 | "section_id": null,
19 | "suite_id": null,
20 | "testcases": [
21 | {
22 | "case_id": 100,
23 | "custom_automation_id": "tests.test_junit_to_dataclass.test_testrail 1",
24 | "estimate": null,
25 | "milestone_id": null,
26 | "refs": null,
27 | "result": {
28 | "assignedto_id": null,
29 | "comment": "",
30 | "defects": null,
31 | "elapsed": "159s",
32 | "junit_result_unparsed": [],
33 | "status_id": 1,
34 | "case_id": 100,
35 | "version": null,
36 | "attachments": [],
37 | "result_fields": {},
38 | "custom_step_results": []
39 | },
40 | "section_id": null,
41 | "template_id": null,
42 | "title": "test_testrail 1",
43 | "type_id": null,
44 | "case_fields": {}
45 | },
46 | {
47 | "case_id": 101,
48 | "custom_automation_id": "tests.test_junit_to_dataclass.test_testrail 2",
49 | "estimate": null,
50 | "milestone_id": null,
51 | "refs": null,
52 | "result": {
53 | "assignedto_id": null,
54 | "comment": "",
55 | "defects": null,
56 | "elapsed": "159s",
57 | "junit_result_unparsed": [],
58 | "status_id": 1,
59 | "case_id": 101,
60 | "version": null,
61 | "attachments": [],
62 | "result_fields": {},
63 | "custom_step_results": []
64 | },
65 | "section_id": null,
66 | "template_id": null,
67 | "title": "test_testrail 2",
68 | "type_id": null,
69 | "case_fields": {}
70 | },
71 | {
72 | "case_id": 102,
73 | "custom_automation_id": "tests.test_junit_to_dataclass.test_testrail 3",
74 | "estimate": null,
75 | "milestone_id": null,
76 | "refs": null,
77 | "result": {
78 | "assignedto_id": null,
79 | "comment": "Type: pytest.skip\nMessage: Please skip\nText: skipped by user",
80 | "defects": null,
81 | "elapsed": "159s",
82 | "junit_result_unparsed": [],
83 | "status_id": 4,
84 | "case_id": 102,
85 | "version": null,
86 | "attachments": [
87 | "/first.file",
88 | "second.file"
89 | ],
90 | "result_fields": {},
91 | "custom_step_results": []
92 | },
93 | "section_id": null,
94 | "template_id": null,
95 | "title": "test_testrail 3",
96 | "type_id": null,
97 | "case_fields": {}
98 | },
99 | {
100 | "case_id": null,
101 | "custom_automation_id": "tests.test_junit_to_dataclass.test_testrail 4",
102 | "estimate": null,
103 | "milestone_id": null,
104 | "refs": null,
105 | "result": {
106 | "assignedto_id": null,
107 | "comment": "",
108 | "defects": null,
109 | "elapsed": "159s",
110 | "junit_result_unparsed": [],
111 | "status_id": 1,
112 | "case_id": null,
113 | "version": null,
114 | "attachments": [],
115 | "result_fields": {},
116 | "custom_step_results": []
117 | },
118 | "section_id": null,
119 | "template_id": null,
120 | "title": "test_testrail 4",
121 | "type_id": null,
122 | "case_fields": {}
123 | }
124 | ]
125 | }
126 | ]
127 | }
--------------------------------------------------------------------------------
/tests/test_data/json/sauce1.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Firefox",
3 | "suite_id": null,
4 | "description": null,
5 | "testsections": [
6 | {
7 | "name": "test_suite_1.cy.js",
8 | "suite_id": null,
9 | "parent_id": null,
10 | "description": null,
11 | "section_id": null,
12 | "testcases": [
13 | {
14 | "section_id": null,
15 | "title": "Component 1 Verify page structure",
16 | "case_id": null,
17 | "estimate": null,
18 | "template_id": null,
19 | "type_id": null,
20 | "milestone_id": null,
21 | "refs": null,
22 | "case_fields": {},
23 | "result": {
24 | "case_id": null,
25 | "status_id": 1,
26 | "comment": "SauceLabs session: https://app.saucelabs.com/tests/7d7544f09a47428fb97ee53d1a5b1419\n\n",
27 | "version": null,
28 | "elapsed": "1s",
29 | "defects": null,
30 | "assignedto_id": null,
31 | "attachments": [],
32 | "junit_result_unparsed": [],
33 | "result_fields": {},
34 | "custom_step_results": []
35 | },
36 | "custom_automation_id": "Verify page structure.Component 1 Verify page structure"
37 | }
38 | ],
39 | "properties": [
40 | {
41 | "name": "browser",
42 | "value": "firefox 108",
43 | "description": "browser: firefox 108"
44 | },
45 | {
46 | "name": "platform",
47 | "value": "Windows 11",
48 | "description": "platform: Windows 11"
49 | }
50 | ]
51 | },
52 | {
53 | "name": "test_suite_2.cy.js",
54 | "suite_id": null,
55 | "parent_id": null,
56 | "description": null,
57 | "section_id": null,
58 | "testcases": [
59 | {
60 | "section_id": null,
61 | "title": "Component 2 Verify page structure",
62 | "case_id": null,
63 | "estimate": null,
64 | "template_id": null,
65 | "type_id": null,
66 | "milestone_id": null,
67 | "refs": null,
68 | "case_fields": {},
69 | "result": {
70 | "case_id": null,
71 | "status_id": 1,
72 | "comment": "SauceLabs session: https://app.saucelabs.com/tests/cbb864e049c645e1a96d56e953fe33f0\n\n",
73 | "version": null,
74 | "elapsed": "1s",
75 | "defects": null,
76 | "assignedto_id": null,
77 | "attachments": [],
78 | "junit_result_unparsed": [],
79 | "result_fields": {},
80 | "custom_step_results": []
81 | },
82 | "custom_automation_id": "Verify page structure.Component 2 Verify page structure"
83 | }
84 | ],
85 | "properties": []
86 | }
87 | ],
88 | "source": "sauce.xml"
89 | }
90 |
--------------------------------------------------------------------------------
/tests/test_data/json/sauce2.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Chrome",
3 | "suite_id": null,
4 | "description": null,
5 | "testsections": [
6 | {
7 | "name": "test_suite_1.cy.js",
8 | "suite_id": null,
9 | "parent_id": null,
10 | "description": null,
11 | "section_id": null,
12 | "testcases": [
13 | {
14 | "section_id": null,
15 | "title": "Component 1 Verify page structure",
16 | "case_id": null,
17 | "estimate": null,
18 | "template_id": null,
19 | "type_id": null,
20 | "milestone_id": null,
21 | "refs": null,
22 | "case_fields": {},
23 | "result": {
24 | "case_id": null,
25 | "status_id": 5,
26 | "comment": "SauceLabs session: https://app.saucelabs.com/tests/349cf779c0f94e649f7ea6ccc42e1753\n\nType: element not found\nMessage: Fail due to...\nText: stacktrace...",
27 | "version": null,
28 | "elapsed": "1s",
29 | "defects": null,
30 | "assignedto_id": null,
31 | "attachments": [],
32 | "junit_result_unparsed": [],
33 | "result_fields": {},
34 | "custom_step_results": []
35 | },
36 | "custom_automation_id": "Verify page structure.Component 1 Verify page structure"
37 | }
38 | ],
39 | "properties": [
40 | {
41 | "name": "browser",
42 | "value": "chrome 106",
43 | "description": "browser: chrome 106"
44 | },
45 | {
46 | "name": "platform",
47 | "value": "Windows 11",
48 | "description": "platform: Windows 11"
49 | }
50 | ]
51 | },
52 | {
53 | "name": "test_suite_2.cy.js",
54 | "suite_id": null,
55 | "parent_id": null,
56 | "description": null,
57 | "section_id": null,
58 | "testcases": [
59 | {
60 | "section_id": null,
61 | "title": "Component 2 Verify page structure",
62 | "case_id": null,
63 | "estimate": null,
64 | "template_id": null,
65 | "type_id": null,
66 | "milestone_id": null,
67 | "refs": null,
68 | "case_fields": {},
69 | "result": {
70 | "case_id": null,
71 | "status_id": 1,
72 | "comment": "SauceLabs session: https://app.saucelabs.com/tests/c0e3ddae1e104b86b940ed7e8026ff83\n\n",
73 | "version": null,
74 | "elapsed": "1s",
75 | "defects": null,
76 | "assignedto_id": null,
77 | "attachments": [],
78 | "junit_result_unparsed": [],
79 | "result_fields": {},
80 | "custom_step_results": []
81 | },
82 | "custom_automation_id": "Verify page structure.Component 2 Verify page structure"
83 | }
84 | ],
85 | "properties": []
86 | }
87 | ],
88 | "source": "sauce.xml"
89 | }
90 |
--------------------------------------------------------------------------------
/tests/test_data/json/update_case_result_single_with_id.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": null,
3 | "name": "Suite1",
4 | "suite_id": 4,
5 | "testsections": [
6 | {
7 | "name": "Skipped test",
8 | "suite_id": 4,
9 | "section_id": 1234,
10 | "testcases": [
11 | {
12 | "section_id": 1234,
13 | "title": "testCase1",
14 | "case_id": 10,
15 | "result": {
16 | "comment": "Type: pytest.skip\\nMessage: Please skip\\nText: skipped by user",
17 | "status_id": 4,
18 | "case_id": 10
19 | }
20 | }
21 | ]
22 | }
23 | ]
24 | }
25 |
--------------------------------------------------------------------------------
/tests/test_data/json/update_case_result_single_without_id.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": null,
3 | "name": "Suite1",
4 | "suite_id": 4,
5 | "testsections": [
6 | {
7 | "name": "Skipped test",
8 | "suite_id": 4,
9 | "section_id": 1234,
10 | "testcases": [
11 | {
12 | "section_id": 1234,
13 | "title": "testCase1",
14 | "case_id": null,
15 | "result": {
16 | "comment": "Type: pytest.skip\\nMessage: Please skip\\nText: skipped by user",
17 | "status_id": 4,
18 | "case_id": null
19 | }
20 | }
21 | ]
22 | }
23 | ]
24 | }
25 |
--------------------------------------------------------------------------------
/tests/test_data/load_data_from_config_test_data.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | correct_yaml_expected_result = dict(
4 | host="https://fakename.testrail.io/",
5 | project="Project name",
6 | file="result.xml",
7 | title="Project title",
8 | verbose=False,
9 | silent=False,
10 | batch_size=20,
11 | timeout=50,
12 | auto_creation_response=True,
13 | suite_id=50,
14 | run_id=10,
15 | )
16 | correct_config_file_path = Path(__file__).parent / "yaml" / "correct_config_file.yaml"
17 | correct_config_file_path_with_custom_config_path = (
18 | Path(__file__).parent / "yaml" / "correct_config_file_with_custom_config_path.yaml"
19 | )
20 | correct_config_file_with_custom_config_empty_path = (
21 | Path(__file__).parent / "yaml" / "correct_config_file_with_custom_config_empty.yaml"
22 | )
23 | correct_config_file_loop_check_path = (
24 | Path(__file__).parent / "yaml" / "correct_config_file_loop_check.yaml"
25 | )
26 | correct_config_file_multiple_documents_path = (
27 | Path(__file__).parent / "yaml" / "correct_config_file_multiple_documents.yaml"
28 | )
29 | correct_config_file_multiple_documents_path_with_custom_config_path = (
30 | Path(__file__).parent
31 | / "yaml"
32 | / "correct_config_file_multiple_documents_with_custom_config_path.yaml"
33 | )
34 | incorrect_config_file_path = (
35 | Path(__file__).parent / "yaml" / "corrupted_config_file.yaml"
36 | )
37 | incorrect_config_file_with_string_path = (
38 | Path(__file__).parent / "yaml" / "corrupted_config_file_with_string.yaml"
39 | )
40 | incorrect_config_file_with_list_path = (
41 | Path(__file__).parent / "yaml" / "corrupted_config_file_with_list.yaml"
42 | )
43 | incorrect_config_file_with_start_indicator_at_the_end_path = (
44 |     Path(__file__).parent / "yaml" / "corrupted_config_file_with_start_indicator_at_the_end.yaml"
45 | )
46 | incorrect_config_file_with_with_empty_document = (
47 | Path(__file__).parent / "yaml" / "corrupted_config_file_with_empty_document.yaml"
48 | )
49 |
50 | incorrect_config_file_multiple_documents_path = (
51 | Path(__file__).parent / "yaml" / "corrupted_config_file_multiple_documents.yaml"
52 | )
53 |
--------------------------------------------------------------------------------
/tests/test_data/project_based_client_test_data.py:
--------------------------------------------------------------------------------
1 | from trcli.constants import FAULT_MAPPING
2 |
3 | TEST_GET_SUITE_ID_PROMPTS_USER_TEST_DATA = [
4 | (True, 10, 1, "Adding missing suites to project Fake project name.", False),
5 | (True, 10, -1, "Adding missing suites to project Fake project name.", True),
6 | (False, -1, -1, FAULT_MAPPING["no_user_agreement"].format(type="suite"), False),
7 | ]
8 |
9 | TEST_GET_SUITE_ID_PROMPTS_USER_IDS = [
10 | "user agrees",
11 | "user agrees, fail to add suite",
12 | "used does not agree",
13 | ]
14 |
15 | TEST_GET_SUITE_ID_SINGLE_SUITE_MODE_BASELINES_TEST_DATA = [
16 | (([], "Could not get suites"), -1, -1, "Could not get suites"),
17 | (([10], ""), -1, 1, ""),
18 | (
19 | ([10, 11, 12], ""),
20 | -1,
21 | -1,
22 | FAULT_MAPPING["not_unique_suite_id_single_suite_baselines"].format(
23 | project_name="Fake project name"
24 | ),
25 | ),
26 | ]
27 |
28 | TEST_GET_SUITE_ID_SINGLE_SUITE_MODE_BASELINES_IDS = [
29 | "get_suite_ids fails",
30 | "get_suite_ids returns one ID",
31 | "get_suite_ids returns more than one ID",
32 | ]
33 |
--------------------------------------------------------------------------------
/tests/test_data/proxy_test_data.py:
--------------------------------------------------------------------------------
1 | from trcli.settings import DEFAULT_API_CALL_TIMEOUT
2 |
3 | FAKE_PROJECT_DATA = {"fake_project_data": "data"}
4 | INVALID_TEST_CASE_ERROR = {"error": "Invalid or unknown test case"}
5 | API_RATE_LIMIT_REACHED_ERROR = {"error": "API rate limit reached"}
6 | NO_PERMISSION_PROJECT_ERROR = {
7 | "error": "No permissions to add projects (requires admin rights)"
8 | }
9 | TIMEOUT_PARSE_ERROR = (
10 | f"Warning. Could not convert provided 'timeout' to float. "
11 | f"Please make sure that timeout format is correct. Setting to default: "
12 | f"{DEFAULT_API_CALL_TIMEOUT}"
13 | )
14 |
15 | # Proxy test data
16 | FAKE_PROXY = "http://127.0.0.1:8080"
17 | FAKE_PROXY_USER = "username:password"
18 |
19 | PROXY_ERROR_MESSAGE = (
20 |     "Failed to connect to the proxy server. Please check the proxy settings and ensure the server is available."
21 | )
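FAKE_PROXY and FAKE_PROXY_USER mirror the shape the requests library expects for proxy configuration. A hedged sketch of that usage (the exact wiring inside trcli's API client may differ):

    import requests

    # Credentials go inline in the proxy URL: scheme://user:password@host:port
    proxy = "http://username:password@127.0.0.1:8080"
    try:
        requests.get(
            "https://fakename.testrail.io/",
            proxies={"http": proxy, "https": proxy},
            timeout=50,
        )
    except requests.exceptions.ProxyError:
        print("Proxy unreachable - the situation PROXY_ERROR_MESSAGE reports.")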
--------------------------------------------------------------------------------
/tests/test_data/results_provider_test_data.py:
--------------------------------------------------------------------------------
1 | from trcli.constants import FAULT_MAPPING, RevertMessages
2 |
3 | TEST_UPLOAD_RESULTS_FLOW_TEST_DATA = [
4 | "get_suite_id",
5 | "check_for_missing_sections_and_add",
6 | "check_for_missing_test_cases_and_add",
7 | "add_run",
8 | "add_results",
9 | "close_run",
10 | ]
11 | TEST_UPLOAD_RESULTS_FLOW_IDS = [
12 | "failed_to_get_suite_id",
13 | "check_and_add_sections_failed",
14 | "check_and_add_test_cases_failed",
15 | "add_run_failed",
16 | "add_results_failed",
17 | "close_run_failed",
18 | ]
19 | TEST_ADD_MISSING_SECTIONS_PROMPTS_USER_TEST_DATA = [
20 | (
21 | True,
22 | [10, 11, 12],
23 | "",
24 | [10, 11, 12],
25 | "Adding missing sections to the suite.",
26 | 1,
27 | ),
28 | (
29 | True,
30 | [10, 11, 12],
31 | "Fail to add",
32 | [],
33 | "Adding missing sections to the suite.",
34 | -1,
35 | ),
36 | (
37 | False,
38 | [10, 11, 12],
39 | "",
40 | [],
41 | FAULT_MAPPING["no_user_agreement"].format(type="sections"),
42 | -1,
43 | ),
44 | ]
45 | TEST_ADD_MISSING_SECTIONS_PROMPTS_USER_IDS = [
46 | "user agrees, sections added",
47 | "user agrees, sections not added",
48 | "used does not agree",
49 | ]
50 | TEST_ADD_MISSING_TEST_CASES_PROMPTS_USER_TEST_DATA = [
51 | (
52 | True,
53 | [10, 11, 12],
54 | "",
55 | [10, 11, 12],
56 | "Adding missing test cases to the suite.",
57 | 1,
58 | ),
59 | (
60 | True,
61 | [10, 11, 12],
62 | "Fail to add",
63 | [],
64 | "Adding missing test cases to the suite.",
65 | -1,
66 | ),
67 | (
68 | False,
69 | [10, 11, 12],
70 | "",
71 | [],
72 | FAULT_MAPPING["no_user_agreement"].format(type="test cases"),
73 | -1,
74 | ),
75 | ]
76 | TEST_ADD_MISSING_TEST_CASES_PROMPTS_USER_IDS = [
77 | "user agrees, test cases added",
78 | "user agrees, test cases not added",
79 | "used does not agree",
80 | ]
81 |
82 | TEST_REVERT_FUNCTIONS_AND_EXPECTED = [
83 | (
84 | "delete_suite",
85 | [
86 | RevertMessages.run_deleted,
87 | RevertMessages.test_cases_deleted,
88 | RevertMessages.section_deleted,
89 | RevertMessages.suite_not_deleted.format(
90 | error="No permissions to delete suite."
91 | ),
92 | ],
93 | ),
94 | (
95 | "delete_sections",
96 | [
97 | RevertMessages.run_deleted,
98 | RevertMessages.test_cases_deleted,
99 | RevertMessages.section_not_deleted.format(
100 | error="No permissions to delete sections."
101 | ),
102 | RevertMessages.suite_deleted,
103 | ],
104 | ),
105 | (
106 | "delete_cases",
107 | [
108 | RevertMessages.run_deleted,
109 | RevertMessages.test_cases_not_deleted.format(
110 | error="No permissions to delete cases."
111 | ),
112 | RevertMessages.section_deleted,
113 | RevertMessages.suite_deleted,
114 | ],
115 | ),
116 | (
117 | "delete_run",
118 | [
119 | RevertMessages.run_not_deleted.format(
120 | error="No permissions to delete run."
121 | ),
122 | RevertMessages.test_cases_deleted,
123 | RevertMessages.section_deleted,
124 | RevertMessages.suite_deleted,
125 | ],
126 | ),
127 | ]
128 |
129 | TEST_REVERT_FUNCTIONS_IDS = [
130 | "unable_to_delete_suite",
131 | "unable_to_delete_sections",
132 | "unable_to_delete_cases",
133 | "unable_to_delete_run",
134 | ]
135 |
136 | TEST_REVERT_FUNCTIONS_AND_EXPECTED_EXISTING_SUITE = [
137 | (
138 | "delete_sections",
139 | [
140 | RevertMessages.run_deleted,
141 | RevertMessages.test_cases_deleted,
142 | RevertMessages.section_not_deleted.format(
143 | error="No permissions to delete sections."
144 | ),
145 | ],
146 | ),
147 | (
148 | "delete_cases",
149 | [
150 | RevertMessages.run_deleted,
151 | RevertMessages.test_cases_not_deleted.format(
152 | error="No permissions to delete cases."
153 | ),
154 | RevertMessages.section_deleted,
155 | ],
156 | ),
157 | (
158 | "delete_run",
159 | [
160 | RevertMessages.run_not_deleted.format(
161 | error="No permissions to delete run."
162 | ),
163 | RevertMessages.test_cases_deleted,
164 | RevertMessages.section_deleted,
165 | ],
166 | ),
167 | ]
168 |
169 | TEST_REVERT_FUNCTIONS_IDS_EXISTING_SUITE = [
170 | "unable_to_delete_sections",
171 | "unable_to_delete_cases",
172 | "unable_to_delete_run",
173 | ]
174 |
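As a rough illustration of how these data/ids pairs plug into pytest (a sketch only; the actual consumer is presumably tests/test_results_uploader.py, and the test body here is a placeholder):

import pytest

from tests.test_data.results_provider_test_data import (
    TEST_REVERT_FUNCTIONS_AND_EXPECTED,
    TEST_REVERT_FUNCTIONS_IDS,
)


@pytest.mark.parametrize(
    "failing_function, expected_messages",
    TEST_REVERT_FUNCTIONS_AND_EXPECTED,
    ids=TEST_REVERT_FUNCTIONS_IDS,  # one readable test ID per data tuple
)
def test_revert_messages(failing_function, expected_messages):
    ...  # make `failing_function` fail, run the revert flow, assert on messages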
--------------------------------------------------------------------------------
/tests/test_data/yaml/correct_config_file.yaml:
--------------------------------------------------------------------------------
1 | host: https://fakename.testrail.io/
2 | project: Project name
3 | file: result.xml
4 | title: Project title
5 | verbose: False
6 | silent: False
7 | batch_size: 20
8 | timeout: 50
9 | auto_creation_response: True
10 | suite_id: 50
11 | run_id: 10
12 |
--------------------------------------------------------------------------------
/tests/test_data/yaml/correct_config_file_loop_check.yaml:
--------------------------------------------------------------------------------
1 | host: https://fakename.testrail.io/
2 | project: Project name
3 | file: result.xml
4 | title: Project title
5 | verbose: False
6 | silent: False
7 | batch_size: 20
8 | config: config.yaml
9 | timeout: 50
10 | auto_creation_response: True
11 | suite_id: 50
12 | run_id: 10
13 |
--------------------------------------------------------------------------------
/tests/test_data/yaml/correct_config_file_multiple_documents.yaml:
--------------------------------------------------------------------------------
1 | host: https://fakename.testrail.io/
2 | project: Project name
3 | ---
4 | file: result.xml
5 | title: Project title
6 | verbose: False
7 | silent: False
8 | ---
9 | batch_size: 20
10 | timeout: 50
11 | auto_creation_response: True
12 | ---
13 | suite_id: 50
14 | run_id: 10
15 | ...
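One plausible way a loader consumes such multi-document files (a sketch assuming PyYAML's safe_load_all; trcli's actual config loading may differ in details):

import yaml


def load_config(path: str) -> dict:
    """Merge every document in a multi-document YAML file into one dict."""
    merged = {}
    with open(path) as f:
        for document in yaml.safe_load_all(f):
            if document:  # skip empty documents
                merged.update(document)
    return merged


# load_config("correct_config_file_multiple_documents.yaml") would yield a
# single mapping with host, project, file, title, ..., suite_id and run_id.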
--------------------------------------------------------------------------------
/tests/test_data/yaml/correct_config_file_multiple_documents_with_custom_config_path.yaml:
--------------------------------------------------------------------------------
1 | host: https://fakename.testrail.io/
2 | project: Project name
3 | ---
4 | file: result.xml
5 | title: Project title
6 | verbose: False
7 | silent: False
8 | ---
9 | batch_size: 20
10 | timeout: 50
11 | config: custom_config.yaml
12 | auto_creation_response: True
13 | ---
14 | suite_id: 50
15 | run_id: 10
16 | ...
--------------------------------------------------------------------------------
/tests/test_data/yaml/correct_config_file_with_custom_config_empty.yaml:
--------------------------------------------------------------------------------
1 | host: https://fakename.testrail.io/
2 | project: Project name
3 | file: result.xml
4 | title: Project title
5 | verbose: False
6 | silent: False
7 | batch_size: 20
8 | timeout: 50
9 | config:
10 | auto_creation_response: True
11 | suite_id: 50
12 | run_id: 10
--------------------------------------------------------------------------------
/tests/test_data/yaml/correct_config_file_with_custom_config_path.yaml:
--------------------------------------------------------------------------------
1 | host: https://fakename_default_config.testrail.io/
2 | project: Project name default config file
3 | file: result_default_config_file.xml
4 | title: Project title default config file
5 | verbose: True
6 | silent: True
7 | config: custom_config.yaml
8 | batch_size: 10
9 | timeout: 40
10 | auto_creation_response: False
11 | suite_id: 40
12 | run_id: 20
13 |
--------------------------------------------------------------------------------
/tests/test_data/yaml/corrupted_config_file.yaml:
--------------------------------------------------------------------------------
1 | host: https://fakename.testrail.io/
2 | project: Project name
3 | file: result.xml
4 | title: Project title
5 | verbose: False
6 | silent: False
7 | config: custom_config.yaml
8 | batch_size: 20
9 | timeout: 50
10 | auto_creation_response: True
11 | suite_id: 50
12 | aa
13 | run_id: 10
14 |
--------------------------------------------------------------------------------
/tests/test_data/yaml/corrupted_config_file_multiple_documents.yaml:
--------------------------------------------------------------------------------
1 | host: https://fakename.testrail.io/
2 | project: Project name
3 | ---
4 | file: result.xml
5 | title: Project title
6 | verbose: False
7 | silent: False
8 | config: custom_config.yaml
9 | ---
10 | aaa
11 | batch_size: 20
12 | timeout: 50
13 | auto_creation_response: True
14 | ---
15 | suite_id: 50
16 | run_id: 10
17 | ...
--------------------------------------------------------------------------------
/tests/test_data/yaml/corrupted_config_file_with_empty_document.yaml:
--------------------------------------------------------------------------------
1 | file: dsa
2 | ---
3 | ...
4 | username: fake@fake.com
--------------------------------------------------------------------------------
/tests/test_data/yaml/corrupted_config_file_with_list.yaml:
--------------------------------------------------------------------------------
1 | [Test item, another one]
2 |
--------------------------------------------------------------------------------
/tests/test_data/yaml/corrupted_config_file_with_start_indicator_at_the_end.yaml:
--------------------------------------------------------------------------------
1 | file: test
2 | ---
3 |
--------------------------------------------------------------------------------
/tests/test_data/yaml/corrupted_config_file_with_string.yaml:
--------------------------------------------------------------------------------
1 | Test string
2 |
--------------------------------------------------------------------------------
/tests/test_data/yaml/custom_config_file.yaml:
--------------------------------------------------------------------------------
1 | host: "host_from_custom_config"
2 | file: "file_from_custom_config"
3 | project: "project_from_custom_config"
4 | title: "title_from_custom_config"
5 | username: "username_from_custom_config"
6 | password: "password_from_custom_config"
7 | key: "key_from_custom_config"
--------------------------------------------------------------------------------
/tests/test_data/yaml/default_config_file.yaml:
--------------------------------------------------------------------------------
1 | host: "host_from_default_config",
2 | file: "file_from_default_config.xml",
3 | project: "project_from_default_config",
4 | title: "title_from_default_config",
5 | username: "username_from_default_config",
6 | password: "password_from_default_config",
7 | key: "key_from_default_config",
--------------------------------------------------------------------------------
/tests/test_dataclass_creation.py:
--------------------------------------------------------------------------------
1 | import json
2 | import pytest
3 | from junitparser import Element
4 | from tests.test_data.dataclass_creation import *
5 | from trcli.data_classes.dataclass_testrail import (
6 | TestRailResult,
7 | TestRailProperty,
8 | TestRailSuite,
9 | TestRailCase,
10 | TestRailSection,
11 | )
12 | from serde.json import to_json
13 | from trcli.data_classes.validation_exception import ValidationException
14 |
15 |
16 | class TestDataClassCreation:
17 | @pytest.mark.dataclass
18 | @pytest.mark.parametrize(
19 | "junit_test_result, expected_result",
20 | [
21 | ([FAILED_RESULT_INPUT], FAILED_EXPECTED),
22 | ([SKIPPED_RESULT_INPUT], SKIPPED_EXPECTED),
23 | ([SKIPPED_RESULT_EMPTY_INPUT], SKIPPED_EMPTY_EXPECTED),
24 | ([ERROR_RESULT_INPUT], ERROR_EXPECTED),
25 | ([], PASSED_EXPECTED),
26 | ],
27 | ids=[
28 | "Test result with failure",
29 | "Test result with skipped",
30 | "Test result with skipped but no messages",
31 | "Test result with error",
32 | "Test result passed",
33 | ],
34 | )
35 | @pytest.mark.dataclass
36 | def test_create_test_result_from_junit_element(
37 | self, junit_test_result: list, expected_result: dict
38 | ):
39 | result_dataclass = TestRailResult(1, junit_result_unparsed=junit_test_result)
40 | result_json = json.loads(to_json(result_dataclass))
41 | assert (
42 | result_json["status_id"] == expected_result["status_id"]
43 | ), "calculated status id doesn't mach expected id"
44 | assert (
45 | result_json["comment"] == expected_result["comment"]
46 | ), "Joined comment doesn't mach expected comment"
47 |
48 | @pytest.mark.dataclass
49 | def test_create_property(self):
50 | result_dataclass = TestRailProperty("Some property", "True")
51 | result_json = json.loads(to_json(result_dataclass))
52 | assert (
53 | result_json["description"] == "Some property: True"
54 | ), "Property description doesn't mach expected values"
55 |
56 | @pytest.mark.dataclass
57 | def test_generate_suite_name(self, freezer):
58 | freezer.move_to("2020-01-10 01:00:00")
59 | suite = TestRailSuite(name=None, source="file.xml")
60 | assert suite.name == "file.xml 10-01-20 01:00:00", "Name not generated properly"
61 |
62 | @pytest.mark.dataclass
63 | @pytest.mark.parametrize(
64 | "input_time, output_time",
65 | [
66 | ("40", "40s"),
67 | ("119.99", "120s"),
68 | (0, None),
69 | (50.4, "50s"),
70 | (-100, None),
71 | ],
72 | )
73 | def test_elapsed_time_calc_in_testresult(self, input_time, output_time):
74 | test_result = TestRailResult(case_id=1, elapsed=input_time)
75 | assert test_result.elapsed == output_time, "Elapsed not parsed properly"
76 |
77 | @pytest.mark.dataclass
78 | def test_elapsed_time_calc_in_testresult_none(self):
79 | test_result = TestRailResult(case_id=1, elapsed=None)
80 | assert test_result.elapsed is None, "Elapsed is not None"
81 | assert "elapsed" not in to_json(
82 | test_result
83 | ), "Elapsed should be skipped by serde"
84 |
85 | @pytest.mark.dataclass
86 | def test_validation_error_for_case(self):
87 | with pytest.raises(ValidationException):
88 | TestRailCase(section_id=1, title="")
89 |
90 | @pytest.mark.dataclass
91 | def test_validation_error_for_section(self):
92 | with pytest.raises(ValidationException):
93 | TestRailSection(suite_id=1, name="")
94 |
--------------------------------------------------------------------------------
/tests/test_response_verify.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from trcli.api.api_response_verify import ApiResponseVerify
3 | from trcli.data_classes.dataclass_testrail import TestRailSuite
4 |
5 |
6 | @pytest.fixture(scope="function")
7 | def api_response_verify():
8 | yield ApiResponseVerify(True)
9 |
10 |
11 | class TestResponseVerify:
12 | @pytest.mark.verifier
13 | def test_verify_add_suite(self, api_response_verify: ApiResponseVerify):
14 | send_to_api = TestRailSuite("Suite1", description="Some Description")
15 | returned_from_api = {"name": "Suite1", "description": "Some Description"}
16 | assert api_response_verify.verify_returned_data(send_to_api, returned_from_api)
17 |
18 | @pytest.mark.verifier
19 | def test_verify_add_suite_not_equal(self, api_response_verify: ApiResponseVerify):
20 | send_to_api = TestRailSuite("Suite1", description="Some Description")
21 | returned_from_api = {"name": "Suite1", "description": "Some other description"}
22 | assert not api_response_verify.verify_returned_data(
23 | send_to_api, returned_from_api
24 | )
25 |
26 | @pytest.mark.verifier
27 | def test_verify_data_in_list(self, api_response_verify: ApiResponseVerify):
28 | added_data = [{}, {}]
29 | response_data = [{}, {}]
30 |
31 | assert api_response_verify.verify_returned_data_for_list(
32 | added_data, response_data
33 | ), "Added data and returned data should match"
34 |
35 | added_data = [{}, {}, {}]
36 | response_data = [{}, {}]
37 |
38 | assert not api_response_verify.verify_returned_data_for_list(
39 | added_data, response_data
40 | ), "Missing item in response data. Verification should fail."
41 |
42 | added_data = [{"Case_name": "Case1"}, {"Case_name": "Case2"}]
43 | response_data = [
44 | {"Case_name": "Case1", "id": 1},
45 | {"Case_name": "Case2", "id": 2},
46 | ]
47 |
48 | assert api_response_verify.verify_returned_data_for_list(
49 | added_data, response_data
50 | ), "Added data and returned data should match"
51 |
52 | added_data = [{"Case_name": "Case1"}, {"Case_name": "Case2"}]
53 | response_data = [
54 | {"Case_name": "Case1", "id": 1},
55 | {"Case_name": "Case44", "id": 44},
56 | ]
57 |
58 | assert not api_response_verify.verify_returned_data_for_list(
59 | added_data, response_data
60 | ), "Missing item in response data. Verification should fail."
61 |
62 | @pytest.mark.verifier
63 | @pytest.mark.parametrize(
64 | "input_data_estimate, response_data_estimate",
65 | [
66 | ({"estimate": "1m 40s"}, {"estimate": "1m 40s"}),
67 | ({"estimate": "1m 60s"}, {"estimate": "2m"}),
68 | ({"estimate": "120s"}, {"estimate": "2m"}),
69 | ({"estimate": "36000s"}, {"estimate": "10h"}),
70 | ({"time": "2m"}, {"time": "2m"}),
71 | ],
72 | )
73 | @pytest.mark.verifier
74 | def test_verify_estimate(
75 | self,
76 | api_response_verify: ApiResponseVerify,
77 | input_data_estimate: dict,
78 | response_data_estimate: dict,
79 | ):
80 |
81 | assert api_response_verify.verify_returned_data(
82 | input_data_estimate, response_data_estimate
83 | ), "Added data and returned data should match"
84 |
85 | @pytest.mark.verifier
86 | @pytest.mark.parametrize(
87 | "input_data_estimate, response_data_estimate",
88 | [
89 | ({"description": ""}, {"description": None}),
90 | ({"description": None}, {"description": ""}),
91 | ({"comment": ""}, {"comment": None}),
92 | ({"comment": None}, {"comment": ""}),
93 | ],
94 | )
95 | def test_verify_strings(
96 | self,
97 | api_response_verify: ApiResponseVerify,
98 | input_data_estimate: dict,
99 | response_data_estimate: dict,
100 | ):
101 |
102 | assert api_response_verify.verify_returned_data(
103 | input_data_estimate, response_data_estimate
104 | ), "Added data and returned data should match"
105 |
--------------------------------------------------------------------------------
/tests/test_robot_parser.py:
--------------------------------------------------------------------------------
1 | import json
2 | from dataclasses import asdict
3 | from pathlib import Path
4 | from typing import Union
5 |
6 | import pytest
7 | from deepdiff import DeepDiff
8 |
9 | from trcli.cli import Environment
10 | from trcli.data_classes.data_parsers import MatchersParser
11 | from trcli.data_classes.dataclass_testrail import TestRailSuite
12 | from trcli.readers.robot_xml import RobotParser
13 |
14 |
15 | class TestRobotParser:
16 |
17 | @pytest.mark.parse_robot
18 | @pytest.mark.parametrize(
19 | "matcher, input_xml_path, expected_path",
20 | [
21 | # RF 5.0 format
22 | (
23 | MatchersParser.AUTO,
24 | Path(__file__).parent / "test_data/XML/robotframework_simple_RF50.xml",
25 | Path(__file__).parent / "test_data/json/robotframework_simple_RF50.json",
26 | ),
27 | (
28 | MatchersParser.NAME,
29 | Path(__file__).parent / "test_data/XML/robotframework_id_in_name_RF50.xml",
30 | Path(__file__).parent / "test_data/json/robotframework_id_in_name_RF50.json",
31 | ),
32 |
33 | # RF 7.0 format
34 | (
35 | MatchersParser.AUTO,
36 | Path(__file__).parent / "test_data/XML/robotframework_simple_RF70.xml",
37 | Path(__file__).parent / "test_data/json/robotframework_simple_RF70.json",
38 | ),
39 | (
40 | MatchersParser.NAME,
41 | Path(__file__).parent / "test_data/XML/robotframework_id_in_name_RF70.xml",
42 | Path(__file__).parent / "test_data/json/robotframework_id_in_name_RF70.json",
43 | )
44 | ],
45 | ids=["Case Matcher Auto", "Case Matcher Name", "Case Matcher Auto", "Case Matcher Name"]
46 | )
47 | @pytest.mark.parse_robot
48 | def test_robot_xml_parser_id_matcher_name(
49 | self, matcher: str, input_xml_path: Union[str, Path], expected_path: Union[str, Path], freezer
50 | ):
51 | freezer.move_to("2020-05-20 01:00:00")
52 | env = Environment()
53 | env.case_matcher = matcher
54 | env.file = input_xml_path
55 | file_reader = RobotParser(env)
56 | read_junit = file_reader.parse_file()[0]
57 | parsing_result_json = asdict(read_junit)
58 | with open(expected_path) as file_json:
59 |     expected_json = json.load(file_json)
60 | assert DeepDiff(parsing_result_json, expected_json) == {}, \
61 | f"Result of parsing XML is different than expected \n{DeepDiff(parsing_result_json, expected_json)}"
62 |
63 | @pytest.mark.parse_robot
64 | def test_robot_xml_parser_file_not_found(self):
65 | with pytest.raises(FileNotFoundError):
66 | env = Environment()
67 | env.file = Path(__file__).parent / "not_found.xml"
68 | RobotParser(env)
69 |
--------------------------------------------------------------------------------
/tests_e2e/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gurock/trcli/bf238450b555f28c9cd9db8b77a222ef909415e9/tests_e2e/__init__.py
--------------------------------------------------------------------------------
/tests_e2e/attachments/evidence.json:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gurock/trcli/bf238450b555f28c9cd9db8b77a222ef909415e9/tests_e2e/attachments/evidence.json
--------------------------------------------------------------------------------
/tests_e2e/attachments/testrail.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gurock/trcli/bf238450b555f28c9cd9db8b77a222ef909415e9/tests_e2e/attachments/testrail.jpg
--------------------------------------------------------------------------------
/tests_e2e/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | log_cli = true
--------------------------------------------------------------------------------
/tests_e2e/reports_junit/attachments.xml:
--------------------------------------------------------------------------------
[JUnit XML not recoverable: markup was stripped during extraction; only two failure messages ("failed due to...") survive.]
--------------------------------------------------------------------------------
/tests_e2e/reports_junit/duplicate-names.xml:
--------------------------------------------------------------------------------
[JUnit XML not recoverable: markup was stripped during extraction and no text content survives.]
--------------------------------------------------------------------------------
/tests_e2e/reports_junit/generic_ids_auto.xml:
--------------------------------------------------------------------------------
[JUnit XML not recoverable: markup was stripped during extraction; only one failure message ("failed due to...") survives.]
--------------------------------------------------------------------------------
/tests_e2e/reports_junit/generic_ids_auto_plus_one.xml:
--------------------------------------------------------------------------------
[JUnit XML not recoverable: markup was stripped during extraction; only one failure message ("failed due to...") survives.]
--------------------------------------------------------------------------------
/tests_e2e/reports_junit/generic_ids_name.xml:
--------------------------------------------------------------------------------
[JUnit XML not recoverable: markup was stripped during extraction; only one failure message ("failed due to...") survives.]
--------------------------------------------------------------------------------
/tests_e2e/reports_junit/generic_ids_property.xml:
--------------------------------------------------------------------------------
[JUnit XML not recoverable: markup was stripped during extraction; only one failure message ("failed due to...") survives.]
--------------------------------------------------------------------------------
/tests_e2e/reports_junit/junit_multiple_parts_pt1.xml:
--------------------------------------------------------------------------------
[JUnit XML not recoverable: markup was stripped during extraction and no text content survives.]
--------------------------------------------------------------------------------
/tests_e2e/reports_junit/junit_multiple_parts_pt2.xml:
--------------------------------------------------------------------------------
[JUnit XML not recoverable: markup was stripped during extraction and no text content survives.]
--------------------------------------------------------------------------------
/tests_e2e/reports_junit/saucelabs.xml:
--------------------------------------------------------------------------------
[JUnit XML not recoverable: markup was stripped during extraction; only a stack trace placeholder ("stacktrace...") survives.]
--------------------------------------------------------------------------------
/tests_e2e/reports_robot/simple_report_rf50.xml:
--------------------------------------------------------------------------------
[Robot Framework output XML not recoverable: markup was stripped during extraction. Surviving text content, in order:]
Verifies header contains link to request a demo - Intentionally failing
- testrail_case_field: refs:TR-1
- testrail_case_field: priority_id:2
- testrail_result_field: custom_environment:qa
- testrail_attachment: C:\Github\gurock\automation-frameworks-integration\samples\robotframework\robotframework-selenium\reports\failure-2.png
Element 'css=.breakdance-menu-list [href*='/invalid/']' not visible after 5 seconds.
--------------------------------------------------------------------------------
/tests_e2e/requirements.txt:
--------------------------------------------------------------------------------
1 | pytest
2 | pytest-md-report
3 | coverage
4 | beartype
5 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | env_list =
3 | unit-test, #click-pyyaml{60,latest}-junitparser{31,latest}-pyserde{12,latest}-requests{231,232,latest}-tqdm{465,latest}-humanfriendly{100,latest}-openapi{50,latest}-beartype{17,latest}-prance
4 |
5 | [testenv:unit-test]
6 | description = run basic unit test based on latest version of dependencies
7 | commands =
8 | pip install -r tests/requirements-tox.txt
9 | pip install -r tests/requirements-variable-deps.txt
10 | pip list
11 | coverage run -m pytest -c tests/pytest.ini -W ignore::pytest.PytestCollectionWarning tests
12 |
13 | allowlist_externals =
14 | cd
15 |
16 | [testenv]
17 | description = Run dependencies matrix + full unit test (NOTE: may take time, as different versions of each dependency are tested against each other!)
18 | deps =
19 | click: click==8.0.*
20 | pyyaml-60: pyyaml==6.0.*
21 | pyyaml-latest: pyyaml>=6.0.0,<7.0.0
22 | junitparser-31: junitparser==3.1.*
23 | junitparser-latest: junitparser>=3.1.0,<4.0.0
24 | pyserde-12: pyserde==0.12.*
25 | pyserde-latest: pyserde>=0.12.0,<1.0.0
26 | requests-231: requests==2.31.*
27 | requests-232: requests==2.32.0
28 | requests-latest: requests>=2.31.0,<3.0.0
29 | tqdm-465: tqdm==4.65.*
30 | tqdm-latest: tqdm>=4.65.0,<5.0.0
31 | humanfriendly-100: humanfriendly==10.0.*
32 | humanfriendly-latest: humanfriendly>=10.0.0,<11.0.0
33 | openapi-50: openapi-spec-validator==0.5.*
34 | openapi-latest: openapi-spec-validator>=0.5.0,<1.0.0
35 | beartype-17: beartype==0.17.*
36 | beartype-latest: beartype>=0.17.0,<1.0.0
37 | prance: prance
38 |
39 | commands_pre =
40 | pip install -r tests/requirements-tox.txt
41 | #pip install --upgrade pip
42 |
43 | commands =
44 | #Run unit test for each unique environment
45 | coverage run -m pytest -c ./tests/pytest.ini -W ignore::pytest.PytestCollectionWarning tests
46 | #Execute End to End tests
47 | #cd tests_e2e && pytest -c ./pytest.ini -W ignore::pytest.PytestCollectionWarning .
48 | allowlist_externals =
49 | pytest
50 | coverage
51 | allure
52 | pip
--------------------------------------------------------------------------------
/trcli/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "1.9.12"
2 |
--------------------------------------------------------------------------------
/trcli/api/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gurock/trcli/bf238450b555f28c9cd9db8b77a222ef909415e9/trcli/api/__init__.py
--------------------------------------------------------------------------------
/trcli/api/api_response_verify.py:
--------------------------------------------------------------------------------
1 | from serde.json import to_dict
2 | from beartype.typing import List, Union, Any, Callable
3 | from humanfriendly import parse_timespan
4 |
5 |
6 | class ApiResponseVerify:
7 | """Class for verifying if new resources added to Test Rail are created correctly.
8 | :verify: If false all verification methods are skipped.
9 | Default False because verification is an optional parameter steered by user in CLI.
10 | """
11 |
12 | def __init__(self, verify: bool = False):
13 | self.verify = verify
14 |
15 | def verify_returned_data(self, added_data: Union[dict, Any], returned_data: dict):
16 | """
17 | Check that all added_data fields are present in the returned data.
18 | For all POST requests, TestRail responds in the same format as the corresponding GET;
19 | e.g. a successful add_case POST returns the newly created test case using
20 | the same response format as get_case.
21 | :added_data: dict or dataclass
22 | :returned_data: dict
23 | """
24 | if not self.verify:
25 | return True # skip verification
26 | added_data_json = to_dict(added_data)
27 | returned_data_json = to_dict(returned_data)
28 | for key, value in added_data_json.items():
29 | if not self.field_compare(key)(returned_data_json[key], value):
30 | return False
31 |
32 | return True
33 |
34 | def verify_returned_data_for_list(
35 | self, added_data: List[dict], returned_data: List[dict]
36 | ):
37 | if not self.verify:
38 | return True # skip verification
39 | if len(added_data) != len(returned_data):
40 | return False
41 | else:
42 | comparison_result = [
43 | self.verify_returned_data(item, returned_data[index])
44 | for index, item in enumerate(added_data)
45 | ]
46 | return all(comparison_result)
47 |
48 | def field_compare(self, added_data_key: str) -> Callable:
49 | function_list = {
50 | "estimate": self.__compare_estimate,
51 | "description": self.__compare_strings,
52 | "comment": self.__compare_strings,
53 | }
54 | return (
55 | function_list[added_data_key]
56 | if added_data_key in function_list
57 | else self.__simple_comparison
58 | )
59 |
60 | @staticmethod
61 | def __simple_comparison(returned_value: Any, added_value: Any) -> bool:
62 | return returned_value == added_value
63 |
64 | @staticmethod
65 | def __compare_estimate(returned_value: str, added_value: str) -> bool:
66 | sum_time_returned = sum(map(parse_timespan, returned_value.split(" ")))
67 | sum_time_added = sum(map(parse_timespan, added_value.split(" ")))
68 | return sum_time_returned == sum_time_added
69 |
70 | @staticmethod
71 | def __compare_strings(returned_value: str, added_value: str) -> bool:
72 | returned_value = "" if returned_value in [None, ""] else returned_value
73 | added_value = "" if added_value in [None, ""] else added_value
74 | return returned_value == added_value
75 |
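A minimal usage sketch of the class above (the dictionaries are made up; the calls and comparison semantics follow the code as shown):

from trcli.api.api_response_verify import ApiResponseVerify

verifier = ApiResponseVerify(verify=True)

# "estimate" gets the timespan-aware comparison from field_compare(),
# so "90s" and "1m 30s" are considered equal.
added = {"title": "Login test", "estimate": "90s"}
returned = {"title": "Login test", "estimate": "1m 30s", "id": 42}
assert verifier.verify_returned_data(added, returned)

# With verify=False (the default) every check short-circuits to True.
assert ApiResponseVerify().verify_returned_data(added, {"unrelated": "data"})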
--------------------------------------------------------------------------------
/trcli/backports.py:
--------------------------------------------------------------------------------
1 | def removeprefix(text, prefix):
2 | """Backport of python 3.9 str.removeprefix"""
3 |
4 | if text.startswith(prefix):
5 | return text[len(prefix):]
6 | return text
7 |
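For example (behaviour matching str.removeprefix on Python 3.9+):

from trcli.backports import removeprefix

assert removeprefix("C1234 login test", "C") == "1234 login test"
assert removeprefix("login test", "C") == "login test"  # no prefix: unchanged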
--------------------------------------------------------------------------------
/trcli/commands/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gurock/trcli/bf238450b555f28c9cd9db8b77a222ef909415e9/trcli/commands/__init__.py
--------------------------------------------------------------------------------
/trcli/commands/cmd_add_run.py:
--------------------------------------------------------------------------------
1 | import click
2 | import yaml
3 |
4 | from trcli.api.project_based_client import ProjectBasedClient
5 | from trcli.cli import pass_environment, CONTEXT_SETTINGS, Environment
6 | from trcli.data_classes.dataclass_testrail import TestRailSuite
7 |
8 |
9 | def print_config(env: Environment):
10 | env.log(f"Parser Results Execution Parameters"
11 | f"\n> TestRail instance: {env.host} (user: {env.username})"
12 | f"\n> Project: {env.project if env.project else env.project_id}"
13 | f"\n> Run title: {env.title}"
14 | f"\n> Suite ID: {env.suite_id}"
15 | f"\n> Description: {env.run_description}"
16 | f"\n> Milestone ID: {env.milestone_id}"
17 | f"\n> Assigned To ID: {env.run_assigned_to_id}"
18 | f"\n> Include All: {env.run_include_all}"
19 | f"\n> Case IDs: {env.run_case_ids}"
20 | f"\n> Refs: {env.run_refs}")
21 |
22 |
23 | def write_run_to_file(environment: Environment, run_id: int):
24 | """Write the created run id and title to a yaml file that can be included in the configuration of later runs."""
25 | environment.log(f"Writing test run data to file ({environment.file}). ", new_line=False)
26 | data = dict(title=environment.title, run_id=run_id)
27 | if environment.run_description:
28 | data['run_description'] = environment.run_description
29 | if environment.run_refs:
30 | data['run_refs'] = environment.run_refs
31 | if environment.run_include_all:
32 | data['run_include_all'] = environment.run_include_all
33 | if environment.run_case_ids:
34 | data['run_case_ids'] = environment.run_case_ids
35 | if environment.run_assigned_to_id:
36 | data['run_assigned_to_id'] = environment.run_assigned_to_id
37 | with open(environment.file, "a") as f:
38 | f.write(yaml.dump(data, default_flow_style=False))
39 | environment.log("Done.")
40 |
41 |
42 | @click.command(context_settings=CONTEXT_SETTINGS)
43 | @click.option("--title", metavar="", help="Title of Test Run to be created or updated in TestRail.")
44 | @click.option(
45 | "--suite-id",
46 | type=click.IntRange(min=1),
47 | metavar="",
48 | help="Suite ID to submit results to.",
49 | )
50 | @click.option("--run-description", metavar="", default="", help="Summary text to be added to the test run.")
51 | @click.option(
52 | "--milestone-id",
53 | type=click.IntRange(min=1),
54 | metavar="",
55 | help="Milestone ID to which the Test Run should be associated to.",
56 | )
57 | @click.option(
58 | "--run-assigned-to-id",
59 | type=click.IntRange(min=1),
60 | metavar="",
61 | help="The ID of the user the test run should be assigned to."
62 | )
63 | @click.option(
64 | "--run-include-all",
65 | is_flag=True,
66 | default=False,
67 | help="Use this option to include all test cases in this test run."
68 | )
69 | @click.option(
70 | "--case-ids",
71 | metavar="",
72 | help="Comma separated list of test case IDs to include in the test run."
73 | )
74 | @click.option(
75 | "--run-refs",
76 | metavar="",
77 | help="A comma-separated list of references/requirements"
78 | )
79 | @click.option("-f", "--file", type=click.Path(), metavar="", help="Write run data to file.")
80 | @click.pass_context
81 | @pass_environment
82 | def cli(environment: Environment, context: click.Context, *args, **kwargs):
83 | """Add a new test run in TestRail"""
84 | environment.cmd = "add_run"
85 | environment.set_parameters(context)
86 | environment.check_for_required_parameters()
87 | print_config(environment)
88 |
89 | project_client = ProjectBasedClient(
90 | environment=environment,
91 | suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id),
92 | )
93 | project_client.resolve_project()
94 | project_client.resolve_suite()
95 | run_id, error_message = project_client.create_or_update_test_run()
96 | if error_message:
97 | exit(1)
98 |
99 | environment.run_id = run_id
100 | environment.log(f"title: {environment.title}")
101 | environment.log(f"run_id: {run_id}")
102 | if environment.file is not None:
103 | write_run_to_file(environment, run_id)
104 |
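As a rough illustration, the append in write_run_to_file emits YAML along these lines (run title and IDs here are hypothetical):

import yaml

data = dict(title="Nightly regression", run_id=123, run_description="CI build 456")
print(yaml.dump(data, default_flow_style=False))
# run_description: CI build 456
# run_id: 123
# title: Nightly regression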
--------------------------------------------------------------------------------
/trcli/commands/cmd_parse_junit.py:
--------------------------------------------------------------------------------
1 | from xml.etree.ElementTree import ParseError
2 |
3 | import click
4 | from junitparser import JUnitXmlError
5 |
6 | from trcli import settings
7 | from trcli.api.results_uploader import ResultsUploader
8 | from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS
9 | from trcli.commands.results_parser_helpers import results_parser_options, print_config
10 | from trcli.constants import FAULT_MAPPING
11 | from trcli.data_classes.validation_exception import ValidationException
12 | from trcli.readers.junit_xml import JunitParser
13 |
14 |
15 | @click.command(context_settings=CONTEXT_SETTINGS)
16 | @results_parser_options
17 | @click.option(
18 | "--special-parser",
19 | metavar="",
20 | default="junit",
21 | type=click.Choice(["junit", "saucectl"], case_sensitive=False),
22 | help="Optional special parser option for specialized JUnit reports."
23 | )
24 | @click.pass_context
25 | @pass_environment
26 | def cli(environment: Environment, context: click.Context, *args, **kwargs):
27 | """Parse JUnit report and upload results to TestRail"""
28 | environment.cmd = "parse_junit"
29 | environment.set_parameters(context)
30 | environment.check_for_required_parameters()
31 | settings.ALLOW_ELAPSED_MS = environment.allow_ms
32 | print_config(environment)
33 | try:
34 | parsed_suites = JunitParser(environment).parse_file()
35 | for suite in parsed_suites:
36 | result_uploader = ResultsUploader(environment=environment, suite=suite)
37 | result_uploader.upload_results()
38 | except FileNotFoundError:
39 | environment.elog(FAULT_MAPPING["missing_file"])
40 | exit(1)
41 | except (JUnitXmlError, ParseError):
42 | environment.elog(FAULT_MAPPING["invalid_file"])
43 | exit(1)
44 | except ValidationException as exception:
45 | environment.elog(
46 | FAULT_MAPPING["dataclass_validation_error"].format(
47 | field=exception.field_name,
48 | class_name=exception.class_name,
49 | reason=exception.reason,
50 | )
51 | )
52 | exit(1)
53 |
--------------------------------------------------------------------------------
/trcli/commands/cmd_parse_openapi.py:
--------------------------------------------------------------------------------
1 | from xml.etree.ElementTree import ParseError
2 |
3 | import click
4 | from junitparser import JUnitXmlError
5 |
6 | from trcli import settings
7 | from trcli.api.results_uploader import ResultsUploader
8 | from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS
9 | from trcli.constants import FAULT_MAPPING
10 | from trcli.data_classes.validation_exception import ValidationException
11 | from trcli.readers.openapi_yml import OpenApiParser
12 |
13 |
14 | def print_config(env: Environment):
15 | env.log(f"Parse OpenAPI Execution Parameters"
16 | f"\n> OpenAPI file: {env.file}"
17 | f"\n> Config file: {env.config}"
18 | f"\n> TestRail instance: {env.host} (user: {env.username})"
19 | f"\n> Project: {env.project if env.project else env.project_id}"
20 | f"\n> Auto-create entities: {env.auto_creation_response}")
21 |
22 |
23 | @click.command(context_settings=CONTEXT_SETTINGS)
24 | @click.option("-f", "--file", type=click.Path(), metavar="", help="Filename and path.")
25 | @click.option(
26 | "--suite-id",
27 | type=click.IntRange(min=1),
28 | metavar="",
29 | help="Suite ID to create the tests in (if project is multi-suite).",
30 | )
31 | @click.option(
32 | "--case-fields",
33 | multiple=True,
34 | metavar="",
35 | default=[],
36 | help="List of case fields and values for new test cases creation. "
37 | "Usage: --case-fields type_id:1 --case-fields priority_id:3",
38 | )
39 | @click.pass_context
40 | @pass_environment
41 | def cli(environment: Environment, context: click.Context, *args, **kwargs):
42 | """Parse OpenAPI spec and create cases in TestRail"""
43 | environment.cmd = "parse_openapi"
44 | environment.set_parameters(context)
45 | environment.check_for_required_parameters()
46 | settings.ALLOW_ELAPSED_MS = environment.allow_ms
47 | print_config(environment)
48 | try:
49 | parsed_suites = OpenApiParser(environment).parse_file()
50 | for suite in parsed_suites:
51 | result_uploader = ResultsUploader(environment=environment, suite=suite, skip_run=True)
52 | result_uploader.upload_results()
53 | except FileNotFoundError:
54 | environment.elog(FAULT_MAPPING["missing_file"])
55 | exit(1)
56 | except (JUnitXmlError, ParseError):
57 | environment.elog(FAULT_MAPPING["invalid_file"])
58 | exit(1)
59 | except ValidationException as exception:
60 | environment.elog(
61 | FAULT_MAPPING["dataclass_validation_error"].format(
62 | field=exception.field_name,
63 | class_name=exception.class_name,
64 | reason=exception.reason,
65 | )
66 | )
67 | exit(1)
68 |
--------------------------------------------------------------------------------
/trcli/commands/cmd_parse_robot.py:
--------------------------------------------------------------------------------
1 | from xml.etree.ElementTree import ParseError
2 |
3 | import click
4 |
5 | from trcli import settings
6 | from trcli.api.results_uploader import ResultsUploader
7 | from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS
8 | from trcli.commands.results_parser_helpers import results_parser_options, print_config
9 | from trcli.constants import FAULT_MAPPING
10 | from trcli.data_classes.validation_exception import ValidationException
11 | from trcli.readers.robot_xml import RobotParser
12 |
13 |
14 | @click.command(context_settings=CONTEXT_SETTINGS)
15 | @results_parser_options
16 | @click.pass_context
17 | @pass_environment
18 | def cli(environment: Environment, context: click.Context, *args, **kwargs):
19 | """Parse Robot Framework report and upload results to TestRail"""
20 | environment.cmd = "parse_robot"
21 | environment.set_parameters(context)
22 | environment.check_for_required_parameters()
23 | settings.ALLOW_ELAPSED_MS = environment.allow_ms
24 | print_config(environment)
25 | try:
26 | parsed_suites = RobotParser(environment).parse_file()
27 | for suite in parsed_suites:
28 | result_uploader = ResultsUploader(environment=environment, suite=suite)
29 | result_uploader.upload_results()
30 | except FileNotFoundError:
31 | environment.elog(FAULT_MAPPING["missing_file"])
32 | exit(1)
33 | except ParseError:
34 | environment.elog(FAULT_MAPPING["invalid_file"])
35 | exit(1)
36 | except ValidationException as exception:
37 | environment.elog(
38 | FAULT_MAPPING["dataclass_validation_error"].format(
39 | field=exception.field_name,
40 | class_name=exception.class_name,
41 | reason=exception.reason,
42 | )
43 | )
44 | exit(1)
45 |
46 |
--------------------------------------------------------------------------------
/trcli/commands/results_parser_helpers.py:
--------------------------------------------------------------------------------
1 | import functools
2 |
3 | import click
4 | from click import BadParameter
5 |
6 | from trcli.cli import Environment
7 |
8 |
9 | def print_config(env: Environment):
10 | env.log(f"Parser Results Execution Parameters"
11 | f"\n> Report file: {env.file}"
12 | f"\n> Config file: {env.config}"
13 | f"\n> TestRail instance: {env.host} (user: {env.username})"
14 | f"\n> Project: {env.project if env.project else env.project_id}"
15 | f"\n> Run title: {env.title}"
16 | f"\n> Update run: {env.run_id if env.run_id else 'No'}"
17 | f"\n> Add to milestone: {env.milestone_id if env.milestone_id else 'No'}"
18 | f"\n> Auto-create entities: {env.auto_creation_response}")
19 |
20 |
21 | def resolve_comma_separated_list(ctx, param, value):
22 | if value:
23 | try:
24 | return [int(part.strip()) for part in value.split(',')]
25 | except ValueError:
26 | raise BadParameter('Invalid format, use a comma-separated list (e.g.: 43,19)')
27 |
28 |
29 | def results_parser_options(f):
30 | @click.option("-f", "--file", type=click.Path(), metavar="", help="Filename and path.")
31 | @click.option("--close-run", is_flag=True, help="Close the newly created run")
32 | @click.option("--title", metavar="", help="Title of Test Run to be created or updated in TestRail.")
33 | @click.option(
34 | "--case-matcher",
35 | metavar="",
36 | default="auto",
37 | type=click.Choice(["auto", "name", "property"], case_sensitive=False),
38 | help="Mechanism to match cases between the report and TestRail."
39 | )
40 | @click.option(
41 | "--suite-id",
42 | type=click.IntRange(min=1),
43 | metavar="",
44 | help="Suite ID to submit results to.",
45 | )
46 | @click.option(
47 | "--suite-name",
48 | metavar="",
49 | help="Suite name to submit results to.",
50 | )
51 | @click.option(
52 | "--run-id",
53 | type=click.IntRange(min=1),
54 | metavar="",
55 | help="Run ID for the results they are reporting (otherwise the tool will attempt to create a new run).",
56 | )
57 | @click.option(
58 | "--plan-id",
59 | type=click.IntRange(min=1),
60 | metavar="",
61 | help="Plan ID with which the Test Run will be associated.",
62 | )
63 | @click.option(
64 | "--config-ids",
65 | metavar="",
66 | callback=resolve_comma_separated_list,
67 | help="Comma-separated configuration IDs to use along with Test Plans (i.e.: 34,52).",
68 | )
69 | @click.option(
70 | "--milestone-id",
71 | type=click.IntRange(min=1),
72 | metavar="",
73 | help="Milestone ID to which the Test Run should be associated to.",
74 | )
75 | @click.option(
76 | "--section-id",
77 | type=click.IntRange(min=1),
78 | metavar="",
79 | help="Section ID to create new sections with test cases under (optional).",
80 | )
81 | @click.option("--run-description", metavar="", default="", help="Summary text to be added to the test run.")
82 | @click.option(
83 | "--case-fields",
84 | multiple=True,
85 | metavar="",
86 | default=[],
87 | help="List of case fields and values for new test cases creation. "
88 | "Usage: --case-fields type_id:1 --case-fields priority_id:3",
89 | )
90 | @click.option(
91 | "--result-fields",
92 | multiple=True,
93 | metavar="",
94 | default=[],
95 | help="List of result fields and values for test results creation. "
96 | "Usage: --result-fields custom_field_a:value1 --result-fields custom_field_b:3",
97 | )
98 | @click.option("--allow-ms", is_flag=True, help="Allows using milliseconds for elapsed times.")
99 | @functools.wraps(f)
100 | def wrapper_common_options(*args, **kwargs):
101 | return f(*args, **kwargs)
102 |
103 | return wrapper_common_options
104 |
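A quick illustration of the callback above (None stands in for click's ctx and param arguments, which the function ignores):

from click import BadParameter

from trcli.commands.results_parser_helpers import resolve_comma_separated_list

assert resolve_comma_separated_list(None, None, "43, 19") == [43, 19]
assert resolve_comma_separated_list(None, None, "") is None  # falsy input: no-op

try:
    resolve_comma_separated_list(None, None, "43;19")
except BadParameter:
    pass  # non-numeric parts are rejected with a usage hint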
--------------------------------------------------------------------------------
/trcli/data_classes/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gurock/trcli/bf238450b555f28c9cd9db8b77a222ef909415e9/trcli/data_classes/__init__.py
--------------------------------------------------------------------------------
/trcli/data_classes/data_parsers.py:
--------------------------------------------------------------------------------
1 | import re, ast
2 | from beartype.typing import Union, List, Dict, Tuple
3 |
4 |
5 | class MatchersParser:
6 |
7 | AUTO = "auto"
8 | NAME = "name"
9 | PROPERTY = "property"
10 |
11 | @staticmethod
12 | def parse_name_with_id(case_name: str) -> Tuple[int, str]:
13 | """Parses case names expecting an ID following one of the following patterns:
14 | - "C123 my test case"
15 | - "my test case C123"
16 | - "C123_my_test_case"
17 | - "my_test_case_C123"
18 | - "module_1_C123_my_test_case"
19 | - "[C123] my test case"
20 | - "my test case [C123]"
21 | - "module 1 [C123] my test case"
22 |
23 | :param case_name: Name of the test case
24 | :return: Tuple with test case ID and test case name without the ID
25 | """
26 | for char in [" ", "_"]:
27 | parts = case_name.split(char)
28 | parts_copy = parts.copy()
29 | for idx, part in enumerate(parts):
30 | if part.lower().startswith("c") and len(part) > 1:
31 | id_part = part[1:]
32 | if id_part.isnumeric():
33 | parts_copy.pop(idx)
34 | return int(id_part), char.join(parts_copy)
35 |
36 | results = re.findall(r"\[(.*?)\]", case_name)
37 | for result in results:
38 | if result.lower().startswith("c"):
39 | case_id = result[1:]
40 | if case_id.isnumeric():
41 | id_tag = f"[{result}]"
42 | tag_idx = case_name.find(id_tag)
43 | case_name = f"{case_name[0:tag_idx].strip()} {case_name[tag_idx + len(id_tag):].strip()}".strip()
44 | return int(case_id), case_name
45 |
46 | return None, case_name
47 |
48 |
49 | class FieldsParser:
50 |
51 | @staticmethod
52 | def resolve_fields(fields: Union[List[str], Dict]) -> Tuple[Dict, str]:
53 | error = None
54 | fields_dictionary = {}
55 | try:
56 | if isinstance(fields, list) or isinstance(fields, tuple):
57 | for field in fields:
58 | field, value = field.split(":", maxsplit=1)
59 | if value.startswith("["):
60 | try:
61 | value = ast.literal_eval(value)
62 | except Exception:
63 | pass
64 | fields_dictionary[field] = value
65 | elif isinstance(fields, dict):
66 | fields_dictionary = fields
67 | else:
68 | error = f"Invalid field type ({type(fields)}), supported types are tuple/list/dictionary"
69 | return fields_dictionary, error
70 | except Exception as ex:
71 | return fields_dictionary, f"Error parsing fields: {ex}"
72 |
73 | class TestRailCaseFieldsOptimizer:
74 |
75 | MAX_TESTCASE_TITLE_LENGTH = 250
76 |
77 | @staticmethod
78 | def extract_last_words(input_string, max_characters=MAX_TESTCASE_TITLE_LENGTH):
79 | if input_string is None:
80 | return None
81 |
82 | # Define delimiters for splitting words
83 | delimiters = [' ', '\t', ';', ':', '>', '/', '.']
84 |
85 | # Replace multiple consecutive delimiters with a single space
86 | regex_pattern = '|'.join(map(re.escape, delimiters))
87 | cleaned_string = re.sub(f'[{regex_pattern}]+', ' ', input_string.strip())
88 |
89 | # Split the cleaned string into words
90 | words = cleaned_string.split()
91 |
92 | # Extract the last words up to the maximum character limit
93 | extracted_words = []
94 | current_length = 0
95 | for word in reversed(words):
96 | if current_length + len(word) <= max_characters:
97 | extracted_words.append(word)
98 | current_length += len(word) + 1 # Add 1 for the space between words
99 | else:
100 | break
101 |
102 | # Reverse the extracted words to maintain the original order
103 | result = ' '.join(reversed(extracted_words))
104 |
105 | # as fallback, return the last characters if the result is empty
106 | if result.strip() == "":
107 | result = input_string[-max_characters:]
108 |
109 | return result
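Illustrative calls against the two helpers above (expected outputs follow from the code as shown):

from trcli.data_classes.data_parsers import MatchersParser, TestRailCaseFieldsOptimizer

assert MatchersParser.parse_name_with_id("C123 my test case") == (123, "my test case")
assert MatchersParser.parse_name_with_id("my_test_case_C123") == (123, "my_test_case")
assert MatchersParser.parse_name_with_id("module 1 [C123] my test case") == (123, "module 1 my test case")
assert MatchersParser.parse_name_with_id("no id here") == (None, "no id here")

# Over-long titles keep as many of the *last* words as fit in 250 characters.
long_title = "step " * 100 + "final assertion"
short_title = TestRailCaseFieldsOptimizer.extract_last_words(long_title)
assert len(short_title) <= 250 and short_title.endswith("final assertion")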
--------------------------------------------------------------------------------
/trcli/data_classes/validation_exception.py:
--------------------------------------------------------------------------------
1 | class ValidationException(Exception):
2 | """Exception raised for validation errors in dataclass.
3 |
4 | Attributes:
5 | field_name: input field name that didn't pass validation
6 | class_name: input class name that didn't pass validation
7 | reason: reason for the validation failure
8 | """
9 |
10 | def __init__(self, field_name: str, class_name: str, reason=""):
11 | self.field_name = field_name
12 | self.class_name = class_name
13 | self.reason = reason
14 | super().__init__(f"Unable to parse {field_name} in {class_name} property. {reason}")
15 |
--------------------------------------------------------------------------------
/trcli/readers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gurock/trcli/bf238450b555f28c9cd9db8b77a222ef909415e9/trcli/readers/__init__.py
--------------------------------------------------------------------------------
/trcli/readers/file_parser.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from abc import abstractmethod
3 | from beartype.typing import Union, List
4 |
5 | from trcli.cli import Environment
6 | from trcli.data_classes.dataclass_testrail import TestRailSuite
7 |
8 |
9 | class FileParser:
10 | """
11 | Each new parser should inherit from this class to keep file reading modular.
12 | """
13 |
14 | def __init__(self, environment: Environment):
15 | self.filepath = self.check_file(environment.file)
16 | self.filename = self.filepath.name
17 | self.env = environment
18 |
19 | @staticmethod
20 | def check_file(filepath: Union[str, Path]) -> Path:
21 | filepath = Path(filepath)
22 | if not filepath.is_file():
23 | raise FileNotFoundError("File not found.")
24 | return filepath
25 |
26 | @abstractmethod
27 | def parse_file(self) -> List[TestRailSuite]:
28 | raise NotImplementedError
29 |
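A minimal sketch of a new parser built on this base (hypothetical subclass; only the FileParser contract and the TestRailSuite constructor usage are taken from code shown in this repository):

from beartype.typing import List

from trcli.data_classes.dataclass_testrail import TestRailSuite
from trcli.readers.file_parser import FileParser


class PlainTextParser(FileParser):
    """Hypothetical parser emitting one suite named after the source file."""

    def parse_file(self) -> List[TestRailSuite]:
        self.env.log(f"Parsing {self.filepath}.")
        # A real parser would also populate sections and cases here.
        return [TestRailSuite(name=self.filename, source=self.filename)]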
--------------------------------------------------------------------------------
/trcli/settings.py:
--------------------------------------------------------------------------------
1 | MAX_WORKERS_ADD_CASE = 10
2 | MAX_WORKERS_ADD_RESULTS = 10
3 | DEFAULT_API_CALL_RETRIES = 3
4 | DEFAULT_API_CALL_TIMEOUT = 30
5 | DEFAULT_BATCH_SIZE = 50
6 | ALLOW_ELAPSED_MS = False
7 |
--------------------------------------------------------------------------------