self = <test_login_example.LoginTestCase testMethod=test_login_2>
2 |
3 | def test_login_2(self):
4 | """This is an expected failure
5 |
6 | :id: 5adbfbe3-9594-46bb-b8b6-d8ef3dbca6b6
7 |
8 | :steps:
9 |
10 | 1. First Step
11 | 2. Second Step
12 |
13 | :expectedresults:
14 |
15 | 1. First Result
16 | 2. Second Result
17 | """
18 | > self.fail('Expected failure')
19 | E AssertionError: Expected failure
20 |
test_login_example.py:39: AssertionError
--------------------------------------------------------------------------------
/sample_project/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | :requirement: Global Requirement
3 | """
4 |
--------------------------------------------------------------------------------
/sample_project/tests/test_init.py:
--------------------------------------------------------------------------------
1 | """Tests for the fields defined on the __init__.py module."""
2 |
3 |
4 | def test_global_requirement(self):
5 | """Test case to find if the global requirement will be pulled.
6 |
7 | :id: 3b658fe1-3d96-4ddb-bdf9-2abd950567c7
8 |
9 | :steps: Run Betelgeuse and check if it collects this test
10 | case.
11 |
12 | :expectedresults: The test case must have the requirement defined as the
13 | Global Requirement from the ``__init__.py`` file.
14 | """
15 | pass
16 |
--------------------------------------------------------------------------------
/sample_project/tests/test_login_example.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | """Test class for Login
3 |
4 | :requirement: Importer Test
5 | """
6 |
7 | import unittest
8 |
9 |
10 | class LoginTestCase(unittest.TestCase):
11 | """Tests for Login"""
12 |
13 | def test_login_1(self):
14 | """Check if a user is able to login with valid userid and password
15 |
16 | :id: 60e48736-43a9-11e6-bcaa-104a7da122d7
17 |
18 | :steps: Login to UI with valid userid and password
19 |
20 | :expectedresults: User is able to login successfully
21 | """
22 | pass
23 |
24 | def test_login_2(self):
25 | """This is an expected failure
26 |
27 | :id: 5adbfbe3-9594-46bb-b8b6-d8ef3dbca6b6
28 |
29 | :steps:
30 |
31 | 1. First Step
32 | 2. Second Step
33 |
34 | :expectedresults:
35 |
36 | 1. First Result
37 | 2. Second Result
38 | """
39 | self.fail('Expected failure')
40 |
41 | def test_login_3(self):
42 | """This is an expected skip
43 |
44 | :id: 76fdbb37-1b05-4f90-918e-d34e5e22ed7e
45 | """
46 | self.skipTest('Expected skip')
47 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | """A setuptools-based script for installing Betelgeuse."""
4 | from setuptools import find_packages, setup
5 |
6 | with open('README.rst') as handle:
7 | LONG_DESCRIPTION = handle.read()
8 |
9 | with open('VERSION') as handle:
10 | VERSION = handle.read().strip()
11 |
12 | setup(
13 | name='Betelgeuse',
14 | author='Elyézer Rezende, Og Maciel',
15 | author_email='erezende@redhat.com, omaciel@redhat.com',
16 | version=VERSION,
17 | packages=find_packages(include=['betelgeuse', 'betelgeuse.*']),
18 | install_requires=['click', 'docutils'],
19 | # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
20 | classifiers=[
21 | 'Development Status :: 5 - Production/Stable',
22 | 'Intended Audience :: Developers',
23 | 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
24 | 'Programming Language :: Python :: 3 :: Only',
25 | 'Programming Language :: Python :: 3',
26 | 'Programming Language :: Python :: 3.6',
27 | 'Programming Language :: Python :: 3.7',
28 | 'Programming Language :: Python :: 3.8',
29 | 'Programming Language :: Python :: 3.9',
30 | ],
31 | description=(
32 | 'Betelgeuse is a Python program that reads standard Python test cases '
33 | 'and generates XML files that are suited to be imported by Polarion '
34 | 'importers.'
35 | ),
36 | entry_points="""
37 | [console_scripts]
38 | betelgeuse=betelgeuse:cli
39 | """,
40 | include_package_data=True,
41 | license='GPLv3',
42 | long_description=LONG_DESCRIPTION,
43 | package_data={'': ['LICENSE']},
44 | url='https://github.com/SatelliteQE/betelgeuse',
45 | )
46 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """Tests for Betelgeuse."""
2 |
--------------------------------------------------------------------------------
/tests/data/__init__.py:
--------------------------------------------------------------------------------
1 | """Sample tests package."""
2 |
--------------------------------------------------------------------------------
/tests/data/ignore_dir/__init__.py:
--------------------------------------------------------------------------------
1 | """Package with tests to be ignored."""
2 |
--------------------------------------------------------------------------------
/tests/data/ignore_dir/test_ignore_dir.py:
--------------------------------------------------------------------------------
1 | """Tests to be ignored during collection."""
2 |
3 |
4 | def test_ignore_1():
5 | """Test ignore 1.
6 |
7 | :field1: value1
8 | :field2: value2
9 | """
10 |
11 |
12 | def test_ignore_2():
13 | """Test ignore 2.
14 |
15 | :field1: value1
16 | :field2: value2
17 | """
18 |
--------------------------------------------------------------------------------
/tests/data/test_sample.py:
--------------------------------------------------------------------------------
1 | # encoding=utf-8
2 | """Sample test module."""
3 | import unittest
4 | import pytest
5 |
6 |
7 | pytestmark = [pytest.mark.run_in_one_thread, pytest.mark.tier1]
8 |
9 | CONSTANT = 'constant-value'
10 |
11 |
12 | def decorator(func):
13 | """No-op decorator."""
14 | return func
15 |
16 |
17 | decorator.mark = object()
18 |
19 |
20 | def decorator_with_args(*args, **kwargs):
21 | """No-op decorator that expects arguments."""
22 | def inner(func):
23 | return func
24 | return inner
25 |
26 |
27 | def test_function():
28 | """Test function.
29 |
30 | :field1: value1
31 | :field2: value2
32 | """
33 | pass
34 |
35 |
36 | @decorator
37 | @decorator.mark.something
38 | @decorator_with_args([1, b'bytes', ('a', 'b'), None])
39 | @decorator_with_args(*[True, (True or False) and True])
40 | @decorator_with_args((f'{CONSTANT!r:5>} with literal {{ and }}',))
41 | @decorator_with_args({1, 2, -3})
42 | @decorator_with_args({'a': 1, 'b': 2, **{'c': 3}})
43 | @decorator_with_args([1, 2][0], [1, 2][:1], [1, 2][0:], [1, 2][0:1:1])
44 | @decorator_with_args([i for i in range(5) if i % 2 == 0])
45 | @decorator_with_args((i for i in range(5)))
46 | @decorator_with_args({i for i in range(5)})
47 | @decorator_with_args({k: v for k in 'abcde' for v in range(5)})
48 | @decorator_with_args(1, 2, 3, a=1, b=2)
49 | @decorator_with_args(
50 | dict(a=1, b=2),
51 | dict(**{'a': 1}),
52 | vars(decorator.mark),
53 | lambda a, *args, b=1, **kwargs: (a, args, b, kwargs),
54 | lambda a, *, b=1: (a, b),
55 | lambda v: v if v else None,
56 | )
57 | def test_decorated_test():
58 | """Test decorated function.
59 |
60 | :field1: value1
61 | :field2: value2
62 | """
63 |
64 |
65 | class TestCase(unittest.TestCase):
66 | """Test case."""
67 |
68 | def test_method(self):
69 | """Test method.
70 |
71 | :field1: value1
72 | :field2: value2
73 | """
74 | pass
75 |
76 | def test_without_docstring(self): # noqa: D102
77 | pass
78 |
79 |
80 | @pytest.mark.on_prem_provisioning
81 | class TestclasswithMarkers:
82 | """Class to verify tests markers are collected from class."""
83 |
84 | @pytest.mark.skipif(2 == 3, reason='2 is not 3')
85 | @pytest.mark.osp
86 | def test_markers_sample(self):
87 | """Test for markers at test level."""
88 | assert True
89 |
--------------------------------------------------------------------------------
/tests/test_betelgeuse.py:
--------------------------------------------------------------------------------
1 | """Betelgeuse unit tests."""
2 | import click
3 | import mock
4 | import operator
5 | import os
6 | import pytest
7 | import re
8 |
9 | from click.testing import CliRunner
10 | from betelgeuse import (
11 | INVALID_CHARS_REGEX,
12 | cli,
13 | create_xml_property,
14 | create_xml_testcase,
15 | default_config,
16 | load_custom_fields,
17 | map_steps,
18 | parse_junit,
19 | parse_test_results,
20 | validate_key_value_option,
21 | )
22 | from betelgeuse.config import BetelgeuseConfig
23 | from io import StringIO
24 | from xml.etree import ElementTree
25 |
26 |
27 | JUNIT_XML = """
28 | <testsuite>
29 | <testcase classname="foo1" name="test_passed" file="source.py" line="8"/>
30 | <testcase classname="foo1" name="test_passed_no_id"/>
31 | <testcase classname="foo2" name="test_skipped">
32 | <skipped message="Skipped message">...</skipped>
33 | </testcase>
34 | <testcase classname="foo3" name="test_failure">
35 | <failure message="Failure message" type="Type">...</failure>
36 | </testcase>
37 | <testcase classname="foo4" name="test_error">
38 | <error message="Error message" type="ExceptionName">...</error>
39 | </testcase>
40 | <testcase classname="foo1" name="test_parametrized[a]"/>
41 | <testcase classname="foo1" name="test_parametrized[b]"/>
42 | </testsuite>
43 | """
44 |
45 | TEST_MODULE = ''' # noqa: Q000
46 | def test_something():
47 | """This test something."""
48 |
49 | def test_something_else():
50 | """This test something else."""
51 | '''
52 |
53 |
54 | MULTIPLE_STEPS = """
55 | First step
56 | Second step
57 | Third step
58 |
59 | """
60 |
61 | MULTIPLE_EXPECTEDRESULTS = """
62 | First step expected result.
63 | Second step expected result.
64 | Third step expected result.
65 |
66 | """
67 |
68 | SINGLE_STEP = """Single step
"""
69 |
70 | SINGLE_EXPECTEDRESULT = """Single step expected result.
"""
71 |
72 |
73 | @pytest.fixture
74 | def cli_runner():
75 | """Return a `click`->`CliRunner` object."""
76 | return CliRunner()
77 |
78 |
79 | def test_load_custom_fields():
80 | """Check if custom fields can be loaded using = notation."""
81 | assert load_custom_fields(('isautomated=true',)) == {
82 | 'isautomated': 'true'
83 | }
84 |
85 |
86 | def test_load_custom_fields_empty():
87 | """Check if empty value return empty dict for custom fields."""
88 | assert load_custom_fields(('',)) == {}
89 |
90 |
91 | def test_load_custom_fields_none():
92 | """Check if None value return empty dict for custom fields."""
93 | assert load_custom_fields(None) == {}
94 |
95 |
96 | def test_load_custom_fields_json():
97 | """Check if custom fields can be loaded using JSON data."""
98 | assert load_custom_fields(('{"isautomated":true}',)) == {
99 | 'isautomated': True,
100 | }
101 |
102 |
103 | def test_map_single_step():
104 | """Check if mapping single step works."""
105 | mapped = [(SINGLE_STEP, SINGLE_EXPECTEDRESULT)]
106 | assert map_steps(SINGLE_STEP, SINGLE_EXPECTEDRESULT) == mapped
107 |
108 |
109 | def test_map_multiple_steps():
110 | """Check if mapping multiple steps works."""
111 | assert map_steps(MULTIPLE_STEPS, MULTIPLE_EXPECTEDRESULTS) == [
112 | ('<p>First step</p>', '<p>First step expected result.</p>'),
113 | ('<p>Second step</p>', '<p>Second step expected result.</p>'),
114 | ('<p>Third step</p>', '<p>Third step expected result.</p>'),
115 | ]
116 |
117 |
118 | def test_get_multiple_steps_diff_items():
119 | """Check if parsing multiple steps of different items works."""
120 | multiple_steps = '\n'.join(MULTIPLE_STEPS.splitlines()[:-2] + ['\n'])
121 | assert map_steps(
122 | multiple_steps, MULTIPLE_EXPECTEDRESULTS) == [(
123 | '<ol>\n<li><p>First step</p></li>\n'
124 | '<li><p>Second step</p></li>\n\n',
125 | MULTIPLE_EXPECTEDRESULTS
126 | )]
127 |
128 |
129 | def test_parse_junit():
130 | """Check if jUnit parsing works."""
131 | junit_xml = StringIO(JUNIT_XML)
132 | assert parse_junit(junit_xml) == [
133 | {'classname': 'foo1', 'name': 'test_passed', 'status': 'passed',
134 | 'line': '8', 'file': 'source.py'},
135 | {'classname': 'foo1', 'name': 'test_passed_no_id', 'status': 'passed'},
136 | {'classname': 'foo2', 'message': 'Skipped message',
137 | 'name': 'test_skipped', 'status': 'skipped'},
138 | {'classname': 'foo3', 'name': 'test_failure',
139 | 'message': 'Failure message', 'status': 'failure', 'type': 'Type'},
140 | {'classname': 'foo4', 'name': 'test_error', 'message': 'Error message',
141 | 'status': 'error', 'type': 'ExceptionName'},
142 | {'classname': 'foo1', 'name': 'test_parametrized[a]',
143 | 'status': 'passed'},
144 | {'classname': 'foo1', 'name': 'test_parametrized[b]',
145 | 'status': 'passed'},
146 | ]
147 | junit_xml.close()
148 |
149 |
150 | def test_invalid_test_run_chars_regex():
151 | """Check if invalid test run characters are handled."""
152 | invalid_test_run_id = '\\/.:*"<>|~!@#$?%^&\'*()+`,='
153 | assert re.sub(INVALID_CHARS_REGEX, '', invalid_test_run_id) == ''
154 |
155 |
156 | def test_parse_test_results():
157 | """Check if parsing test results works."""
158 | test_results = [
159 | {'status': u'passed',
160 | 'name': 'test_positive_read',
161 | 'classname': 'tests.api.test_ReadTestCase',
162 | 'file': 'tests/api/test_foo.py',
163 | 'time': '4.13224601746',
164 | 'line': '521'},
165 | {'status': u'passed',
166 | 'name': 'test_positive_delete',
167 | 'classname': 'tests.api.test_ReadTestCase',
168 | 'file': 'tests/api/test_foo.py',
169 | 'time': '4.13224601746',
170 | 'line': '538'},
171 | {'status': u'failure',
172 | 'name': 'test_negative_read',
173 | 'classname': 'tests.api.test_ReadTestCase',
174 | 'file': 'tests/api/test_foo.py',
175 | 'time': '4.13224601746',
176 | 'line': '218'},
177 | {'status': u'skipped',
178 | 'name': 'test_positive_update',
179 | 'classname': 'tests.api.test_ReadTestCase',
180 | 'file': 'tests/api/test_foo.py',
181 | 'time': '4.13224601746',
182 | 'line': '112'},
183 | {'status': u'error',
184 | 'name': 'test_positive_create',
185 | 'classname': 'tests.api.test_ReadTestCase',
186 | 'file': 'tests/api/test_foo.py',
187 | 'time': '4.13224601746',
188 | 'line': '788'},
189 | ]
190 | summary = parse_test_results(test_results)
191 | assert summary['passed'] == 2
192 | assert summary['failure'] == 1
193 | assert summary['skipped'] == 1
194 | assert summary['error'] == 1
195 |
196 |
197 | def test_test_results(cli_runner):
198 | """Check if test results command works."""
199 | with cli_runner.isolated_filesystem():
200 | with open('results.xml', 'w') as handler:
201 | handler.write(JUNIT_XML)
202 | result = cli_runner.invoke(
203 | cli, ['test-results', '--path', 'results.xml'])
204 | assert result.exit_code == 0
205 | assert 'Error: 1\n' in result.output
206 | assert 'Failure: 1\n' in result.output
207 | assert 'Passed: 4\n' in result.output
208 | assert 'Skipped: 1\n' in result.output
209 |
210 |
211 | def test_test_results_default_path(cli_runner):
212 | """Check if test results in the default path works."""
213 | with cli_runner.isolated_filesystem():
214 | with open('junit-results.xml', 'w') as handler:
215 | handler.write(JUNIT_XML)
216 | result = cli_runner.invoke(cli, ['test-results'])
217 | assert result.exit_code == 0
218 | assert 'Error: 1\n' in result.output
219 | assert 'Failure: 1\n' in result.output
220 | assert 'Passed: 4\n' in result.output
221 | assert 'Skipped: 1\n' in result.output
222 |
223 |
224 | def test_create_xml_property():
225 | """Check if create_xml_property creates the expected XML tag."""
226 | generated = ElementTree.tostring(
227 | create_xml_property('name', 'value'),
228 | encoding='unicode'
229 | )
230 | assert generated == '<property name="name" value="value" />'
231 |
232 |
233 | def test_create_xml_testcase():
234 | """Check if create_xml_testcase creates the expected XML tag."""
235 | testcase = mock.MagicMock()
236 | testcase.name = 'test_it_works'
237 | testcase.parent_class = 'FeatureTestCase'
238 | testcase.testmodule = 'tests/test_feature.py'
239 | testcase.docstring = 'Test feature docstring'
240 | testcase.fields = {
241 | field: field for field in
242 | default_config.TESTCASE_FIELDS + default_config.TESTCASE_CUSTOM_FIELDS
243 | }
244 | testcase.fields['parametrized'] = 'yes'
245 | config = BetelgeuseConfig()
246 | generated = ElementTree.tostring(
247 | create_xml_testcase(config, testcase, '{path}#{line_number}'),
248 | encoding='unicode'
249 | )
250 | assert generated == (
251 | 'title'
254 | 'description'
255 | ''
257 | 'steps'
258 | 'expectedresults'
259 | ''
260 | ''
261 | 'Iteration: '
262 | ''
263 | 'Pass'
264 | ''
265 | ''
266 | ''
267 | ''
268 | ''
269 | ''
270 | ''
271 | ''
272 | ''
273 | ''
274 | ''
275 | ''
276 | ''
277 | ''
278 | ''
279 | ''
280 | ''
281 | ''
282 | ''
283 | ''
284 | ''
285 | ''
286 | ''
287 | ''
288 | ''
289 | ''
290 | ''
291 | ''
292 | ''
293 | ''
294 | ''
295 | )
296 |
297 |
298 | def test_requirement(cli_runner):
299 | """Check if requirement command works."""
300 | with cli_runner.isolated_filesystem():
301 | with open('source.py', 'w') as handler:
302 | handler.write('')
303 | with mock.patch('betelgeuse.collector.collect_tests') as collect_tests:
304 | return_value_testcases = []
305 | for index in range(5):
306 | t = mock.MagicMock()
307 | t.docstring = None
308 | t.fields = {'requirement': f'requirement{index}'}
309 | return_value_testcases.append(t)
310 |
311 | collect_tests.return_value = {
312 | 'source.py': return_value_testcases,
313 | }
314 | result = cli_runner.invoke(
315 | cli,
316 | [
317 | 'requirement',
318 | '--approver', 'approver1',
319 | '--approver', 'approver2',
320 | '--assignee', 'assignee',
321 | '--dry-run',
322 | '--response-property', 'property_key=property_value',
323 | 'source.py',
324 | 'projectid',
325 | 'requirements.xml'
326 | ]
327 | )
328 | assert result.exit_code == 0, result.output
329 | assert result.output.strip() == ''
330 | collect_tests.assert_called_once_with('source.py', ())
331 | assert os.path.isfile('requirements.xml')
332 | root = ElementTree.parse('requirements.xml').getroot()
333 | assert root.tag == 'requirements'
334 | properties = root.find('properties')
335 | assert properties
336 | properties = [p.attrib for p in properties.findall('property')]
337 | expected = [
338 | {'name': 'lookup-method', 'value': 'name'},
339 | {'name': 'dry-run', 'value': 'true'},
340 | ]
341 | for p in properties:
342 | assert p in expected
343 | for index, requirement in enumerate(root.findall('requirement')):
344 | children = [
345 | ElementTree.tostring(child, encoding='unicode')
346 | for child in requirement
347 | ]
348 | assert children == [
349 | f'requirement{index}',
350 | ''
351 | ''
352 | ''
353 | ]
354 | assert requirement.attrib == {
355 | 'approver-ids': 'approver1:approved approver2:approved',
356 | 'assignee-id': 'assignee',
357 | 'priority-id': 'high',
358 | 'severity-id': 'should_have',
359 | 'status-id': 'approved',
360 | }
361 |
362 |
363 | def test_test_run(cli_runner):
364 | """Check if test run command works."""
365 | with cli_runner.isolated_filesystem():
366 | with open('junit_report.xml', 'w') as handler:
367 | handler.write(JUNIT_XML)
368 | with open('source.py', 'w') as handler:
369 | handler.write('')
370 | with mock.patch('betelgeuse.collector') as collector:
371 | testcases = [
372 | {'name': 'test_passed', 'testmodule': 'foo1'},
373 | {'name': 'test_passed_no_id', 'testmodule': 'foo1'},
374 | {'name': 'test_skipped', 'testmodule': 'foo2'},
375 | {'name': 'test_failure', 'testmodule': 'foo3'},
376 | {'name': 'test_error', 'testmodule': 'foo4'},
377 | {'name': 'test_parametrized', 'testmodule': 'foo1'},
378 | ]
379 | return_value_testcases = []
380 | for test in testcases:
381 | t = mock.MagicMock()
382 | t.docstring = ''
383 | t.name = test['name']
384 | t.parent_class = None
385 | t.testmodule = test['testmodule']
386 | t.fields = {'id': str(id(t))}
387 | if t.name == 'test_parametrized':
388 | t.fields['parametrized'] = 'yes'
389 | t.junit_id = f'{test["testmodule"]}.{test["name"]}'
390 | return_value_testcases.append(t)
391 |
392 | collector.collect_tests.return_value = {
393 | 'source.py': return_value_testcases,
394 | }
395 | result = cli_runner.invoke(
396 | cli,
397 | [
398 | 'test-run',
399 | '--dry-run',
400 | '--no-include-skipped',
401 | '--create-defects',
402 | '--custom-fields', 'field=value',
403 | '--project-span-ids', 'project1, project2',
404 | '--response-property', 'key=value',
405 | '--status', 'inprogress',
406 | '--test-run-id', 'test-run-id',
407 | '--test-run-group-id', 'test-run-group-id',
408 | '--test-run-template-id', 'test-run-template-id',
409 | '--test-run-title', 'test-run-title',
410 | '--test-run-type-id', 'test-run-type-id',
411 | 'junit_report.xml',
412 | 'source.py',
413 | 'userid',
414 | 'projectid',
415 | 'importer.xml'
416 | ]
417 | )
418 | assert result.exit_code == 0, result.output
419 | collector.collect_tests.assert_called_once_with('source.py', ())
420 | assert os.path.isfile('importer.xml')
421 | root = ElementTree.parse('importer.xml').getroot()
422 | assert root.tag == 'testsuites'
423 | properties = root.find('properties')
424 | assert properties
425 | by_name = operator.itemgetter('name')
426 | properties = sorted(
427 | [p.attrib for p in properties.findall('property')],
428 | key=by_name
429 | )
430 |
431 | expected = [
432 | {'name': 'polarion-create-defects', 'value': 'true'},
433 | {'name': 'polarion-custom-field', 'value': 'value'},
434 | {'name': 'polarion-custom-lookup-method-field-id',
435 | 'value': 'testCaseID'},
436 | {'name': 'polarion-dry-run', 'value': 'true'},
437 | {'name': 'polarion-include-skipped', 'value': 'false'},
438 | {'name': 'polarion-lookup-method', 'value': 'custom'},
439 | {'name': 'polarion-project-id', 'value': 'projectid'},
440 | {'name': 'polarion-project-span-ids',
441 | 'value': 'project1, project2'},
442 | {'name': 'polarion-response-key', 'value': 'value'},
443 | {'name': 'polarion-testrun-status-id', 'value': 'inprogress'},
444 | {'name': 'polarion-testrun-id', 'value': 'test-run-id'},
445 | {'name': 'polarion-group-id', 'value': 'test-run-group-id'},
446 | {'name': 'polarion-testrun-template-id',
447 | 'value': 'test-run-template-id'},
448 | {'name': 'polarion-testrun-title', 'value': 'test-run-title'},
449 | {'name': 'polarion-testrun-type-id',
450 | 'value': 'test-run-type-id'},
451 | {'name': 'polarion-user-id', 'value': 'userid'},
452 | ]
453 | expected.sort(key=by_name)
454 | assert properties == expected
455 | testsuite = root.find('testsuite')
456 | assert testsuite
457 | for index, testcase in enumerate(testsuite.findall('testcase')):
458 | properties = testcase.find('properties')
459 | assert properties
460 | p = properties.findall('property')
461 | assert 0 < len(p) <= 2
462 | print(index)
463 | print(ElementTree.tostring(testcase))
464 |
465 | if len(p) == 2:
466 | testcase_id = str(id(return_value_testcases[-1]))
467 | else:
468 | testcase_id = str(id(return_value_testcases[index]))
469 |
470 | assert p[0].attrib == {
471 | 'name': 'polarion-testcase-id',
472 | 'value': testcase_id,
473 | }
474 |
475 | if len(p) == 2:
476 | assert p[1].attrib['name'] == (
477 | 'polarion-parameter-pytest parameters'
478 | )
479 | assert p[1].attrib['value'] in ('a', 'b')
480 |
481 |
482 | def test_validate_key_value_option():
483 | """Check if validate_key_value_option works."""
484 | # None value will be passed when the option is not specified.
485 | for value, result in (('key=value=', ('key', 'value=')), (None, None)):
486 | assert validate_key_value_option(
487 | None, mock.MagicMock(), value) == result
488 |
489 |
490 | def test_validate_key_value_option_exception():
491 | """Check if validate_key_value_option validates invalid values."""
492 | option = mock.MagicMock()
493 | option.name = 'option_name'
494 | msg = 'option_name needs to be in format key=value'
495 | for value in ('value', ''):
496 | with pytest.raises(click.BadParameter) as excinfo:
497 | validate_key_value_option(None, option, value)
498 | assert excinfo.value.message == msg
499 |
--------------------------------------------------------------------------------
/tests/test_collector.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | """Tests for :mod:`betelgeuse.collector`."""
3 | import pytest
4 |
5 | from betelgeuse import collector
6 |
7 |
8 | @pytest.mark.parametrize(
9 | 'path', ('./tests/data', './tests/data/test_sample.py'))
10 | def test_collect_tests(path):
11 | """Check if ``collect_tests`` 'tests/data'collect tests."""
12 | tests = collector.collect_tests(path)
13 | assert 'tests/data/test_sample.py' in tests
14 | assert len(tests['tests/data/test_sample.py']) == 5
15 |
16 | # Check if we are not doing a specific python module collection
17 | if path.endswith('.py'):
18 | return
19 |
20 | assert 'tests/data/ignore_dir/test_ignore_dir.py' in tests
21 | assert len(tests['tests/data/ignore_dir/test_ignore_dir.py']) == 2
22 |
23 |
24 | @pytest.mark.parametrize('ignore_path', (
25 | 'tests/data/ignore_dir',
26 | 'tests/data/ignore_dir/test_ignore_dir.py'
27 | ))
28 | def test_collect_ignore_path(ignore_path):
29 | """Check if ``collect_tests`` don't collect tests on the ignore paths."""
30 | tests = collector.collect_tests('tests/data', [ignore_path])
31 | assert 'tests/data/ignore_dir/test_ignore_dir.py' not in tests
32 | assert 'tests/data/test_sample.py' in tests
33 | assert len(tests['tests/data/test_sample.py']) == 5
34 |
35 |
36 | @pytest.mark.parametrize('filename', ('test_module.py', 'module_test.py'))
37 | def test_is_test_module(filename):
38 | """Check ``is_test_module`` working for valid filenames."""
39 | assert collector.is_test_module(filename)
40 |
41 |
42 | @pytest.mark.parametrize('filename', ('not_test_module.py', 'module.py'))
43 | def test_not_is_test_module(filename):
44 | """Check ``is_test_module`` working for invalid filenames."""
45 | assert not collector.is_test_module(filename)
46 |
--------------------------------------------------------------------------------
/tests/test_parser.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | """Tests for :mod:`betelgeuse.parser`."""
3 | import pytest
4 | import mock
5 |
6 | from betelgeuse import parser
7 |
8 |
9 | def test_parse_docstring():
10 | """Check ``parse_docstring`` parser result."""
11 | docstring = """
12 | :field1: value1
13 | :field2: value2
14 | :field3:
15 | * item 1
16 | * item 2
17 | """
18 | assert parser.parse_docstring(docstring) == {
19 | 'field1': 'value1',
20 | 'field2': 'value2',
21 | 'field3': '<ul class="simple">\n<li>item 1</li>\n'
22 | '<li>item 2</li>\n</ul>\n',
23 | }
24 |
25 |
26 | @pytest.mark.parametrize('docstring', ('', None))
27 | def test_parse_none_docstring(docstring):
28 | """Check ``parse_docstring`` returns empty dict on empty input."""
29 | assert parser.parse_docstring(docstring) == {}
30 |
31 |
32 | def test_parse_docstring_special_characters():
33 | """Check ``parse_docstring`` parser result."""
34 | docstring = """
35 | Description with an special character like é
36 |
37 | :field1: value with an special character like é
38 | """
39 | assert parser.parse_docstring(docstring) == {
40 | u'field1': u'value with an special character like é',
41 | }
42 |
43 |
44 | @pytest.mark.parametrize('string', ('', None))
45 | def test_parse_rst_empty_string(string):
46 | """Check ``parse_rst`` returns empty string on empty input."""
47 | assert parser.parse_rst(string) == ''
48 |
49 |
50 | def test_parse_rst_translator_class():
51 | """Check if ``parse_rst`` uses a custom translator_class."""
52 | docstring = """
53 | :field1: value1
54 | :field2: value2
55 | :field3:
56 | """
57 | expected = (
58 | '\n'
59 | '\n'
60 | '\n'
61 | 'field1 | \n'
62 | 'value1 \n'
63 | ' | \n'
64 | '
\n'
65 | 'field2 | \n'
66 | 'value2 \n'
67 | ' | \n'
68 | '
\n'
69 | 'field3 | \n'
70 | ' | \n'
71 | '
\n'
72 | '
\n'
73 | '
\n'
74 | '\n'
75 | )
76 | assert parser.parse_rst(
77 | docstring, parser.TableFieldListTranslator) == expected
78 |
79 |
80 | def test_parse_rst_special_characters():
81 | """Check if ``parse_rst`` plays nice with special characters."""
82 | assert parser.parse_rst(u'String with special character like é') == (
83 | u'<div class="document">\n'
84 | u'<p>String with special character like é</p>\n'
85 | u'</div>\n'
86 | )
87 |
88 |
89 | def test_parse_markers():
90 | """
91 | Test if the markers list is parsed.
92 |
93 | The result should be a comma-separated list of markers from all levels,
94 | after removing the 'pytest.mark.' prefix and dropping ignored markers.
95 | """
96 | _mod_markers = 'pytest.mark.destructive'
97 | _class_markers = [
98 | 'pytest.mark.on_prem_provisioning',
99 | "pytest.mark.usefixtures('cleandir')"
100 | ]
101 | _test_markers = [
102 | "pytest.mark.parametrize('something', ['a', 'b'])",
103 | 'pytest.mark.skipif(not settings.robottelo.REPOS_HOSTING_URL)',
104 | 'pytest.mark.tier1'
105 | ]
106 | _all_markers = [_mod_markers, _class_markers, _test_markers]
107 |
108 | expected = 'destructive, on_prem_provisioning, tier1'
109 | config = mock.MagicMock()
110 | config.MARKERS_IGNORE_LIST = [
111 | 'parametrize', 'skipif', 'usefixtures', 'skip_if_not_set']
112 | assert parser.parse_markers(_all_markers, config=config) == expected
113 |
--------------------------------------------------------------------------------
/tests/test_source_generator.py:
--------------------------------------------------------------------------------
1 | """Tests for :mod:`betelgeuse.source_generator`."""
2 | from betelgeuse import collector
3 | import mock
4 |
5 |
6 | def test_source_generator():
7 | """Check if ``collect_tests`` 'tests/data'collect tests."""
8 | tests = collector.collect_tests('tests/data/test_sample.py')
9 | test_decorated_test = [
10 | test for test in tests['tests/data/test_sample.py']
11 | if test.name == 'test_decorated_test'
12 | ].pop()
13 |
14 | assert test_decorated_test.decorators == [
15 | 'decorator',
16 | 'decorator.mark.something',
17 | "decorator_with_args([1, b'bytes', ('a', 'b'), None])",
18 | 'decorator_with_args(*[True, ((True or False) and True)])',
19 | "decorator_with_args((f'{CONSTANT!r:5>} with literal {{ and }}',))",
20 | 'decorator_with_args({1, 2, (- 3)})',
21 | "decorator_with_args({'a': 1, 'b': 2, **{'c': 3}})",
22 |
23 | 'decorator_with_args([1, 2][0], [1, 2][:1], [1, 2][0:], '
24 | '[1, 2][0:1:1])',
25 |
26 | 'decorator_with_args([i for i in range(5) if ((i % 2) == 0)])',
27 | 'decorator_with_args((i for i in range(5)))',
28 | 'decorator_with_args({i for i in range(5)})',
29 | "decorator_with_args({k: v for k in 'abcde' for v in range(5)})",
30 | 'decorator_with_args(1, 2, 3, a=1, b=2)',
31 |
32 | 'decorator_with_args('
33 | 'dict(a=1, b=2), '
34 | "dict(**{'a': 1}), "
35 | 'vars(decorator.mark), '
36 | '(lambda a, *args, b=1, **kwargs: (a, args, b, kwargs)), '
37 | '(lambda a, *, b=1: (a, b)), '
38 | '(lambda v: (v if v else None))'
39 | ')',
40 | ]
41 |
42 |
43 | def test_source_markers():
44 | """Verifies if the test collection collects test markers."""
45 | config = mock.Mock()
46 | config.MARKERS_IGNORE_LIST = [
47 | 'parametrize', 'skipif', 'usefixtures', 'skip_if_not_set']
48 | tests = collector.collect_tests('tests/data/test_sample.py', config=config)
49 | marked_test = [
50 | test for test in tests['tests/data/test_sample.py']
51 | if test.name == 'test_markers_sample'
52 | ].pop()
53 | assert marked_test.fields['markers'] == ('run_in_one_thread, tier1, '
54 | 'on_prem_provisioning, osp')
55 |
56 |
57 | def test_source_singular_module_marker():
58 | """Verifies the single module level marker is retrieved."""
59 | mod_string = 'import pytest\n\npytestmark = pytest.mark.tier2' \
60 | '\n\ndef test_sing():\n\tpass'
61 | with open('/tmp/test_singular.py', 'w') as tfile:
62 | tfile.writelines(mod_string)
63 |
64 | config = mock.Mock()
65 | config.MARKERS_IGNORE_LIST = ['tier3']
66 | tests = collector.collect_tests('/tmp/test_singular.py', config=config)
67 | marked_test = [
68 | test for test in tests['/tmp/test_singular.py']
69 | if test.name == 'test_sing'
70 | ].pop()
71 | assert marked_test.fields['markers'] == 'tier2'
72 |
--------------------------------------------------------------------------------