├── src └── test_archiver │ ├── __init__.py │ ├── schemas │ ├── migrations │ │ ├── sqlite │ │ │ ├── testing │ │ │ │ ├── 10001-minor_test_update1.sql │ │ │ │ ├── 10002-minor_test_update2.sql │ │ │ │ └── 10003-major_test_update.sql │ │ │ ├── 0002-execution_paths.sql │ │ │ ├── 0003-test_run_mapping_cascade.sql │ │ │ └── 0001-schema_update_table_and_log_message_index.sql │ │ └── postgres │ │ │ ├── 0002-execution_paths.sql │ │ │ ├── 0003-test_run_mapping_cascade.sql │ │ │ └── 0001-schema_update_table_and_log_message_index.sql │ ├── schema_postgres.sql │ ├── schema_sqlite.sql │ └── README.md │ ├── version.py │ ├── archiver_listeners.py │ ├── ArchiverRobotListener.py │ └── configs.py ├── pytest ├── fixture_tests │ ├── pytest.ini │ ├── test_skipping.py │ ├── test_first_module.py │ ├── test_second_module.py │ ├── test_setups_and_teardowns.py │ └── test_class_setups_and_teardowns.py ├── fixture_runner.py └── README.md ├── archive_api_server ├── tests │ ├── requirements.txt │ └── robot │ │ └── api │ │ └── basic.robot ├── requirements.txt ├── test_config.json └── database.py ├── fixture_config_sqlite.json ├── robot_tests ├── resources │ ├── python_keywords.py │ └── common_keywords.robot ├── tests │ ├── __init__.robot │ ├── Errors.robot │ ├── top_suite │ │ ├── teardown_failure │ │ │ ├── __init__.robot │ │ │ ├── passing_suite1.robot │ │ │ └── passing_suite2.robot │ │ ├── Passing_tests.robot │ │ ├── lower_suite │ │ │ ├── empty.robot │ │ │ ├── documents.robot │ │ │ ├── tagging.robot │ │ │ └── embedded.robot │ │ ├── skipping.robot │ │ ├── Failing_tests.robot │ │ ├── Logging.robot │ │ └── Data-Driven.robot │ ├── variables.robot │ ├── control_structures │ │ ├── try_except.robot │ │ ├── other_control_structures.robot │ │ ├── grouped_templates.robot │ │ ├── while_loops.robot │ │ ├── if_else.robot │ │ └── for_loops.robot │ ├── sleep_suite │ │ ├── sleeper.robot │ │ └── Behavior-driven.robot │ ├── randomized_suite │ │ ├── random_pass.robot │ │ ├── flaky.robot │ │ └── bigrandom.robot │ └── 
README.md ├── libraries │ └── fixture_library.py └── README.md ├── fixture_config_postgres.json ├── phpunit_tests ├── fixture_tests │ ├── BasicTest.php │ ├── SetupClassFailureTest.php │ ├── TeardownClassFailsPassTest.php │ ├── SkippingTest.php │ ├── TeardownClassFailureTest.php │ ├── SetupAndTeardownTest.php │ └── DependenciesTest.php └── README.md ├── .gitattributes ├── docker ├── Dockerfile_db_postgres └── Dockerfile_demo_postgres ├── .pylintrc ├── .github └── workflows │ ├── publish.yaml │ └── basic_pipeline.yaml ├── tests ├── unit │ ├── test_change_engine.py │ ├── test_ouput_parser.py │ ├── test_archive_listener.py │ ├── test_configs.py │ ├── test_archiver_module.py │ └── test_database.py └── integration │ └── test_robot_framework_parsing.py ├── helpers └── diff2change_context_list.py ├── mocha_tests ├── fixture_suite.js └── README.md ├── .gitignore ├── pyproject.toml └── LICENSE /src/test_archiver/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pytest/fixture_tests/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | junit_family=xunit1 3 | -------------------------------------------------------------------------------- /archive_api_server/tests/requirements.txt: -------------------------------------------------------------------------------- 1 | robotframework 2 | RESTinstance 3 | -------------------------------------------------------------------------------- /archive_api_server/requirements.txt: -------------------------------------------------------------------------------- 1 | psycopg2-binary==2.8.2 2 | queries==2.0.1 3 | tornado==6.0.2 4 | -------------------------------------------------------------------------------- /fixture_config_sqlite.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_engine": "sqlite", 3 | 
"database": "fixture.db" 4 | } 5 | -------------------------------------------------------------------------------- /robot_tests/resources/python_keywords.py: -------------------------------------------------------------------------------- 1 | 2 | def keyword_expecting_numbers(number): 3 | int(number) 4 | -------------------------------------------------------------------------------- /robot_tests/tests/__init__.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Metadata Version 2.0 3 | Test Tags fixture 4 | Suite setup Log foo 5 | Suite teardown Log foo 6 | 7 | -------------------------------------------------------------------------------- /src/test_archiver/schemas/migrations/sqlite/testing/10001-minor_test_update1.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO schema_updates(schema_version, applied_by) 2 | VALUES (10001, '{applied_by}'); 3 | -------------------------------------------------------------------------------- /src/test_archiver/schemas/migrations/sqlite/testing/10002-minor_test_update2.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO schema_updates(schema_version, applied_by) 2 | VALUES (10002, '{applied_by}'); 3 | -------------------------------------------------------------------------------- /src/test_archiver/schemas/migrations/sqlite/testing/10003-major_test_update.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO schema_updates(schema_version, applied_by) 2 | VALUES (10003, '{applied_by}'); 3 | -------------------------------------------------------------------------------- /archive_api_server/test_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "port": 8888, 3 | "db_name": "fixture_archive", 4 | "db_host": "localhost", 5 | "db_user": "robot", 6 | "db_password": "robot" 
7 | } -------------------------------------------------------------------------------- /robot_tests/tests/Errors.robot: -------------------------------------------------------------------------------- 1 | 2 | 3 | *** Test Cases *** 4 | 5 | Syntax Error In Test Case 6 | [Tags] listener_parser_mismatch 7 | [Arguments] ${illegal_argument} 8 | No Operation 9 | -------------------------------------------------------------------------------- /fixture_config_postgres.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_engine": "postgresql", 3 | "database": "fixture_archive", 4 | "host": "localhost", 5 | "port": 5432, 6 | "user": "robot", 7 | "password": "robot", 8 | "require_ssl": false 9 | } 10 | -------------------------------------------------------------------------------- /phpunit_tests/fixture_tests/BasicTest.php: -------------------------------------------------------------------------------- 1 | assertTrue(false); 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /robot_tests/libraries/fixture_library.py: -------------------------------------------------------------------------------- 1 | 2 | class Interactions(object): 3 | 4 | ROBOT_LIBRARY_SCOPE = 'TEST SUITE' 5 | 6 | def __init__(self): 7 | self.mode = None 8 | 9 | @staticmethod 10 | def interact_with_sut(): 11 | return "foo" 12 | -------------------------------------------------------------------------------- /robot_tests/tests/top_suite/teardown_failure/__init__.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Documentation Suite where everything passes but the suite teardown fails 3 | Resource ../../../resources/common_keywords.robot 4 | Test Tags failed_by_suite_teardown 5 | Suite Teardown Fail the test case 6 | -------------------------------------------------------------------------------- /.gitattributes: 
-------------------------------------------------------------------------------- 1 | * text eol=lf 2 | 3 | # Explicitly declare text files you want to always be normalized and converted 4 | # to native line endings on checkout. 5 | *.py eol=lf 6 | *.md eol=lf 7 | *.txt eol=lf 8 | *.sql eol=lf 9 | 10 | # Denote all files that are truly binary and should not be modified. 11 | *.png binary 12 | *.jpg binary 13 | -------------------------------------------------------------------------------- /pytest/fixture_runner.py: -------------------------------------------------------------------------------- 1 | 2 | from pathlib import Path 3 | 4 | import pytest 5 | 6 | PYTEST_FIXTURE_ROOT = Path(__file__).parent 7 | 8 | # This runner is needed to ignore the status code from the test run in other scrips 9 | 10 | pytest.main([ 11 | PYTEST_FIXTURE_ROOT / "fixture_tests", 12 | "--tb=no", 13 | "--junit-xml", PYTEST_FIXTURE_ROOT / "pytest_fixture_output.xml", 14 | ]) 15 | -------------------------------------------------------------------------------- /robot_tests/tests/top_suite/teardown_failure/passing_suite1.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Documentation Suite where everything passes 3 | Resource ../../../resources/common_keywords.robot 4 | 5 | *** Test Cases *** 6 | First passing testcase 7 | Log This test case passes but will be failed 8 | 9 | Second passing testcase 10 | Log This test case passes but will be failed 11 | -------------------------------------------------------------------------------- /robot_tests/tests/top_suite/teardown_failure/passing_suite2.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Documentation Suite where everything passes 3 | Resource ../../../resources/common_keywords.robot 4 | 5 | *** Test Cases *** 6 | First passing testcase 7 | Log This test case passes but will be failed 8 | 9 | Second passing testcase 
10 | Log This test case passes but will be failed 11 | -------------------------------------------------------------------------------- /src/test_archiver/schemas/migrations/postgres/0002-execution_paths.sql: -------------------------------------------------------------------------------- 1 | -- Adds columns to record execution paths 2 | ALTER TABLE suite_result ADD COLUMN execution_path text DEFAULT NULL; 3 | ALTER TABLE test_result ADD COLUMN execution_path text DEFAULT NULL; 4 | ALTER TABLE log_message ADD COLUMN execution_path text DEFAULT NULL; 5 | 6 | INSERT INTO schema_updates (schema_version, applied_by) 7 | VALUES (2, '{applied_by}'); 8 | -------------------------------------------------------------------------------- /src/test_archiver/schemas/migrations/sqlite/0002-execution_paths.sql: -------------------------------------------------------------------------------- 1 | -- Adds columns to record execution paths 2 | ALTER TABLE suite_result ADD COLUMN execution_path text DEFAULT NULL; 3 | ALTER TABLE test_result ADD COLUMN execution_path text DEFAULT NULL; 4 | ALTER TABLE log_message ADD COLUMN execution_path text DEFAULT NULL; 5 | 6 | INSERT INTO schema_updates (schema_version, applied_by) 7 | VALUES (2, '{applied_by}'); 8 | -------------------------------------------------------------------------------- /phpunit_tests/fixture_tests/SetupClassFailureTest.php: -------------------------------------------------------------------------------- 1 | assertTrue(true); 10 | } 11 | 12 | public function testTrueFalse(): void 13 | { 14 | $this->assertTrue(true); 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /phpunit_tests/fixture_tests/TeardownClassFailsPassTest.php: -------------------------------------------------------------------------------- 1 | assertTrue(false); 10 | 11 | } 12 | 13 | public function testTrueFalse(): void 14 | { 15 | $this->assertTrue(true); 16 | } 17 | 18 | } 
-------------------------------------------------------------------------------- /phpunit_tests/fixture_tests/SkippingTest.php: -------------------------------------------------------------------------------- 1 | assertTrue(false); 9 | } 10 | /** 11 | * @requires OSFAMILY Windows 12 | */ 13 | public function testSkip(): void 14 | { 15 | $this->assertTrue(true); 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /phpunit_tests/fixture_tests/TeardownClassFailureTest.php: -------------------------------------------------------------------------------- 1 | assertTrue(false); 10 | 11 | } 12 | 13 | public function testTrueFalse(): void 14 | { 15 | $this->assertTrue(false); 16 | } 17 | 18 | } 19 | -------------------------------------------------------------------------------- /robot_tests/tests/top_suite/Passing_tests.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Resource ../../resources/common_keywords.robot 3 | Test Tags passing 4 | 5 | *** Test cases *** 6 | Just log something 7 | Log foo bar 8 | 9 | Use library 10 | [Tags] sut_interaction 11 | Interact with the SUT 12 | 13 | Set things up, do something and then tear down 14 | [Setup] Do nothing twice 15 | [Teardown] Do nothing twice 16 | Log Doing nothing 17 | Do nothing twice 18 | Do nothing twice 19 | -------------------------------------------------------------------------------- /phpunit_tests/fixture_tests/SetupAndTeardownTest.php: -------------------------------------------------------------------------------- 1 | assertTrue(false); 9 | } 10 | 11 | protected function tearDown(): void 12 | { 13 | $this->assertTrue(true); 14 | } 15 | 16 | public function testTrueFalse(): void 17 | { 18 | $this->assertTrue(true); 19 | } 20 | 21 | 22 | } 23 | -------------------------------------------------------------------------------- /pytest/fixture_tests/test_skipping.py: 
-------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | class TestClassWithSkippedTest(unittest.TestCase): 4 | 5 | @unittest.skip("demonstrating skipping") 6 | def test_that_is_skipped(self): 7 | pass 8 | 9 | def test_that_is_not_skipped(self): 10 | pass 11 | 12 | @unittest.skip("showing class skipping") 13 | class TestClassThatIsSkipped(unittest.TestCase): 14 | 15 | def test_something(self): 16 | pass 17 | 18 | def test_something_else(self): 19 | pass 20 | -------------------------------------------------------------------------------- /pytest/fixture_tests/test_first_module.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import pytest 4 | 5 | @pytest.fixture(scope="session", autouse=True) 6 | def log_global_env_facts(record_testsuite_property): 7 | record_testsuite_property("EXAMPLE_METADATA", "foo") 8 | record_testsuite_property("SW_VERSION", "X.Y.Z") 9 | 10 | class FirstTestClass(unittest.TestCase): 11 | 12 | def test_something(self): 13 | record_property("example_key", 1) 14 | 15 | pass 16 | 17 | def test_other_thing(self): 18 | pass 19 | -------------------------------------------------------------------------------- /robot_tests/tests/variables.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Test Tags variables 3 | 4 | 5 | *** Variables *** 6 | ${ORIGINAL_VARIABLE}= original value 7 | 8 | 9 | *** Test cases *** 10 | Set variables 11 | [Tags] listener_parser_mismatch 12 | VAR ${local_variable}= ${ORIGINAL_VARIABLE} scope=LOCAL 13 | VAR ${test_variable}= ${ORIGINAL_VARIABLE} scope=TEST 14 | VAR ${suite_variable}= ${ORIGINAL_VARIABLE} scope=SUITE 15 | Log ${local_variable} ${test_variable} ${suite_variable} 16 | -------------------------------------------------------------------------------- /robot_tests/resources/common_keywords.robot: 
-------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Library fixture_library.Interactions 3 | Library ./python_keywords.py 4 | 5 | *** Keywords *** 6 | Interact with the SUT 7 | Interact with sut 8 | 9 | Fail the test case 10 | Fail You shall not pass! 11 | 12 | Do nothing twice 13 | Not actually doing anything 14 | Not actually doing anything 15 | 16 | Not actually doing anything 17 | No operation 18 | 19 | Do something with value 20 | [Arguments] ${value} 21 | Log ${value} 22 | -------------------------------------------------------------------------------- /docker/Dockerfile_db_postgres: -------------------------------------------------------------------------------- 1 | # Postgres 13 with empty fixture_archive database. 2 | # 3 | # Example usage: 4 | # Building `docker build -t testarchiver_db -f ./Dockerfile_db_postgres ..` Notice: the build context is set to parent directory 5 | # Running `docker run --rm -e POSTGRES_PASSWORD=robot -e POSTGRES_USER=testarchiver -e POSTGRES_DB=fixture_archive -p 5432:5432 -t testarchiver_db:latest` 6 | # 7 | 8 | FROM postgres:13-alpine 9 | COPY ./test_archiver/schemas/schema_postgres.sql /docker-entrypoint-initdb.d/ 10 | 11 | CMD ["postgres"] -------------------------------------------------------------------------------- /docker/Dockerfile_demo_postgres: -------------------------------------------------------------------------------- 1 | # Postgres 13 fixture_archive database with sample data. 
2 | # Sample data is generated with run_fixture_robot.sh script 3 | # 4 | # Example usage: 5 | # Building `docker build -t testarchiver_demo -f ./Dockerfile_demo_postgres .` 6 | # Running `docker run --rm -e POSTGRES_PASSWORD=robot -e POSTGRES_USER=testarchiver -e POSTGRES_DB=fixture_archive -p 5432:5432 -t testarchiver_demo:latest` 7 | # 8 | 9 | FROM postgres:13-alpine 10 | COPY ./fixture_archive_v2_dump.sql docker-entrypoint-initdb.d/ 11 | 12 | CMD ["postgres"] -------------------------------------------------------------------------------- /pytest/fixture_tests/test_second_module.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | def function_under_tests(): 4 | foo() 5 | 6 | class FirstTestClass(unittest.TestCase): 7 | 8 | def test_something(self): 9 | pass 10 | 11 | def test_other_thing(self): 12 | pass 13 | 14 | 15 | class TestClassWithFailingTests(unittest.TestCase): 16 | 17 | def test_failing_assert(self): 18 | assert False, "foo" 19 | 20 | def test_opening_missing_file(self): 21 | open('non-existing-file', 'r') 22 | 23 | def test_function_not_found(self): 24 | function_under_tests() 25 | -------------------------------------------------------------------------------- /robot_tests/tests/top_suite/lower_suite/empty.robot: -------------------------------------------------------------------------------- 1 | *** Settings*** 2 | Test Tags should-fail 3 | Test Teardown Run Keywords Do Logging AND Do Tagging AND ${EMPTY} 4 | 5 | # This suite contains problems and are demonstrating failing during teardown even test works. 
6 | 7 | *** Test Cases *** 8 | 9 | Test With Same Name 10 | Log First Run 11 | 12 | Test With Same Name 13 | Log Second Run 14 | 15 | *** Keywords *** 16 | Do Logging 17 | Log Test teardown DEBUG 18 | Do Tagging 19 | Set Tags teardown -------------------------------------------------------------------------------- /robot_tests/tests/top_suite/skipping.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Resource ../../resources/common_keywords.robot 3 | Test Tags skipping rf4 4 | 5 | *** Test cases *** 6 | This test is skipped after doing something 7 | Do nothing twice 8 | Skip This is skipped 9 | 10 | This test is skipped in setup 11 | [Setup] Skip This is skipped 12 | Do nothing twice 13 | 14 | This test is skipped in teardown 15 | Do nothing twice 16 | [Teardown] Skip This is skipped 17 | 18 | This test is always skipped 19 | [Tags] robot:skip 20 | Do nothing twice 21 | [Teardown] Skip This is skipped 22 | -------------------------------------------------------------------------------- /robot_tests/tests/top_suite/lower_suite/documents.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Test Tags documenting 3 | 4 | *** Test cases *** 5 | Set test documentation 6 | Set Test Documentation Test's own documentation 7 | Log ${TEST DOCUMENTATION} 8 | 9 | Set suite documentation 10 | Set Suite Documentation Suite's own documentation. 
11 | Log ${SUITE DOCUMENTATION} 12 | 13 | Add text to suite documentation 14 | Set Suite Documentation This is the additional text append=yes 15 | Log ${SUITE DOCUMENTATION} 16 | -------------------------------------------------------------------------------- /robot_tests/tests/control_structures/try_except.robot: -------------------------------------------------------------------------------- 1 | 2 | *** Settings *** 3 | Resource ../../resources/common_keywords.robot 4 | 5 | 6 | *** Test Cases *** 7 | Try Except Catching 8 | Try And Catch Not Numbers 1 9 | Try And Catch Not Numbers foo 10 | 11 | *** Keywords *** 12 | Try And Catch Not Numbers 13 | [Arguments] ${value} 14 | TRY 15 | Keyword Expecting Numbers ${value} 16 | EXCEPT ValueError: * type=GLOB 17 | Log Exeption cauth, not a number 18 | ELSE 19 | Log No problem with the value 20 | FINALLY 21 | Log Always executed. 22 | END 23 | -------------------------------------------------------------------------------- /robot_tests/tests/control_structures/other_control_structures.robot: -------------------------------------------------------------------------------- 1 | 2 | 3 | *** Test Cases *** 4 | Using Repeat Keyword 5 | Repeat Keyword 5 Log Repeat it! 6 | 7 | Basic Group Syntax 8 | GROUP First group 9 | Log Logged in first group 10 | END 11 | Log Logged in in between 12 | GROUP Second group 13 | Log Logged in second group 14 | END 15 | 16 | Anonymous Group 17 | GROUP 18 | Log Group name is optional. 19 | END 20 | 21 | Nested Groups 22 | GROUP 23 | GROUP Nested group 24 | Log Groups can be nested. 25 | END 26 | END 27 | -------------------------------------------------------------------------------- /robot_tests/tests/top_suite/Failing_tests.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Resource ../../resources/common_keywords.robot 3 | Test Tags failing 4 | 5 | *** Test cases *** 6 | You can do it! Go for it! 
7 | Fail the test case 8 | 9 | Bungle the set up, do something (or actually not) and then tear things down 10 | [Setup] Fail the test case 11 | [Teardown] Do nothing twice 12 | Do nothing twice 13 | Do nothing twice 14 | 15 | Bungle the set up, do something (or actually not) and then bungle the tear down 16 | [Setup] Fail the test case 17 | [Teardown] Fail the test case 18 | Do nothing twice 19 | Do nothing twice 20 | -------------------------------------------------------------------------------- /robot_tests/tests/control_structures/grouped_templates.robot: -------------------------------------------------------------------------------- 1 | 2 | *** Settings *** 3 | Library String 4 | Test Template Upper case should be 5 | 6 | *** Test Cases *** 7 | Grouped Template 8 | GROUP ASCII characters 9 | a A 10 | z Z 11 | END 12 | GROUP Latin-1 characters 13 | ä Ä 14 | ß SS 15 | END 16 | GROUP Numbers 17 | 1 1 18 | 9 9 19 | END 20 | 21 | *** Keywords *** 22 | Upper case should be 23 | [Arguments] ${char} ${expected} 24 | ${actual} = Convert To Upper Case ${char} 25 | Should Be Equal ${actual} ${expected} 26 | -------------------------------------------------------------------------------- /robot_tests/tests/control_structures/while_loops.robot: -------------------------------------------------------------------------------- 1 | 2 | *** Settings *** 3 | Resource ../../resources/common_keywords.robot 4 | 5 | *** Test Cases *** 6 | Limit as iteration count 7 | [Tags] random should_fail 8 | WHILE True limit=0.5s on_limit_message=Custom While loop error message 9 | Sleep 0.3s This is run 0.5 seconds. 
10 | END 11 | 12 | CONTINUE and BREAK with WHILE 13 | WHILE True 14 | TRY 15 | ${value} = Not actually doing anything 16 | EXCEPT 17 | CONTINUE 18 | END 19 | Do something with value ${value} 20 | BREAK 21 | END 22 | -------------------------------------------------------------------------------- /robot_tests/tests/sleep_suite/sleeper.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Test Template sleep-driven 3 | Test Tags data-driven sleep 4 | 5 | *** Test Cases *** Sleep time 6 | First sleep 1 7 | Second sleep 6 8 | Third sleep 1 9 | Fourth sleep 7 10 | 11 | *** Keywords *** 12 | sleep-driven 13 | [Arguments] ${time} 14 | Sleep ${time} 15 | Run Keyword if ${time} > 5 Log That's lazy! WARN 16 | Run Keyword if ${time} > 5 Set tags Lazy 17 | Log Time to wake up! 18 | -------------------------------------------------------------------------------- /robot_tests/tests/randomized_suite/random_pass.robot: -------------------------------------------------------------------------------- 1 | *** Test Cases *** 2 | 3 | Randomized fail 4 | ${random}= Evaluate random.randint(1, 100) random 5 | Should Be True ${random} < ${ADJUST} 6 | 7 | Low failrate 8 | ${random}= Evaluate random.randint(1, 100) random 9 | Should Be True ${random} < 90 10 | 11 | Medium failrate 12 | ${random}= Evaluate random.randint(1, 100) random 13 | Should Be True ${random} < 50 14 | 15 | High Failrate 16 | ${random}= Evaluate random.randint(1, 100) random 17 | Should Be True ${random} < 10 18 | 19 | 20 | *** Variables *** 21 | ${ADJUST}= 50 -------------------------------------------------------------------------------- /robot_tests/tests/sleep_suite/Behavior-driven.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Test Tags behavior-driven sleep 3 | 4 | *** Test Cases *** 5 | Behavior sleeping 6 | Given I would like to sleep "3" "seconds" more 7 | When I woke up I would like to 
get warning 8 | and add a "woke-up" tag for Test 9 | Then I should feel rested 10 | 11 | 12 | *** Keywords *** 13 | I would like to sleep "${value}" "${variable}" more 14 | Log Sleeping 15 | Sleep ${value} 16 | I woke up I would like to get warning 17 | Log Wake up! WARN 18 | 19 | add a "${tag}" tag for Test 20 | Set tags ${tag} 21 | Log Tag assigned! WARN 22 | I should feel rested 23 | Log Feeling well! WARN -------------------------------------------------------------------------------- /src/test_archiver/version.py: -------------------------------------------------------------------------------- 1 | # import os 2 | 3 | # ARCHIVER_VERSION = "3.0.0" 4 | 5 | 6 | # def dynamic_package_version(): 7 | # version = ARCHIVER_VERSION 8 | # build_number = None 9 | # try: 10 | # build_number = os.environ['BUILD_NUMBER_FOR_DEV_PACKAGE_VERSION'] 11 | # except KeyError: 12 | # pass 13 | 14 | # if build_number: 15 | # # Not an official release 16 | # if 'dev' not in version: 17 | # # If not dev then release candidate 18 | # version += 'rc' 19 | # version += build_number 20 | 21 | # return version 22 | 23 | from pathlib import Path 24 | 25 | DEFAULT_VERSION = "0.0.1" 26 | 27 | version_file = Path(__file__).parent / 'version.txt' 28 | ARCHIVER_VERSION = version_file.read_text().strip() if version_file.exists() else DEFAULT_VERSION 29 | -------------------------------------------------------------------------------- /robot_tests/tests/top_suite/Logging.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Resource ../../resources/common_keywords.robot 3 | Test Tags logging 4 | 5 | *** Test cases *** 6 | Log a trace message 7 | Log There is some trace DEBUG 8 | 9 | Log a debug message 10 | Log Good luck debugging this DEBUG 11 | 12 | Log a info message 13 | Log Here is some info for you: foo INFO 14 | Log Here is some info for you: foo TRACE 15 | Log Here is some info for you: foo INFO 16 | 17 | Log a warning message 18 | 
Log This is the last warning! WARN 19 | 20 | Log a error message 21 | [Tags] error 22 | Log A grave error has been made... but on purpose. ERROR 23 | 24 | Log a fail message 25 | [Tags] failing should-fail 26 | Fail This test has been utterly failed 27 | -------------------------------------------------------------------------------- /pytest/fixture_tests/test_setups_and_teardowns.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | class TestClassWithFailingTestSetUp(unittest.TestCase): 4 | 5 | def setUp(self): 6 | assert False 7 | 8 | def test_something(self): 9 | pass 10 | 11 | class TestClassWithFailingTestTearDownAndPassingTests(unittest.TestCase): 12 | 13 | def test_something(self): 14 | pass 15 | 16 | def test_something_else(self): 17 | pass 18 | 19 | def tearDown(self): 20 | assert False 21 | 22 | class TestClassWithFailingTestTearDownAndFailingTests(unittest.TestCase): 23 | 24 | def test_something(self): 25 | assert False 26 | 27 | def test_something_else(self): 28 | assert False 29 | 30 | def tearDown(self): 31 | assert False 32 | 33 | @classmethod 34 | def tearDownClass(cls): 35 | assert False 36 | 37 | -------------------------------------------------------------------------------- /robot_tests/tests/randomized_suite/flaky.robot: -------------------------------------------------------------------------------- 1 | 2 | *** Variables *** 3 | ${VAL}= true 4 | 5 | 6 | *** Test Cases *** 7 | Passing test 8 | Log This test shall pass 9 | 10 | Choose fail 11 | Should be true '${VAL}' == 'true' 12 | 13 | Choose fail with WARN 14 | Log Warning WARN 15 | Should be true '${VAL}' == 'true' 16 | 17 | Counterwise fail 18 | Set Tags should-fail 19 | Log Sorry, but cant do anything WARN 20 | Should be true '${VAL}' == 'false' 21 | 22 | Always fails 23 | Set Tags should-fail 24 | Fail Test will fail 25 | Always fails with tag 26 | Set Tags should-fail 27 | Fail Test will fail should-fail 28 | 
-------------------------------------------------------------------------------- /pytest/README.md: -------------------------------------------------------------------------------- 1 | # pytets-JUnit fixture tests 2 | 3 | These are fixture tests designed to produce test inputs for the pytest-junit parser using pytest testing library and mocha-junit-reporter module. 4 | 5 | The examples here can also be used as example for using TestArchiver with [pytest](https://docs.pytest.org/) 6 | 7 | [producing junit xml files](https://docs.pytest.org/en/6.2.x/usage.html#creating-junitxml-format-files) 8 | 9 | [How to insert metadata](https://docs.pytest.org/en/6.2.x/usage.html#record-testsuite-property) 10 | 11 | ## Runing fixture tests and producing xml report 12 | Pytest is included in the dev dependencies of the project and helpers for running and parsing the pytest fixture. 13 | 14 | To just generate the fixture xml: 15 | ``` 16 | pdm pytest_fixture_run 17 | ``` 18 | 19 | To both generate and parse fixture xml: 20 | ``` 21 | pdm pytest_fixture_populate 22 | ``` 23 | -------------------------------------------------------------------------------- /robot_tests/tests/top_suite/lower_suite/tagging.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Test Tags R2-D2 3 | Default Tags Robot C-3PO 4 | 5 | *** Variables *** 6 | ${HOST} human 7 | 8 | *** Test Cases *** 9 | No own tags 10 | [Documentation] This test has tags Robot, C-3PO and R2-D2. 11 | No Operation 12 | 13 | With own tags 14 | [Documentation] This test has tags not_ready, Robot-v2 and R2-D2. 15 | [Tags] Robot-v2 not_ready 16 | No Operation 17 | 18 | Own tags with variables 19 | [Documentation] This test has tags hooman-human and R2-D2. 20 | [Tags] hooman-${HOST} 21 | No Operation 22 | 23 | Empty own tags 24 | [Documentation] This test has only tag R2-D2. 
25 | [Tags] 26 | No Operation 27 | 28 | Set Tags and Remove Tags Keywords 29 | [Documentation] This test has tags mytag and C-3PO. 30 | Set Tags mytag 31 | Remove Tags Robot R2* -------------------------------------------------------------------------------- /src/test_archiver/schemas/migrations/postgres/0003-test_run_mapping_cascade.sql: -------------------------------------------------------------------------------- 1 | -- 1. ----------------- 2 | -- Adds ON DELETE CASCADE for test series mappings 3 | ALTER TABLE test_series_mapping 4 | DROP CONSTRAINT test_series_mapping_series_fkey, 5 | ADD CONSTRAINT test_series_mapping_series_fkey 6 | FOREIGN KEY (series) 7 | REFERENCES test_series(id) 8 | ON DELETE CASCADE; 9 | 10 | ALTER TABLE test_series_mapping 11 | DROP CONSTRAINT test_series_mapping_test_run_id_fkey, 12 | ADD CONSTRAINT test_series_mapping_test_run_id_fkey 13 | FOREIGN KEY (test_run_id) 14 | REFERENCES test_run(id) 15 | ON DELETE CASCADE; 16 | 17 | -- 2. ----------------- 18 | -- Udate tree_hierarchy table call index as integer as it should be 19 | ALTER TABLE tree_hierarchy 20 | ALTER COLUMN call_index SET DATA TYPE int USING call_index::int; 21 | 22 | 23 | INSERT INTO schema_updates (schema_version, applied_by) 24 | VALUES (3, '{applied_by}'); 25 | -------------------------------------------------------------------------------- /pytest/fixture_tests/test_class_setups_and_teardowns.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | class TestClassWithFailingClassSetUp(unittest.TestCase): 4 | 5 | @classmethod 6 | def setUpClass(cls): 7 | assert False 8 | 9 | def test_something(self): 10 | pass 11 | 12 | def test_something_else(self): 13 | pass 14 | 15 | class TestClassWithFailingClassTearDownAndPassingTests(unittest.TestCase): 16 | 17 | @classmethod 18 | def tearDownClass(cls): 19 | assert False 20 | 21 | def test_something(self): 22 | pass 23 | 24 | def test_something_else(self): 25 | pass 
26 | 27 | class TestClassWithFailingClassTearDownAndFailingTests(unittest.TestCase): 28 | 29 | @classmethod 30 | def tearDownClass(cls): 31 | assert False 32 | 33 | def test_something(self): 34 | assert False 35 | 36 | def test_something_else(self): 37 | assert False 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | -------------------------------------------------------------------------------- /robot_tests/tests/top_suite/Data-Driven.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Test Template Data driven Template 3 | Test Tags data-driven 4 | 5 | *** Test Cases *** First value Second value 6 | First passes 1 1 7 | Second passes 1 1 8 | Third passes and warns 1 2 9 | Fourth fails 2 1 10 | 11 | 12 | 13 | *** Keywords *** 14 | Data driven Template 15 | [Arguments] ${fval} ${sval} 16 | Run Keyword If ${fval}==${sval} Log Passed 17 | ... ELSE IF ${fval}<${sval} Log Warning WARN 18 | ... ELSE IF ${fval}>${sval} Fail Test failed 19 | 20 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MESSAGES CONTROL] 2 | disable=missing-docstring 3 | 4 | [FORMAT] 5 | max-line-length=110 6 | max-module-lines=1500 7 | 8 | 9 | [DESIGN] 10 | 11 | # Maximum number of arguments for function / method. 12 | max-args=20 13 | 14 | # Maximum number of attributes for a class (see R0902). 15 | max-attributes=100 16 | 17 | # Maximum number of boolean expressions in an if statement. 18 | max-bool-expr=5 19 | 20 | # Maximum number of branch for function / method body. 21 | max-branches=15 22 | 23 | # Maximum number of locals for function / method body. 24 | max-locals=15 25 | 26 | # Maximum number of parents for a class (see R0901). 27 | max-parents=7 28 | 29 | # Maximum number of public methods for a class (see R0904). 
30 | max-public-methods=100 31 | 32 | # Maximum number of return / yield for function / method body. 33 | max-returns=6 34 | 35 | # Maximum number of statements in function / method body. 36 | max-statements=50 37 | 38 | # Minimum number of public methods for a class (see R0903). 39 | min-public-methods=0 40 | -------------------------------------------------------------------------------- /.github/workflows/publish.yaml: -------------------------------------------------------------------------------- 1 | name: Publish Testarchiver to Pypi 2 | 3 | on: 4 | push: 5 | tags: 6 | - '[0-9]+.[0-9]+.[0-9]+' 7 | jobs: 8 | build-and-publish: 9 | name: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@master 13 | with: 14 | fetch-depth: 0 15 | - name: Set up Python 3.12 16 | uses: actions/setup-python@v2 17 | with: 18 | python-version: 3.12 19 | - name: Install PDM 20 | run: pip install --upgrade pdm 21 | - name: Install project 22 | run: pdm install 23 | 24 | - name: Test scripts 25 | run: | 26 | pdm run testarchiver --version 27 | pdm run testarchive_schematool --version 28 | git status 29 | 30 | - name: Build package 31 | run: pdm build 32 | 33 | - name: Publish package 34 | run: pdm publish --no-build 35 | env: 36 | PDM_PUBLISH_USERNAME: __token__ 37 | PDM_PUBLISH_PASSWORD: ${{ secrets.pypi_api_token }} 38 | -------------------------------------------------------------------------------- /src/test_archiver/schemas/migrations/postgres/0001-schema_update_table_and_log_message_index.sql: -------------------------------------------------------------------------------- 1 | -- Start versioning the schema and record updates 2 | CREATE TABLE schema_updates ( 3 | id serial PRIMARY KEY, 4 | schema_version int UNIQUE NOT NULL, 5 | applied_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, 6 | initial_update boolean DEFAULT false, 7 | applied_by text 8 | ); 9 | -- Pre 2.0 Schema is version 0 10 | INSERT INTO schema_updates (schema_version, initial_update, 
applied_by) 11 | VALUES (0, true, '{applied_by}'); 12 | 13 | -- Add schema_version column to test_runs. 14 | -- Makes the schema incompatible for older versions of TestArchiver on purpose. 15 | ALTER TABLE test_run ADD COLUMN schema_version int 16 | REFERENCES schema_updates(schema_version) NOT NULL DEFAULT 0; 17 | ALTER TABLE test_run ALTER COLUMN schema_version SET DEFAULT NULL; 18 | 19 | -- Adds missing index for log_message table 20 | CREATE INDEX test_log_message_index ON log_message(test_run_id, suite_id, test_id); 21 | 22 | INSERT INTO schema_updates (schema_version, applied_by) 23 | VALUES (1, '{applied_by}'); 24 | -------------------------------------------------------------------------------- /robot_tests/tests/randomized_suite/bigrandom.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Test Template random-driven 3 | Test Tags data-driven randomized 4 | 5 | 6 | *** Test Cases *** Line value Max value 7 | First random 20 50 8 | Second random 30 50 9 | Third random 25 50 10 | Fourth random 15 50 11 | Fifth random 13 100 12 | Sixth random 60 100 13 | Sevenh random 30 100 14 | Eigth random 500 1000 15 | Ninth random 100 1200 16 | Tenth random 120 300 17 | 18 | 19 | *** Keywords *** 20 | random-driven 21 | [Arguments] ${lval} ${maxval} 22 | ${random}= Evaluate random.randint(1, ${maxval}) random 23 | Should Be True ${random} > ${lval} 24 | -------------------------------------------------------------------------------- /robot_tests/tests/control_structures/if_else.robot: -------------------------------------------------------------------------------- 1 | 2 | *** Test Cases *** 3 | If And Else 4 | Analyse Value ${1} 5 | Analyse Value ${-1} 6 | Analyse Value ${0} 7 | Analyse Value foo 8 | 9 | Nested If And Else 10 | Log items 11 | Log items xxx log_values=False 12 | Log items a b c 13 | 14 | 15 | *** Keywords *** 16 | Analyse Value 17 | [Arguments] ${value} 18 | IF $value > 0 19 | Log Positive value 
20 | ELSE IF $value < 0 21 | Log Negative value 22 | ELSE IF $value == 0 23 | Log Zero value 24 | ELSE 25 | Fail Unexpected value: ${value} 26 | END 27 | 28 | Log items 29 | [Arguments] @{items} ${log_values}=True 30 | IF not ${items} 31 | Log No items. 32 | ELSE IF len(${items}) == 1 33 | IF ${log_values} 34 | Log One item: ${items}[0] 35 | ELSE 36 | Log One item. 37 | END 38 | ELSE 39 | Log ${{len(${items})}} items. 40 | IF ${log_values} 41 | FOR ${index} ${item} IN ENUMERATE @{items} start=1 42 | Log Item ${index}: ${item} 43 | END 44 | END 45 | END 46 | 47 | -------------------------------------------------------------------------------- /phpunit_tests/README.md: -------------------------------------------------------------------------------- 1 | # Phpunit fixture tests 2 | 3 | These are fixture tests designed to produce test inputs for the php-junit parser using the Phpunit testing library. 4 | 5 | The examples here can also be used as example for using TestArchiver with phpunit 6 | 7 | ## Required modules 8 | 9 | [phpunit](https://phpunit.readthedocs.io/en/9.3/index.html) 10 | 11 | The Phpunit package can be either downloaded as a phar or installed through composer. 12 | 13 | 14 | ``` 15 | ➜ wget -O phpunit https://phar.phpunit.de/phpunit-7.phar 16 | 17 | ➜ chmod +x phpunit 18 | 19 | ➜ ./phpunit --version 20 | ``` 21 | 22 | The phpunit executable can then be moved to /usr/local/bin if you want it in path. 
23 | 24 | To install with composer 25 | 26 | ``` 27 | ➜ composer require --dev phpunit/phpunit ^7 28 | 29 | ➜ ./vendor/bin/phpunit --version 30 | ``` 31 | 32 | [Options for phpunit, contains logging](https://phpunit.readthedocs.io/en/9.3/textui.html) 33 | 34 | 35 | ## Runing tests and producing xml report 36 | 37 | ``` 38 | phpunit --log-junit output.xml fixture_tests 39 | ``` 40 | 41 | As Phpunit does not contain a timestamp for the time of test execution it is added with the parser, thus the timestamp in epimetheus for example refers to the time of testarchiver execution. 42 | -------------------------------------------------------------------------------- /robot_tests/tests/top_suite/lower_suite/embedded.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Test Tags embedded loops 3 | 4 | *** Test Cases *** 5 | Normal test case with embedded arguments 6 | The result of 1 + 5 should be 6 7 | The result of 1 + 6 should be 7 8 | 9 | Template with embedded arguments 10 | [Template] The result of ${calculation} should be ${expected} 11 | 1 + 1 2 12 | 1 + 2 3 13 | 14 | Template and for loops 15 | [Template] Another template 16 | FOR ${item} IN @{ITEMS} 17 | ${item} Robot 18 | END 19 | 20 | *** Keywords *** 21 | The result of ${calculation} should be ${expected} 22 | ${result} = Evaluate ${calculation} 23 | Log ${result} 24 | Should Be Equal As Integers ${result} ${expected} 25 | 26 | Another template 27 | [arguments] ${first_arg} ${second_arg} 28 | Log ${first_arg}, ${second_arg} WARN 29 | 30 | *** Variables *** 31 | @{ITEMS} = r o b o t 32 | -------------------------------------------------------------------------------- /robot_tests/tests/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## loop_suite 3 | Contains two robot suites using for-loop. 4 | 5 | `for_tests.robot` Different ways to use loops. 6 | 7 | `loops.robot` Loop generating random strings. 
8 | ## randomized_suite 9 | Contains three suites, which are meant to produce randomized fails. 10 | 11 | `bigrandom.robot` Ten tests with different passing rates. 12 | 13 | `flaky.robot` Test generating fails with different ways. 14 | 15 | `random_pass.robot` Generates random passes. 16 | ## sleep_suite 17 | Contains two suites, that generates also timedata for log. 18 | 19 | `Behavior-driven.robot` Behavior-Driven sleep tests. 20 | 21 | `sleeper.robot` Data-Driven sleep tests. 22 | ## top_suite 23 | Upper suite for some basic tests. 24 | 25 | `Data-Driven.robot` Example of Data-Driven design on tests. 26 | 27 | `Failing_tests.robot` Three test that produces fail. 28 | 29 | `Logging.robot` Example of different types of logging. 30 | 31 | `Passing_tests.robot` Pass and use things. 32 | ### lower_suite 33 | Child suite for top_suite 34 | 35 | `documents.robot` Test generating documentation. 36 | 37 | `embedded.robot` Embedded example tests. 38 | 39 | `empty.robot` "Stupid" tests that pass, but teardown breaks. 40 | 41 | `tagging.robot` Example tests doing tagging. 
42 | -------------------------------------------------------------------------------- /.github/workflows/basic_pipeline.yaml: -------------------------------------------------------------------------------- 1 | name: Basic pipeline 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | test-and-build: 7 | 8 | runs-on: ubuntu-latest 9 | strategy: 10 | fail-fast: false 11 | matrix: 12 | python-version: ['3.9', '3.10', '3.11', '3.12'] 13 | 14 | steps: 15 | # Static analysis and unittests 16 | - uses: actions/checkout@master 17 | - name: Set up Python ${{ matrix.python-version }} 18 | uses: actions/setup-python@v2 19 | with: 20 | python-version: ${{ matrix.python-version }} 21 | - name: Install PDM 22 | run: pip install --upgrade pdm 23 | - name: Install project 24 | run: pdm install 25 | - name: Lint with pylint 26 | run: pdm lint 27 | - name: Run unittests 28 | run: pdm utest 29 | - name: Run integration tests (sqlite) 30 | run: pdm itest -k RobotFixtureArchivingSqliteTests 31 | 32 | - name: Test scripts 33 | run: | 34 | pdm run testarchiver --version 35 | pdm run testarchive_schematool --version 36 | 37 | - name: Build distribution 38 | run: pdm build 39 | 40 | # Test parsing different test fixtures 41 | - name: Run and parse robot fixtures 42 | run: pdm robot_fixture_populate 43 | 44 | - name: Run and parse pytest fixtures 45 | run: pdm pytest_fixture_populate 46 | -------------------------------------------------------------------------------- /src/test_archiver/schemas/migrations/sqlite/0003-test_run_mapping_cascade.sql: -------------------------------------------------------------------------------- 1 | -- 1. 
----------------- 2 | -- Adds ON DELETE CASCADE for test series mappings 3 | -- Only approach with SQLite3 is to rewrite the table definition 4 | PRAGMA writable_schema=1; 5 | UPDATE sqlite_master SET sql= 6 | 'CREATE TABLE test_series_mapping ( 7 | series int REFERENCES test_series(id) ON DELETE CASCADE, 8 | test_run_id int REFERENCES test_run(id) ON DELETE CASCADE, 9 | build_number int NOT NULL, 10 | build_id text, 11 | PRIMARY KEY (series, test_run_id, build_number) 12 | );' 13 | WHERE type='table' AND name='test_series_mapping'; 14 | PRAGMA writable_schema=0; 15 | 16 | -- 2. ----------------- 17 | -- Udate tree_hierarchy table call index as integer as it should be 18 | CREATE TABLE new_tree_hierarchy ( 19 | fingerprint text REFERENCES keyword_tree(fingerprint), 20 | subtree text REFERENCES keyword_tree(fingerprint), 21 | call_index int, 22 | PRIMARY KEY (fingerprint, subtree, call_index) 23 | ); 24 | -- Old data to copy of table 25 | INSERT INTO new_tree_hierarchy(fingerprint, subtree, call_index) 26 | SELECT fingerprint, subtree, CAST(call_index AS INTEGER) 27 | FROM tree_hierarchy; 28 | -- Drop old table and rename the new 29 | DROP TABLE tree_hierarchy; 30 | ALTER TABLE new_tree_hierarchy RENAME TO tree_hierarchy; 31 | 32 | 33 | INSERT INTO schema_updates (schema_version, applied_by) 34 | VALUES (3, '{applied_by}'); 35 | -------------------------------------------------------------------------------- /tests/unit/test_change_engine.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from unittest.mock import Mock 3 | 4 | from test_archiver.archiver_listeners import ChangeEngineListener 5 | 6 | 7 | @pytest.fixture 8 | def listener(): 9 | mock_archiver = Mock() 10 | mock_archiver.test_type = "something" 11 | mock_archiver.repository = "somewhere" 12 | engine = ChangeEngineListener(mock_archiver, "tidii") 13 | return engine 14 | 15 | 16 | def test_change_engine_listener_test_filter_skipped(listener): 17 | 
test1 = Mock() 18 | test1.full_name = "pytest.test_suite.test_a1" 19 | test1.status = "PASS" 20 | test2 = Mock() 21 | test2.full_name = "pytest.test_suite.test_a2" 22 | test2.status = "SKIPPED" 23 | tests = listener._filter_tests([test1, test2]) 24 | assert len(tests) == 1, "Skipped test should be filtered out." 25 | test = tests[0] 26 | assert test["name"] == "pytest.test_suite.test_a1" 27 | 28 | 29 | def test_change_engine_listener_test_filter_pass_and_fail(listener): 30 | test1 = Mock() 31 | test1.full_name = "pytest.test_suite.test_a1" 32 | test1.status = "PASS" 33 | test2 = Mock() 34 | test2.full_name = "pytest.test_suite.test_a2" 35 | test2.status = "FAIL" 36 | tests = listener._filter_tests([test1, test2]) 37 | assert len(tests) == 2, "Skipped test should be filtered out." 38 | test = tests[0] 39 | assert test["name"] == "pytest.test_suite.test_a1" 40 | test = tests[1] 41 | assert test["name"] == "pytest.test_suite.test_a2" 42 | -------------------------------------------------------------------------------- /src/test_archiver/schemas/migrations/sqlite/0001-schema_update_table_and_log_message_index.sql: -------------------------------------------------------------------------------- 1 | -- Start versioning the schema and record updates 2 | CREATE TABLE schema_updates ( 3 | id integer PRIMARY KEY AUTOINCREMENT, 4 | schema_version int UNIQUE NOT NULL, 5 | applied_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, 6 | initial_update boolean DEFAULT false, 7 | applied_by text 8 | ); 9 | -- Pre 2.0 Schema is version 0 10 | INSERT INTO schema_updates (schema_version, initial_update, applied_by) 11 | VALUES (0, 1, '{applied_by}'); 12 | 13 | -- Add schema_version column to test_runs. 14 | -- Makes the schema incompatible for older versions of TestArchiver on purpose. 
15 | ALTER TABLE test_run ADD COLUMN schema_version int DEFAULT 0; 16 | CREATE TABLE temp_test_run ( 17 | id integer PRIMARY KEY AUTOINCREMENT, 18 | imported_at timestamp DEFAULT CURRENT_TIMESTAMP, 19 | archived_using text, 20 | archiver_version text, 21 | generator text, 22 | generated timestamp, 23 | rpa boolean, 24 | dryrun boolean, 25 | ignored boolean DEFAULT false, 26 | schema_version int REFERENCES schema_updates(schema_version) NOT NULL 27 | ); 28 | INSERT INTO temp_test_run SELECT * FROM test_run; 29 | DROP TABLE test_run; 30 | ALTER TABLE temp_test_run RENAME TO test_run; 31 | 32 | -- Adds missing index for log_message table 33 | CREATE INDEX test_log_message_index ON log_message(test_run_id, suite_id, test_id); 34 | 35 | INSERT INTO schema_updates (schema_version, applied_by) 36 | VALUES (1, '{applied_by}'); 37 | -------------------------------------------------------------------------------- /robot_tests/README.md: -------------------------------------------------------------------------------- 1 | # Basic usage with Robot Framework 2 | 3 | ## With output parser 4 | 5 | The robot output.xml files can be imported using `testarchiver`. 6 | 7 | ``` 8 | testarchiver --format robot --database test_archive.db output.xml 9 | ``` 10 | This should create a SQLite database file named `test_archive.db` that contains the results. 11 | 12 | See main README.md for more details. 13 | 14 | ## With Robot listener 15 | 16 | The project includes a listener that allows archiving the results using Robot Frameworks [Listener interface](https://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#listener-interface) 17 | 18 | ``` 19 | robot --listener test_archiver.ArchiverRobotListener:test_archive.db:sqlite3 my_tests.robot 20 | ``` 21 | 22 | This should create a SQLite database file named `test_archive.db` that contains the results. 
23 | 24 | Arguments for ArchiverRobotListener: 25 | `test_archiver.ArchiverRobotListener:DBNAME_OR_CONFIG:DBNEGINE[:DBUSER[:DBPASSWORD[:DBHOST:[DBPORT]]]]` 26 | 27 | ## Fixture tests 28 | 29 | The tests in this folder are simple ones demonstrating some features of Robot Framework. They can be used to generate data for TestArchiver. 30 | 31 | Integration tests use these robot tests as the test material. `pdm itest` 32 | 33 | ### Usage 34 | 35 | To run robot fixture test run `pdm robot_fixture_run`. The ouput is stored to `robot_tests/normal/` directory. 36 | 37 | To run robot fixture test with ArchiverRobotListener run `pdm robot_fixture_run_with_listener`. The ouput is stored to `robot_tests/listener/` directory. 38 | -------------------------------------------------------------------------------- /tests/unit/test_ouput_parser.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from unittest.mock import Mock 3 | 4 | from test_archiver import configs, archiver 5 | from test_archiver.output_parser import ( 6 | XUnitOutputParser, 7 | JUnitOutputParser, 8 | MochaJUnitOutputParser, 9 | PytestJUnitOutputParser, 10 | MSTestOutputParser 11 | ) 12 | 13 | 14 | @pytest.fixture(scope="module") 15 | def mock_archiver(): 16 | mock_db = Mock() 17 | config = configs.Config(file_config={}) 18 | return archiver.Archiver(mock_db, config) 19 | 20 | @pytest.fixture 21 | def xunit(mock_archiver): 22 | return XUnitOutputParser(mock_archiver) 23 | 24 | 25 | @pytest.fixture 26 | def junit(mock_archiver): 27 | return JUnitOutputParser(mock_archiver) 28 | 29 | 30 | @pytest.fixture 31 | def mocha_junit(mock_archiver): 32 | return MochaJUnitOutputParser(mock_archiver) 33 | 34 | 35 | @pytest.fixture 36 | def pytest_junit(mock_archiver): 37 | return PytestJUnitOutputParser(mock_archiver) 38 | 39 | 40 | @pytest.fixture 41 | def mstest(mock_archiver): 42 | return MSTestOutputParser(mock_archiver) 43 | 44 | 45 | def 
test_xunit_has_test_type(xunit): 46 | assert xunit.archiver.test_type == "xunit" 47 | 48 | 49 | def test_junit_has_test_type(junit): 50 | assert junit.archiver.test_type == "junit" 51 | 52 | 53 | def test_mocha_junit_has_test_type(mocha_junit): 54 | assert mocha_junit.archiver.test_type == "mocha-junit" 55 | 56 | 57 | def test_pytest_junit_has_test_type(pytest_junit): 58 | assert pytest_junit.archiver.test_type == "pytest-junit" 59 | 60 | 61 | def test_mstest_has_test_type(mstest): 62 | assert mstest.archiver.test_type == "mstest" 63 | -------------------------------------------------------------------------------- /phpunit_tests/fixture_tests/DependenciesTest.php: -------------------------------------------------------------------------------- 1 | assertTrue(false); 10 | } 11 | 12 | public function testPass(): void 13 | { 14 | $this->assertTrue(true); 15 | } 16 | 17 | public function testPassWithParam(): string 18 | { 19 | $this->assertTrue(true); 20 | return 'first'; 21 | } 22 | /** 23 | * @depends testPass 24 | */ 25 | public function testDependencyWithPass(): void 26 | { 27 | $this->assertTrue(true); 28 | } 29 | 30 | /** 31 | * @depends testFail 32 | */ 33 | public function testDependencyWithFail(): void 34 | { 35 | $this->assertTrue(true); 36 | } 37 | 38 | /** 39 | * @depends testPassWithParam 40 | */ 41 | public function testDependencyVariableWithPass($first): void 42 | { 43 | $this->assertSame('first', $first); 44 | } 45 | 46 | public function failingAdditionsData(): array 47 | { 48 | return [ 49 | [0, 0, 0], 50 | [0, 1, 1], 51 | [1, 0, 1], 52 | [1, 1, 3] 53 | ]; 54 | } 55 | 56 | public function passingAdditionData(): array 57 | { 58 | return [ 59 | [0, 0, 0], 60 | [0, 1, 1], 61 | [1, 0, 1], 62 | [1, 1, 2] 63 | ]; 64 | } 65 | 66 | /** 67 | * @dataProvider failingAdditionsData 68 | */ 69 | public function testDatadependencyWithFail(): void 70 | { 71 | $this->assertSame($expected, $a + $b); 72 | } 73 | 74 | /** 75 | * @dataProvider passingAdditionData 76 | */ 
77 | public function testDatadependencyWithPass(): void 78 | { 79 | $this->assertSame($expected, $a + $b); 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /helpers/diff2change_context_list.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | import re 5 | 6 | DESCRIPTION = """ 7 | Tool for listing changed files and/or code contexts from a git diff. 8 | The output can be used as test run metadata or input for ChangeEngine. 9 | """ 10 | 11 | USAGE_EXAMPLE = """ 12 | Example usage: git diff | diff2change_context_list.py 13 | """ 14 | 15 | DEFAULT_CONTEXT_SEPARATOR = '|-|' 16 | FILE_PATTERN = r"^diff --git a/(.+) b/(.+)$" 17 | BLOCK_PATTERN = r"^@@ [-\+0-9,]+ [-\+0-9,]+ @@ (.+)$" 18 | 19 | 20 | def main(): 21 | args = argument_parser().parse_args() 22 | 23 | changes = set() 24 | current_file = None 25 | 26 | line = input() 27 | while True: 28 | match = re.search(FILE_PATTERN, line) 29 | if match: 30 | old_file = match.group(1) 31 | if args.files: 32 | changes.add(old_file) 33 | current_file = match.group(2) 34 | if args.files: 35 | changes.add(current_file) 36 | 37 | if args.change_context: 38 | match = re.search(BLOCK_PATTERN, line) 39 | if match: 40 | change_context = match.group(1) 41 | if change_context: 42 | changes.add('{}{}{}'.format(current_file, args.separator, change_context)) 43 | 44 | try: 45 | line = input() 46 | except EOFError: 47 | break 48 | 49 | for change in sorted(list(changes)): 50 | print(change) 51 | 52 | 53 | def argument_parser(): 54 | parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=USAGE_EXAMPLE) 55 | parser.add_argument('--no-files', dest='files', action='store_false', 56 | help='Do not list files') 57 | parser.add_argument('--no-change-context', dest='change_context', action='store_false', 58 | help='Do not list change contexts') 59 | parser.add_argument('--separator', dest='separator', 
default=DEFAULT_CONTEXT_SEPARATOR, 60 | help='Context separator string used between file name and context') 61 | return parser 62 | 63 | 64 | if __name__ == '__main__': 65 | main() 66 | -------------------------------------------------------------------------------- /robot_tests/tests/control_structures/for_loops.robot: -------------------------------------------------------------------------------- 1 | 2 | *** Variables *** 3 | @{ROW1} yksi kaksi kolme 4 | @{ROW2} one two three 5 | @{TABLE} ${ROW1} ${ROW2} 6 | 7 | @{CHARACTERS} a b c d f 8 | @{NUMBERS} 1 2 3 9 | 10 | *** Test Cases *** 11 | Simple For Loop 12 | FOR ${animal} IN cat dog 13 | No Operation 14 | Log ${animal} 15 | Log 2nd keyword 16 | END 17 | Log Outside loop 18 | 19 | Nested For Loop 20 | FOR ${row} IN @{table} 21 | FOR ${cell} IN @{row} 22 | Log ${cell} 23 | END 24 | END 25 | 26 | Multiple Loop Variables 27 | FOR ${index} ${english} ${finnish} IN 28 | ... 1 cat kissa 29 | ... 2 dog koira 30 | ... 3 horse hevonen 31 | Log ${english},${finnish},${index} 32 | END 33 | 34 | FOR-IN-ENUMERATE With Start 35 | FOR ${index} ${item} IN ENUMERATE @{ROW1} start=1 36 | Log ${index}, ${item} 37 | END 38 | 39 | Variable conversion 40 | # TODO: output.xml will omit the third value and leaves listener and parsing inconsistent 41 | # FOR ${value: bytes} IN Hello! Hyvä! 
\x00\x00\x07 42 | # Log ${value} formatter=repr 43 | # END 44 | FOR ${index} ${date: date} IN ENUMERATE 2023-06-15 2025-05-30 today 45 | Log ${date} formatter=repr 46 | END 47 | FOR ${item: tuple[str, date]} IN ENUMERATE 2023-06-15 2025-05-30 today 48 | Log ${item} formatter=repr 49 | END 50 | 51 | BREAK with FOR 52 | ${text} = Set Variable zero 53 | FOR ${var} IN one two three 54 | IF '${var}' == 'two' BREAK 55 | ${text} = Set Variable ${text}-${var} 56 | END 57 | Should Be Equal ${text} zero-one 58 | 59 | CONTINUE with FOR 60 | ${text} = Set Variable zero 61 | FOR ${var} IN one two three 62 | IF '${var}' == 'two' CONTINUE 63 | ${text} = Set Variable ${text}-${var} 64 | END 65 | Should Be Equal ${text} zero-one-three 66 | -------------------------------------------------------------------------------- /mocha_tests/fixture_suite.js: -------------------------------------------------------------------------------- 1 | const assert = require('assert'); 2 | 3 | // Mapping example 4 | describe('Suite', function(){ 5 | context('Subsuite', function() { 6 | it('Test case', function() {}); 7 | }); 8 | }); 9 | 10 | 11 | describe('Suite with logging', function(){ 12 | it('Test with logging', function() { 13 | this.test.consoleOutputs = [ 'This is normal logging' ]; 14 | this.test.consoleErrors = [ 'This is ERROR logging' ]; 15 | }); 16 | }); 17 | 18 | describe('Suite with a failing test', function(){ 19 | it('Failing test', function() { 20 | assert.strictEqual(1, 2); 21 | }); 22 | }); 23 | 24 | 25 | describe('Suite with failing setups and teardowns', function(){ 26 | context('Suite with failing setup', function() { 27 | before( function(){ 28 | assert.strictEqual(1, 2); 29 | }); 30 | it('Test 1', function() {}); 31 | it('Test 2', function() {}); 32 | }); 33 | 34 | context('Suite with failing teardown', function() { 35 | after( function(){ 36 | assert.strictEqual(1, 2); 37 | }); 38 | it('Test 1', function() {}); 39 | it('Test 2', function() {}); 40 | }); 41 | 42 | 
context('Suite with both failing setup and teardown', function() { 43 | before( function(){ 44 | assert.strictEqual(1, 2); 45 | }); 46 | after( function(){ 47 | assert.strictEqual(1, 2); 48 | }); 49 | it('Test 1', function() {}); 50 | it('Test 2', function() {}); 51 | }); 52 | 53 | context('Suite with failing test setup', function() { 54 | beforeEach( function(){ 55 | assert.strictEqual(1, 2); 56 | }); 57 | it('Test 1', function() {}); 58 | it('Test 2', function() {}); 59 | }); 60 | 61 | context('Suite with failing test teardown', function() { 62 | afterEach( function(){ 63 | assert.strictEqual(1, 2); 64 | }); 65 | it('Test 1', function() {}); 66 | it('Test 2', function() {}); 67 | }); 68 | 69 | context('Suite with both failing test setup and teardown', function() { 70 | beforeEach( function(){ 71 | assert.strictEqual(1, 2); 72 | }); 73 | afterEach( function(){ 74 | assert.strictEqual(1, 2); 75 | }); 76 | it('Test 1', function() {}); 77 | it('Test 2', function() {}); 78 | }); 79 | }); 80 | 81 | -------------------------------------------------------------------------------- /mocha_tests/README.md: -------------------------------------------------------------------------------- 1 | # Mocha-JUnit fixture tests 2 | These are fixture tests designed to produce test inputs for the mocha-junit parser using Mocha testing library and mocha-junit-reporter module. 
The examples here can also be used as an example of using TestArchiver with Mocha tests
They can be imported using TestArchiver from there by using `--format mocha-junit` command line parameter. 58 | -------------------------------------------------------------------------------- /src/test_archiver/archiver_listeners.py: -------------------------------------------------------------------------------- 1 | import json 2 | from urllib.request import Request, urlopen 3 | 4 | 5 | class DefaultListener: 6 | def __init__(self, archiver): 7 | self.archiver = archiver 8 | self.suites = [] 9 | self.tests = [] 10 | 11 | def suite_result(self, suite): 12 | self.suites.append(suite) 13 | 14 | def test_result(self, test): 15 | self.tests.append(test) 16 | 17 | def end_run(self): 18 | pass 19 | 20 | 21 | class ChangeEngineListener(DefaultListener): 22 | 23 | def __init__(self, archiver, change_engine_url): 24 | super().__init__(archiver) 25 | self.change_engine_url = change_engine_url 26 | 27 | def end_run(self): 28 | self.report_changes(self.tests) 29 | 30 | def report_changes(self, tests): 31 | url = f"{self.change_engine_url}/result/" 32 | request = Request(url) 33 | request.add_header('Content-Type', 'application/json;') 34 | body = json.dumps(self._format_body(tests)) 35 | with urlopen(request, body.encode("utf-8")) as response: 36 | if response.getcode() != 200: 37 | print(f"ERROR: ChangeEngine update failed. 
Return code: {response.getcode()}") 38 | print(response.read()) 39 | 40 | def _filter_tests(self, tests): 41 | return [ 42 | { 43 | 'name': test.full_name, 44 | 'status': test.status, 45 | 'subtype': self.archiver.test_type, 46 | 'repository': self.archiver.repository 47 | } for test in tests if test.status != "SKIPPED" 48 | ] 49 | 50 | def _format_changes(self): 51 | metadata_changes = self._get_metadata_changes() 52 | if metadata_changes: 53 | return metadata_changes 54 | return self.archiver.changes 55 | 56 | def _get_metadata_changes(self): 57 | top_suite = self.suites[-1] 58 | changes = top_suite.metadata['changes'] if 'changes' in top_suite.metadata else None 59 | changes = changes.split('\n') if changes else [] 60 | return changes 61 | 62 | def _format_body(self, tests): 63 | return { 64 | "tests": self._filter_tests(tests), 65 | "changes": self._format_changes(), 66 | "context": self.archiver.execution_context, 67 | "execution_id": self.archiver.execution_id, 68 | } 69 | -------------------------------------------------------------------------------- /tests/unit/test_archive_listener.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from unittest.mock import Mock 3 | 4 | from test_archiver.archiver_listeners import ChangeEngineListener 5 | from test_configs import FAKE_CHANGES_FILE_DATA_2 6 | 7 | 8 | @pytest.fixture 9 | def listener(): 10 | mock_archiver = Mock() 11 | mock_archiver.test_type = 'something' 12 | mock_archiver.repository = 'somewhere' 13 | mock_archiver.execution_context = 'PR' 14 | mock_archiver.execution_id = 'job-name-here' 15 | engine = ChangeEngineListener(mock_archiver, 'tidii') 16 | mock_suite = Mock() 17 | mock_suite.metadata = {'changes': 'path/to/changes'} 18 | engine.suites = [mock_suite] 19 | return engine 20 | 21 | 22 | @pytest.fixture 23 | def listener_changes(listener): 24 | mock_suite = listener.suites.pop() 25 | mock_suite.metadata = {} 26 | listener.suites = 
def test_change_engine_listener_test_filter_skipped(listener):
    """Skipped results must be dropped by _filter_tests."""
    test1 = Mock()
    test1.full_name = "pytest.test_suite.test_a1"
    test1.status = "PASS"
    test2 = Mock()
    test2.full_name = "pytest.test_suite.test_a2"
    test2.status = "SKIPPED"
    tests = listener._filter_tests([test1, test2])
    assert len(tests) == 1, "Skipped test should be filtered out."
    test = tests[0]
    assert test["name"] == "pytest.test_suite.test_a1"


def test_change_engine_listener_test_filter_pass_and_fail(listener):
    """Both PASS and FAIL results must be kept by _filter_tests."""
    test1 = Mock()
    test1.full_name = "pytest.test_suite.test_a1"
    test1.status = "PASS"
    test2 = Mock()
    test2.full_name = "pytest.test_suite.test_a2"
    test2.status = "FAIL"
    tests = listener._filter_tests([test1, test2])
    # Fixed assertion message: it was copy-pasted from the skipped-test case
    # and claimed filtering where none should happen.
    assert len(tests) == 2, "Both passing and failing tests should be kept."
    test = tests[0]
    assert test["name"] == "pytest.test_suite.test_a1"
    test = tests[1]
    assert test["name"] == "pytest.test_suite.test_a2"


def test_change_engine_listener_execution_context(listener):
    """_format_body must include tests, changes, context and execution_id."""
    test1 = Mock()
    test1.full_name = "pytest.test_suite.test_a1"
    test1.status = "PASS"
    body = listener._format_body([test1])
    assert len(body) == 4, "tests, changes and context must be present"
    assert body["changes"] == ['path/to/changes']
    assert body["tests"]
    assert body["context"] == "PR"
    assert body["execution_id"] == "job-name-here"
# pylint: disable=C0103
# Module name "ArchiverRobotListener" doesn't conform to snake_case naming style (invalid-name)
# Because Robot Framework needs it to have the same name as the listener class
# pylint: disable=W0613
# Listener methods have unused arguments

from . import archiver, configs

class ArchiverRobotListener:
    """Robot Framework listener (API v2) that archives results as the run executes.

    Attach with e.g. ``--listener ArchiverRobotListener:config.json`` or with
    explicit database connection arguments.
    """
    ROBOT_LISTENER_API_VERSION = 2

    def __init__(self, config_file_or_database,
                 db_engine=None, user=None, pw=None, host=None, port=5432, adjust_with_system_timezone=False):
        # pylint: disable=too-many-positional-arguments
        # With no db_engine, the first argument is treated as a config file;
        # otherwise it is the database name and the remaining arguments form
        # the connection configuration.
        config = configs.Config()
        if not db_engine:
            config.resolve(file_config=config_file_or_database)
        else:
            config.resolve(file_config={
                'database': config_file_or_database,
                'db_engine': db_engine,
                'user': user,
                'password': pw,
                'host': host,
                'port': port,
                'time_adjust_with_system_timezone': adjust_with_system_timezone})

        database = archiver.database_connection(config)
        self.archiver = archiver.Archiver(database, config)
        self.archiver.test_type = "Robot Framework"
        # rpa/dry_run/generator are filled in lazily from framework messages
        # (see message()/process_settings()) before the first suite starts.
        self.rpa = False
        self.dry_run = False
        self.generator = None

    def start_suite(self, name, attrs):
        """Open a suite; the test run itself is opened lazily on the first suite."""
        if not self.archiver.test_run_id:
            self.archiver.begin_test_run('ArchiverListener',
                                         None,
                                         self.generator,
                                         self.rpa,
                                         self.dry_run)
        self.archiver.begin_suite(name)

    def end_suite(self, name, attrs):
        """Close the current suite with its result attributes."""
        self.archiver.end_suite(attrs)

    def start_test(self, name, attrs):
        """Open a test case."""
        self.archiver.begin_test(name)

    def end_test(self, name, attrs):
        """Close the current test with its result attributes."""
        self.archiver.end_test(attrs)

    def start_keyword(self, name, attrs):
        """Open a keyword or control structure as an archived keyword node."""
        kw_type = attrs['type']
        if kw_type in ('KEYWORD', 'SETUP', 'TEARDOWN', 'GROUP'):
            self.archiver.begin_keyword(
                attrs['kwname'] or kw_type, attrs['libname'], kw_type, attrs['assign'] + attrs['args'])
            return
        # The rest are control structures
        arguments = [attrs['condition']] if attrs.get('condition', None) else []
        arguments += attrs['args']
        variables = attrs.get('variables', None)
        if isinstance(variables, dict):
            # Flatten variables dict to list
            arguments.extend(list(sum(variables.items(), ())))
        elif isinstance(variables, list):
            arguments.extend(variables)
        arguments.extend(attrs.get('values', []))
        self.archiver.begin_keyword(kw_type, '', kw_type, arguments)

    def end_keyword(self, name, attrs):
        """Close the current keyword/control structure."""
        self.archiver.end_keyword(attrs)

    def log_message(self, message):
        """Archive one log message with its level and timestamp."""
        self.archiver.begin_log_message(message['level'], message['timestamp'])
        self.archiver.end_log_message(message['message'])

    def message(self, message):
        """Capture framework-level messages.

        The first message received is stored as the generator string
        (presumably the framework version banner — TODO confirm); later
        messages starting with 'Settings:' are parsed for run settings.
        """
        if not self.generator:
            self.generator = message['message']
        elif message['message'].startswith('Settings:'):
            self.process_settings(message['message'])

    def process_settings(self, settings):
        """Parse the 'Settings:' message into rpa and dry_run flags.

        NOTE(review): assumes a 'DryRun' entry is always present in the
        settings message (KeyError otherwise) — confirm against the Robot
        versions supported.
        """
        settings = dict([row.split(':', 1) for row in settings.split('\n')])

        self.rpa = bool('RPA' in settings and settings['RPA'].strip() == 'True')
        self.dry_run = bool(settings['DryRun'].strip() == 'True')

    def close(self):
        """Finalise the archived test run when the listener is closed."""
        self.archiver.end_test_run()
-- Record the schema version this database was created at.
-- '{applied_by}' is a placeholder substituted by the schema tool before execution.
INSERT INTO schema_updates(schema_version, initial_update, applied_by)
VALUES (3, true, '{applied_by}');

-- A named series of builds, unique per (team, name).
CREATE TABLE test_series (
    id serial PRIMARY KEY,
    name text NOT NULL,
    team text NOT NULL
);
CREATE UNIQUE INDEX unique_test_series_idx ON test_series(team, name);

-- One archived execution of a test set.
CREATE TABLE test_run (
    id serial PRIMARY KEY,
    imported_at timestamp DEFAULT CURRENT_TIMESTAMP,
    archived_using text,
    archiver_version text,
    generator text,
    generated timestamp,
    rpa boolean,
    dryrun boolean,
    -- Soft-hide flag; toggled rather than deleting the run.
    ignored boolean DEFAULT false,
    schema_version int REFERENCES schema_updates(schema_version) NOT NULL
);

-- Maps test runs into series as numbered builds.
CREATE TABLE test_series_mapping (
    series int REFERENCES test_series(id) ON DELETE CASCADE,
    test_run_id int REFERENCES test_run(id) ON DELETE CASCADE,
    build_number int NOT NULL,
    build_id text,
    PRIMARY KEY (series, test_run_id, build_number)
);

-- A suite identity; results are stored per run in suite_result.
CREATE TABLE suite (
    id serial PRIMARY KEY,
    name text,
    full_name text NOT NULL,
    repository text NOT NULL
);
CREATE UNIQUE INDEX unique_suite_idx ON suite(repository, full_name);

-- Per-run result of a suite, split into setup/execution/teardown phases.
CREATE TABLE suite_result (
    suite_id int REFERENCES suite(id) ON DELETE CASCADE NOT NULL,
    test_run_id int REFERENCES test_run(id) ON DELETE CASCADE NOT NULL,
    status text,
    setup_status text,
    execution_status text,
    teardown_status text,
    start_time timestamp,
    elapsed int,
    setup_elapsed int,
    execution_elapsed int,
    teardown_elapsed int,
    -- Fingerprints identify keyword trees (see keyword_tree.fingerprint).
    fingerprint text,
    setup_fingerprint text,
    execution_fingerprint text,
    teardown_fingerprint text,
    execution_path text,
    PRIMARY KEY (test_run_id, suite_id)
);
CREATE UNIQUE INDEX unique_suite_result_idx ON suite_result(start_time, fingerprint);

-- A test case identity, owned by a suite.
CREATE TABLE test_case (
    id serial PRIMARY KEY,
    name text NOT NULL,
    full_name text NOT NULL,
    suite_id int REFERENCES suite(id) ON DELETE CASCADE NOT NULL
);
CREATE UNIQUE INDEX unique_test_case_idx ON test_case(full_name, suite_id);

-- Per-run result of a test case; same phase breakdown as suite_result.
CREATE TABLE test_result (
    test_id int REFERENCES test_case(id) ON DELETE CASCADE NOT NULL,
    test_run_id int REFERENCES test_run(id) ON DELETE CASCADE NOT NULL,
    status text,
    setup_status text,
    execution_status text,
    teardown_status text,
    start_time timestamp,
    elapsed int,
    setup_elapsed int,
    execution_elapsed int,
    teardown_elapsed int,
    critical boolean,

    fingerprint text,
    setup_fingerprint text,
    execution_fingerprint text,
    teardown_fingerprint text,
    execution_path text,
    PRIMARY KEY (test_run_id, test_id)
);

-- Log messages; test_id is NULL for suite-level messages.
CREATE TABLE log_message (
    id serial PRIMARY KEY,
    execution_path text,
    test_run_id int REFERENCES test_run(id) ON DELETE CASCADE NOT NULL,
    test_id int REFERENCES test_case(id) ON DELETE CASCADE,
    suite_id int REFERENCES suite(id) ON DELETE CASCADE NOT NULL,
    timestamp timestamp,
    log_level text NOT NULL,
    message text
);
CREATE INDEX test_log_message_index ON log_message(test_run_id, suite_id, test_id);

-- Free-form name/value metadata attached to a suite for one run.
CREATE TABLE suite_metadata (
    suite_id int REFERENCES suite(id) ON DELETE CASCADE NOT NULL,
    test_run_id int REFERENCES test_run(id) ON DELETE CASCADE NOT NULL,
    name text NOT NULL,
    value text,
    PRIMARY KEY (test_run_id, suite_id, name)
);

-- Tags applied to a test in one run.
CREATE TABLE test_tag (
    test_id int REFERENCES test_case(id) ON DELETE CASCADE NOT NULL,
    test_run_id int REFERENCES test_run(id) ON DELETE CASCADE NOT NULL,
    tag text NOT NULL,
    PRIMARY KEY (test_run_id, test_id, tag)
);
KEY, 127 | keyword text, 128 | library text, 129 | status text, 130 | arguments text[] 131 | ); 132 | 133 | CREATE TABLE tree_hierarchy ( 134 | fingerprint text REFERENCES keyword_tree(fingerprint), 135 | subtree text REFERENCES keyword_tree(fingerprint), 136 | call_index int, 137 | PRIMARY KEY (fingerprint, subtree, call_index) 138 | ); 139 | 140 | CREATE TABLE keyword_statistics ( 141 | test_run_id int REFERENCES test_run(id) ON DELETE CASCADE NOT NULL, 142 | fingerprint text REFERENCES keyword_tree(fingerprint), 143 | calls int, 144 | max_execution_time int, 145 | min_execution_time int, 146 | cumulative_execution_time int, 147 | max_call_depth int, 148 | PRIMARY KEY (test_run_id, fingerprint) 149 | ); 150 | -------------------------------------------------------------------------------- /src/test_archiver/schemas/schema_sqlite.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE schema_updates ( 2 | id integer PRIMARY KEY AUTOINCREMENT, 3 | schema_version int UNIQUE NOT NULL, 4 | applied_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, 5 | initial_update boolean DEFAULT false, 6 | applied_by text 7 | ); 8 | INSERT INTO schema_updates(schema_version, initial_update, applied_by) VALUES (3, 1, '{applied_by}'); 9 | 10 | CREATE TABLE test_series ( 11 | id integer PRIMARY KEY AUTOINCREMENT, 12 | name text NOT NULL, 13 | team text NOT NULL 14 | ); 15 | CREATE UNIQUE INDEX unique_test_series_idx ON test_series(team, name); 16 | 17 | CREATE TABLE test_run ( 18 | id integer PRIMARY KEY AUTOINCREMENT, 19 | imported_at timestamp DEFAULT CURRENT_TIMESTAMP, 20 | archived_using text, 21 | archiver_version text, 22 | generator text, 23 | generated timestamp, 24 | rpa boolean, 25 | dryrun boolean, 26 | ignored boolean DEFAULT false, 27 | schema_version int REFERENCES schema_updates(schema_version) NOT NULL 28 | ); 29 | 30 | CREATE TABLE test_series_mapping ( 31 | series int REFERENCES test_series(id) ON DELETE CASCADE, 32 | 
-- A suite identity; results are stored per run in suite_result.
CREATE TABLE suite (
    id integer PRIMARY KEY AUTOINCREMENT,
    name text,
    full_name text NOT NULL,
    repository text NOT NULL
);
CREATE UNIQUE INDEX unique_suite_idx ON suite(repository, full_name);

-- Per-run result of a suite, split into setup/execution/teardown phases.
CREATE TABLE suite_result (
    suite_id int REFERENCES suite(id) ON DELETE CASCADE NOT NULL,
    test_run_id int REFERENCES test_run(id) ON DELETE CASCADE NOT NULL,
    status text,
    setup_status text,
    execution_status text,
    teardown_status text,
    start_time timestamp,
    elapsed int,
    setup_elapsed int,
    execution_elapsed int,
    teardown_elapsed int,
    fingerprint text,
    setup_fingerprint text,
    execution_fingerprint text,
    teardown_fingerprint text,
    execution_path text,
    PRIMARY KEY (test_run_id, suite_id)
);
CREATE UNIQUE INDEX unique_suite_result_idx ON suite_result(start_time, fingerprint);

-- A test case identity, owned by a suite.
CREATE TABLE test_case (
    id integer PRIMARY KEY AUTOINCREMENT,
    name text NOT NULL,
    full_name text NOT NULL,
    suite_id int REFERENCES suite(id) ON DELETE CASCADE NOT NULL
);
CREATE UNIQUE INDEX unique_test_case_idx ON test_case(full_name, suite_id);

-- Per-run result of a test case; same phase breakdown as suite_result.
CREATE TABLE test_result (
    test_id int REFERENCES test_case(id) ON DELETE CASCADE NOT NULL,
    test_run_id int REFERENCES test_run(id) ON DELETE CASCADE NOT NULL,
    status text,
    setup_status text,
    execution_status text,
    teardown_status text,
    start_time timestamp,
    elapsed int,
    setup_elapsed int,
    execution_elapsed int,
    teardown_elapsed int,
    critical boolean,

    fingerprint text,
    setup_fingerprint text,
    execution_fingerprint text,
    teardown_fingerprint text,
    execution_path text,
    PRIMARY KEY (test_run_id, test_id)
);

-- Log messages; test_id is NULL for suite-level messages.
-- NOTE(review): unlike the postgres schema, this table has no serial id
-- column — confirm this divergence is intentional.
CREATE TABLE log_message (
    execution_path text,
    test_run_id int REFERENCES test_run(id) ON DELETE CASCADE NOT NULL,
    test_id int REFERENCES test_case(id) ON DELETE CASCADE,
    suite_id int REFERENCES suite(id) ON DELETE CASCADE NOT NULL,
    timestamp timestamp,
    log_level text NOT NULL,
    message text
);
CREATE INDEX test_log_message_index ON log_message(test_run_id, suite_id, test_id);

-- Free-form name/value metadata attached to a suite for one run.
CREATE TABLE suite_metadata (
    suite_id int REFERENCES suite(id) ON DELETE CASCADE NOT NULL,
    test_run_id int REFERENCES test_run(id) ON DELETE CASCADE NOT NULL,
    name text NOT NULL,
    value text,
    PRIMARY KEY (test_run_id, suite_id, name)
);

-- Tags applied to a test in one run.
CREATE TABLE test_tag (
    test_id int REFERENCES test_case(id) ON DELETE CASCADE NOT NULL,
    test_run_id int REFERENCES test_run(id) ON DELETE CASCADE NOT NULL,
    tag text NOT NULL,
    PRIMARY KEY (test_run_id, test_id, tag)
);

-- Unique keyword subtrees identified by fingerprint.
-- NOTE(review): arguments is plain text here while postgres uses text[];
-- presumably the archiver serialises the list for SQLite — confirm.
CREATE TABLE keyword_tree (
    fingerprint text PRIMARY KEY,
    keyword text,
    library text,
    status text,
    arguments text
);

-- Parent/child relation between keyword subtrees, ordered by call_index.
CREATE TABLE tree_hierarchy (
    fingerprint text REFERENCES keyword_tree(fingerprint),
    subtree text REFERENCES keyword_tree(fingerprint),
    call_index int,
    PRIMARY KEY (fingerprint, subtree, call_index)
);

-- Per-run aggregated execution statistics for a keyword subtree.
CREATE TABLE keyword_statistics (
    test_run_id int REFERENCES test_run(id) ON DELETE CASCADE NOT NULL,
    fingerprint text REFERENCES keyword_tree(fingerprint),
    calls int,
    max_execution_time int,
    min_execution_time int,
    cumulative_execution_time int,
    max_call_depth int,
    PRIMARY KEY (test_run_id, fingerprint)
);
__pycache__/ 3 | *.py[codz] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py.cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 
95 | #Pipfile.lock 96 | 97 | # UV 98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | #uv.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | #poetry.lock 109 | #poetry.toml 110 | 111 | # pdm 112 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 113 | # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python. 114 | # https://pdm-project.org/en/latest/usage/project/#working-with-version-control 115 | #pdm.lock 116 | #pdm.toml 117 | .pdm-python 118 | .pdm-build/ 119 | 120 | # pixi 121 | # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control. 122 | #pixi.lock 123 | # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one 124 | # in the .venv directory. It is recommended not to include this directory in version control. 125 | .pixi 126 | 127 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 128 | __pypackages__/ 129 | 130 | # Celery stuff 131 | celerybeat-schedule 132 | celerybeat.pid 133 | 134 | # SageMath parsed files 135 | *.sage.py 136 | 137 | # Environments 138 | .env 139 | .envrc 140 | .venv 141 | env/ 142 | venv/ 143 | ENV/ 144 | env.bak/ 145 | venv.bak/ 146 | 147 | # Spyder project settings 148 | .spyderproject 149 | .spyproject 150 | 151 | # Rope project settings 152 | .ropeproject 153 | 154 | # mkdocs documentation 155 | /site 156 | 157 | # mypy 158 | .mypy_cache/ 159 | .dmypy.json 160 | dmypy.json 161 | 162 | # Pyre type checker 163 | .pyre/ 164 | 165 | # pytype static type analyzer 166 | .pytype/ 167 | 168 | # Cython debug symbols 169 | cython_debug/ 170 | 171 | # PyCharm 172 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 173 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 174 | # and can be added to the global gitignore or merged into this file. For a more nuclear 175 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 176 | #.idea/ 177 | 178 | # Abstra 179 | # Abstra is an AI-powered process automation framework. 180 | # Ignore directories containing user credentials, local state, and settings. 181 | # Learn more at https://abstra.io/docs 182 | .abstra/ 183 | 184 | # Visual Studio Code 185 | # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore 186 | # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore 187 | # and can be added to the global gitignore or merged into this file. 
However, if you prefer, 188 | # you could uncomment the following to ignore the entire vscode folder 189 | # .vscode/ 190 | 191 | # Ruff stuff: 192 | .ruff_cache/ 193 | 194 | # PyPI configuration file 195 | .pypirc 196 | 197 | # Marimo 198 | marimo/_static/ 199 | marimo/_lsp/ 200 | __marimo__/ 201 | 202 | # Streamlit 203 | .streamlit/secrets.toml 204 | 205 | .DS_Store 206 | 207 | # Project sets the dynamic version to this file 208 | version.txt 209 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | 2 | [build-system] 3 | requires = ["pdm-backend"] 4 | build-backend = "pdm.backend" 5 | 6 | [project] 7 | name = "testarchiver" 8 | dynamic = ["version"] 9 | description = "Tools for serialising test results to SQL database" 10 | authors = [ 11 | {name = "Tommi Oinonen", email = "tommi.oinonen@siili.com"}, 12 | ] 13 | dependencies = [ 14 | "psycopg2-binary>=2.8.5", 15 | ] 16 | requires-python = ">=3.9" 17 | readme = "README.md" 18 | license = {file = "LICENSE"} 19 | classifiers = [ 20 | "Programming Language :: Python :: 3", 21 | "Operating System :: OS Independent", 22 | "Topic :: Software Development :: Testing", 23 | ] 24 | 25 | [project.urls] 26 | Homepage = "https://github.com/salabs/TestArchiver" 27 | Repository = "https://github.com/salabs/TestArchiver" 28 | Issues = "https://github.com/salabs/TestArchiver/issues" 29 | 30 | [project.scripts] 31 | testarchiver = "test_archiver.output_parser:main" 32 | testarchive_schematool = "test_archiver.database:main" 33 | 34 | 35 | [tool.pdm] 36 | distribution = true 37 | 38 | [tool.pdm.version] 39 | source = "scm" 40 | write_to = "test_archiver/version.txt" 41 | 42 | [tool.pdm.dev-dependencies] 43 | dev = [ 44 | "pytest>=8.3.5", 45 | "pylint>=3.2.7", 46 | "setuptools>=75.3.2", 47 | "pytest-cov>=6.2.1", 48 | "robotframework==7.*", 49 | ] 50 | 51 | 52 | [tool.pdm.scripts] 53 | 54 | echo.help = 
"Echo some message" 55 | echo.cmd = "echo" 56 | 57 | utest.help = "Run unit tests" 58 | utest.cmd = [ 59 | "pytest", 60 | "--cov-report=term", 61 | "--cov=test_archiver", 62 | "tests/unit/", 63 | "--junit-xml=unittest_output.xml", 64 | ] 65 | 66 | itest.help = "Run integration tests parsing results to databases" 67 | itest.cmd = "pytest tests/integration/" 68 | 69 | lint = "pylint src/" 70 | 71 | check_code.composite = [ 72 | "lint", 73 | "utest", 74 | "echo 'All code validations passed'", 75 | ] 76 | 77 | # These are helpers to populate fixture archive for testing/demo purposes # 78 | ########################################################################### 79 | 80 | # Robot fixture helpers 81 | check_fixture_config.help = "Check fixture config json works" 82 | check_fixture_config.cmd = "testarchive_schematool --config {args:fixture_config_sqlite.json}" 83 | 84 | robot_fixture_run_with_listener.help = "Populate robot fixture archive using RobotListener" 85 | robot_fixture_run_with_listener.cmd = [ 86 | "robot", 87 | "--listener", "test_archiver.ArchiverRobotListener:{args:fixture_config_sqlite.json}", 88 | "--pythonpath", "robot_tests/libraries:robot_tests/resources:src/", 89 | "--outputdir", "robot_tests/listener", 90 | "--metadata", "team:TestArchiver", 91 | "--metadata", "series:RobotListener", 92 | "--metadata", "series2:Fixture", 93 | "--include=variables", 94 | "--exclude", "sleep", 95 | "--nostatusrc", 96 | "--console=none", 97 | "robot_tests/tests", 98 | ] 99 | 100 | robot_listener_parse.help = "Populate robot fixture archive by parsing last fixture suite output" 101 | robot_listener_parse.cmd = [ 102 | "testarchiver", 103 | "robot_tests/listener/output.xml", 104 | "--config {args:fixture_config_sqlite.json}", 105 | "--repository", "TestArchiver", 106 | "--team", "TestArchiver", 107 | "--series", "Parser", 108 | "--series", "Fixture", 109 | ] 110 | 111 | robot_fixture_run.help = "Run robot fixture suite" 112 | robot_fixture_run.cmd = [ 113 | "robot", 
114 | "--console=none", 115 | "--pythonpath", "robot_tests/libraries:robot_tests/resources", 116 | "--outputdir", "robot_tests/normal", 117 | "--exclude", "sleep", 118 | "--nostatusrc", 119 | "robot_tests/tests", 120 | ] 121 | 122 | robot_fixture_parse.help = "Populate robot fixture archive by parsing last fixture suite output" 123 | robot_fixture_parse.cmd = [ 124 | "testarchiver", 125 | "robot_tests/normal/output.xml", 126 | "--config {args:fixture_config_sqlite.json}", 127 | "--repository", "TestArchiver", 128 | "--team", "TestArchiver", 129 | "--series", "Parser", 130 | "--series", "Fixture", 131 | ] 132 | 133 | robot_fixture_populate.help = "Populate full robot fixture archive with 10 builds" 134 | robot_fixture_populate.composite = [ 135 | "check_fixture_config {args:fixture_config_sqlite.json}", 136 | "robot_fixture_run_with_listener {args:fixture_config_sqlite.json}", # Test run 1 137 | "robot_fixture_run", # Test run 2 138 | "robot_fixture_parse {args:fixture_config_sqlite.json}", 139 | "robot_fixture_run", # Test run 3 140 | "robot_fixture_parse {args:fixture_config_sqlite.json}", 141 | "robot_fixture_run", # Test run 4 142 | "robot_fixture_parse {args:fixture_config_sqlite.json}", 143 | "robot_fixture_run", # Test run 5 144 | "robot_fixture_parse {args:fixture_config_sqlite.json}", 145 | "robot_fixture_run", # Test run 6 146 | "robot_fixture_parse {args:fixture_config_sqlite.json}", 147 | "robot_fixture_run", # Test run 7 148 | "robot_fixture_parse {args:fixture_config_sqlite.json}", 149 | "robot_fixture_run", # Test run 8 150 | "robot_fixture_parse {args:fixture_config_sqlite.json}", 151 | "robot_fixture_run", # Test run 9 152 | "robot_fixture_parse {args:fixture_config_sqlite.json}", 153 | "robot_fixture_run", # Test run 10 154 | "robot_fixture_parse {args:fixture_config_sqlite.json}", 155 | "echo 'Fixture archive populated'", 156 | ] 157 | 158 | # Pytest fixture helpers 159 | pytest_fixture_run.help = "Run pytest fixture" 160 | pytest_fixture_run.cmd 
class Database:
    """PostgreSQL query layer for the archive API server.

    Every query method returns a pair ``(query, formatter)``: the result of
    ``TornadoSession.query`` plus a callable that reshapes the resulting rows
    for the handler (``list_of_dicts``, ``single_dict``, ``metadata_dict`` or
    a query-specific mapper).
    """

    def __init__(self, host, dbname, user, password):
        # Escape password as it may contain special characters.
        # Strip whitespace from other parameters.
        # Strip trailing '/' from host.
        connection_uri = 'postgresql://{user}:{pw}@{host}/{dbname}'.format(
            user=user.strip(),
            pw=urllib.parse.quote_plus(password),
            host=host.strip().rstrip('/'),
            dbname=dbname.strip(),
        )
        self.session = queries.TornadoSession(connection_uri)

    def test_series(self):
        """All test series rows."""
        return self.session.query(sql_queries.TEST_SERIES), list_of_dicts

    def teams(self):
        """Series grouped per team; relies on rows being ordered by team."""
        def series_by_team(rows):
            # Walk the team-ordered rows, starting a new team dict whenever
            # the team name changes.
            all_series = list_of_dicts(rows)
            teams = []
            current_team_name = None
            current_team = None
            for series in all_series:
                if current_team_name != series['team']:
                    if current_team:
                        teams.append(current_team)
                    current_team = {'name': series['team'], 'series_count': 0, 'series': []}
                current_team['series_count'] += 1
                current_team['series'].append(series)
                current_team_name = series['team']
            if current_team:
                teams.append(current_team)
            return teams
        return self.session.query(sql_queries.TEST_SERIES_BY_TEAMS), series_by_team

    def last_update(self):
        """The most recently imported test run row."""
        sql = "SELECT * FROM test_run ORDER BY id DESC LIMIT 1"
        return self.session.query(sql), single_dict

    def db_type(self):
        """Generator/archiver info of the latest test run."""
        sql = "SELECT generator, archived_using FROM test_run ORDER BY id DESC LIMIT 1"
        return self.session.query(sql), single_dict

    def builds(self, series, build, last, offset):
        """Build listing for a series with paging."""
        return self.session.query(sql_queries.builds(series, build, last, offset)), list_of_dicts

    def builds_by_time(self, series, build, last, offset, searchTimeStart, searchTimeEnd):
        """Build listing restricted to a time window."""
        return self.session.query(sql_queries.builds_by_time(series, build, last, offset, searchTimeStart, searchTimeEnd)), list_of_dicts

    def build_results(self, series, build_num):
        """Results of one build in a series."""
        return self.session.query(sql_queries.build_results(series, build_num)), list_of_dicts

    def test_run_results(self, test_run_id):
        """All results of a single test run."""
        return self.session.query(sql_queries.test_run_results(test_run_id)), list_of_dicts

    def test_run_data(self, test_run_id):
        """Metadata row of a single test run."""
        return self.session.query(sql_queries.test_run_data(test_run_id)), single_dict

    def ignore_test_run(self, test_run_id):
        """Toggle the 'ignored' flag of a run; int() cast guards the format injection."""
        sql = "UPDATE test_run SET ignored=NOT ignored WHERE id={} RETURNING id, ignored"
        return self.session.query(sql.format(int(test_run_id))), single_dict

    def ignore_build(self, build_id):
        """Mark all runs mapped to a build number as ignored.

        NOTE(review): unlike ignore_test_run, this UPDATE has no RETURNING
        clause yet is paired with single_dict — confirm the handler tolerates
        an empty result. Also the parameter is named build_id but filters on
        build_number.
        """
        sql = "UPDATE test_run SET ignored = true WHERE id IN (SELECT test_run_id FROM test_series_mapping WHERE build_number IN ({}))"
        return self.session.query(sql.format(int(build_id))), single_dict

    def single_test_case_results(self, test_run_id, test_id):
        """Result of one test case in one run."""
        return self.session.query(sql_queries.single_test_result(test_run_id, test_id)), single_dict

    def parent_suite_results(self, test_run_id, test_id):
        """Results of the suites enclosing a test in one run."""
        return self.session.query(sql_queries.parent_suite_results(test_run_id, test_id)), list_of_dicts

    def log_message_map(self, test_run_id):
        """Log messages of a run, grouped by (suite_id, test_id)."""
        def log_message_mapper(rows):
            messages = list_of_dicts(rows)
            message_map = defaultdict(lambda: [])
            for message in messages:
                key = (message['suite_id'], message['test_id'])
                message_map[key].append(message)
            return message_map
        return self.session.query(sql_queries.log_messages(test_run_id)), log_message_mapper

    def build_metadata(self, series, build_num):
        """Suite metadata for one build."""
        return self.session.query(sql_queries.build_metadata(series, build_num)), metadata_dict

    def test_run_metadata(self, test_run_id):
        """Suite metadata for one run."""
        return self.session.query(sql_queries.test_run_metadata(test_run_id)), metadata_dict

    def included_in_builds(self, test_run_id):
        """Builds that include the given test run."""
        return self.session.query(sql_queries.included_in_builds(test_run_id)), list_of_dicts

    def suite_result_statistics(self, series, last, offset, build_num):
        """Aggregate suite status ratios over the selected builds."""
        sql = sql_queries.status_ratios('suite', series, last, offset, build_num, per_build=False)
        return self.session.query(sql), single_dict

    def test_result_statistics(self, series, last, offset, build_num):
        """Aggregate test status ratios over the selected builds."""
        sql = sql_queries.status_ratios('test', series, last, offset, build_num, per_build=False)
        return self.session.query(sql), single_dict

    def suite_result_statistics_per_build(self, series, last, offset, build_num):
        """Suite status ratios, one row per build."""
        sql = sql_queries.status_ratios('suite', series, last, offset, build_num, per_build=True)
        return self.session.query(sql), list_of_dicts

    def test_result_statistics_per_build(self, series, last, offset, build_num):
        """Test status ratios, one row per build."""
        sql = sql_queries.status_ratios('test', series, last, offset, build_num, per_build=True)
        return self.session.query(sql), list_of_dicts

    def recently_failing_tests(self, top, series, build_num, last, offset):
        """Top recently failing tests in the selected builds."""
        sql = sql_queries.recently_failing_tests(top, series, build_num, last, offset)
        return self.session.query(sql), list_of_dicts

    def recently_failing_suites(self, top, series, build_num, last, offset):
        """Top recently failing suites in the selected builds."""
        sql = sql_queries.recently_failing_suites(top, series, build_num, last, offset)
        return self.session.query(sql), list_of_dicts

    def keyword_tree(self, fingerprint):
        """One keyword tree row; uses a parameterized query."""
        sql = "SELECT * FROM keyword_tree WHERE fingerprint=%(fingerprint)s"
        return self.session.query(sql, {'fingerprint': fingerprint}), single_dict

    def subtrees(self, fingerprint):
        """Direct subtrees of a keyword tree."""
        return self.session.query(sql_queries.SUBTREES, {'fingerprint': fingerprint}), list_of_dicts

    def tree_execution_measures(self, fingerprint, series_id, build_num, last, offset):
        """Aggregated execution measures of a keyword tree."""
        sql = sql_queries.tree_execution_measures(fingerprint, series_id, build_num, last, offset)
        return self.session.query(sql), single_dict


def single_dict(rows):
    """First row as a dict, or None when the result set is empty."""
    return list_of_dicts(rows)[0] if rows else None
datetime.timedelta)): 146 | row[key] = str(row[key]) 147 | results.append(row) 148 | return results 149 | 150 | 151 | def metadata_dict(rows): 152 | metadata = defaultdict(lambda: {}) 153 | for row in rows: 154 | key = (row['suite_id'], row['test_run_id']) 155 | metadata[key][row['name']] = row['value'] 156 | return metadata 157 | 158 | 159 | if __name__ == '__main__': 160 | pass 161 | -------------------------------------------------------------------------------- /src/test_archiver/schemas/README.md: -------------------------------------------------------------------------------- 1 | # What is archived? 2 | 3 | Since the TestArchivers data model mostly follows the data model of Robot Framework, this generic model mapping also works as the model mapping for the Robot Framework in general. 4 | 5 | ## Test items 6 | 7 | ### Test run 8 | 9 | Each test set execution or parsed output file is mapped as a test_run in the database. Each test run receives a unique id and is fingerprinted so that each test run can only be insert once. All result items have a reference to a test run that they belong to. Test runs have some metadata: 10 | 11 | - `imported_at` timestamp when the test run was imported to the archive 12 | - `archived_using` the archiver method used to import the data (i.e. Parser type or listener) 13 | - `archiver_version` version of the archiver that was used to import the results 14 | - `generator` metadata on the tool that produced the parsed output file (for Robot Framework the the robot and python version that were used to produce the output.xml) 15 | - `generated` timestamp when the parsed output file was generated 16 | - `rpa` boolean whether the execution was actually a RPA (Robotic Process Automation task). This is mainly applicable to Robot Framework. 
- `dryrun` boolean whether this execution was actually a dryrun and not real tests against any system under test
- `ignored` boolean whether this run is for some reason or another ignored as invalid results and therefore by default should be hidden from most APIs

### Test series

Test series are used to collect test results into series of builds that comprise the actual test runs. A test series can correspond to a CI job, version control branch, release or any other meaningful sequence of consecutive executions of a set of test cases.

- `id`
- `name`
- `team`

### Builds

Builds are sets of the runs that are mapped to some test series with a specific build number.

- `build_number` every build uses either the build number specified when the results were archived or one automatically assigned by the archiver when not specified. The build number should form an increasing series for one test series but may skip values. Ideally these should not skip values.
- `build_id` a possible identifier string of the build, ideally allows for mapping the builds for example to the builds in a CI environment
- `test_runs`

### Suite

Suites are collections of test cases that contain other subsuites or test cases. The suites can have their own setups and teardowns. The suites usually represent the folders in the test code directory. In TestArchiver each suite is supposed to receive its own unique id based on the `full_name` and the source `repository`. For most frameworks (like Robot Framework) the full name is the full path of parent suite names joined by `.` characters.

Suite fields:

- `id`
- `name`
- `full_name` fully identifiable suite path of this suite
- `repository` used to identify suites that otherwise have exactly the same full_name but coming from e.g.
different projects

### Test case

Test case fields:

- `id`
- `name`
- `full_name` fully identifiable name at least in the context of the parent suite but often (e.g. Robot Framework) the name prefixed by the full name of the parent suite
- `suite_id` points to the parent suite containing the test case

### Result objects (for both suites and test cases)

For both suites and test cases the result objects contain the start_time, status, fingerprint and elapsed time. The status, fingerprint and elapsed time are also presented separately for the setup execution, actual test case execution and teardown execution when that kind of data is available from the framework.

Result fields are:

- `status` General status of the item

- `setup_status` status of the possible setup

- `execution_status` status of the actual test execution or the combined status of the subsuites or test cases of a suite

- `teardown_status` status of the possible teardown

- `setup_fingerprint` fingerprint of the keyword tree of the item setup (see: Fingerprints and keyword tree)

- `execution_fingerprint` fingerprint of the actual test execution or the combined fingerprint of the subsuites or test cases of a suite (see: Fingerprints and keyword tree)

- `teardown_fingerprint` fingerprint of the keyword tree of the item teardown (see: Fingerprints and keyword tree)

- `fingerprint` Fingerprint of the result, hash calculated from the combination of item name and subfingerprints

- `execution_path` Execution path of the result i.e. where in the execution tree of the test run the item was executed. E.g. `s1-s2-t3` means the third test in the second subsuite of the top suite.
- `start_time` timestamp

- `elapsed` total time of the item execution (milliseconds)

- `setup_elapsed` total time of the item setup (milliseconds)

- `execution_elapsed` total time of the actual test execution or the combined elapsed time of the subsuites or test cases of a suite (milliseconds)

- `teardown_elapsed` total time of the item teardown (milliseconds)

- `critical` whether the test is critical or not (boolean, Robot Framework specific; null in other cases)

### Data linked to results objects

- `log_messages` timestamp, log_level, execution path and message string up to 2000 characters

- `test_tags` tags set for the test case

- `suite_metadata` name-value pairs that are tied to specific suites. Metadata for the top level suite is considered related to the entire test run.

## Fingerprints and Keyword trees

Tests usually consist of steps that can consist of substeps that form a tree structure. For each of these trees, TestArchiver calculates a sha1 fingerprint that represents that particular subtree. In the case of Robot Framework the tree for keywords (that represent the substeps of the execution) is calculated from:

- keyword name
- library
- status
- arguments
- fingerprints for sub keywords called

How the fingerprints are calculated for other frameworks depends on what data of the substeps is available. For many frameworks there are no substeps reported when the test case passes but when failures occur the fingerprint of that error is usually used.

The fingerprints can be used to compare executions of test cases. When the fingerprints differ between two consecutive executions of the same test case we can infer that the execution of the test case changed somehow.
On the other hand, if two executions of a test case fail with the same fingerprint, the test encountered a similar problem (possibly the same issue).

## Schema versioning
From version 2.0.0 onwards the tool will manage and enforce that the schema version of the database matches that of the archiver. The tool can perform the schema updates when explicitly allowed. But in most cases it is recommended to run the updates manually using the `database.py` script. The schema version and all the updates performed are recorded in the `schema_updates` table. The updates are categorized into major and minor updates, and allowing each type of update is handled separately. Minor (`--allow-minor-schema-updates`) updates should only include changes that keep the database compatible for anyone reading the archive. Major (`--allow-major-schema-updates`) updates can include changes that can be incompatible for services reading the database.

```
python3 test_archiver/database.py --database test_archive.db --allow-major-schema-updates
```

# Fixture Robot Framework tests

The fixture tests are used to generate test data for the archiver and the same test data is assumed by the [archiver API server](/archive_api_server) tests. Here you can find the documentation on how these test outputs are mapped to the archiver's data model. The script [run_fixture_robot.sh](/run_fixture_robot.sh) executes the Robot Framework [fixture test set](/robot_tests/) 10 times and parses those results into a test database.

## Test runs to builds mapping

The fixture test set is executed 10 times. Once using the Robot listener and then 9 times using the parser.
128 | 129 | | Series name | Fixture | Robot listener | Parser | All builds | 130 | | ------------------------- | ------- | -------------- | ------ | ---------- | 131 | | **Test run/build number** | | | | | 132 | | Run1 | 1 | 1 | - | 1 | 133 | | Run2 | 2 | - | 1 | 2 | 134 | | Run3 | 3 | - | 2 | 3 | 135 | | Run4 | 4 | - | 3 | 4 | 136 | | Run5 | 5 | - | 4 | 5 | 137 | | Run6 | 6 | - | 5 | 6 | 138 | | Run7 | 7 | - | 6 | 7 | 139 | | Run8 | 8 | - | 7 | 8 | 140 | | Run9 | 9 | - | 8 | 9 | 141 | | Run10 | 10 | - | 9 | 10 | 142 | -------------------------------------------------------------------------------- /tests/integration/test_robot_framework_parsing.py: -------------------------------------------------------------------------------- 1 | 2 | import unittest 3 | import tempfile 4 | from pathlib import Path 5 | 6 | import robot 7 | 8 | from test_archiver.output_parser import parse_xml 9 | from test_archiver.configs import Config 10 | from test_archiver.database import BaseDatabase, PostgresqlDatabase 11 | from test_archiver.database import get_connection, get_connection_and_check_schema 12 | 13 | 14 | EXPECTED_SUITE_FINGERPRINTS = ( 15 | (28, "40f5672430d118af81cebe83ae19215bfa00c43b", "c5d127ec27b3e7cee7a1468cb7402ae9d5da43a8", "Variables"), 16 | (27, "626b7a1d92583433bb5aeeeeb06b768f8b688dea", "626b7a1d92583433bb5aeeeeb06b768f8b688dea", "Passing Suite2"), 17 | (26, "f605ea592cf7dd82cf416034b602262503453223", "f605ea592cf7dd82cf416034b602262503453223", "Passing Suite1"), 18 | (25, "c2a3400d50e0a52a8835b664ee3851d49b03082e", "c2a3400d50e0a52a8835b664ee3851d49b03082e", "Teardown Failure"), 19 | (24, "9fb1c4d300b79fa95199209f8d32a41d3802c3ad", "9fb1c4d300b79fa95199209f8d32a41d3802c3ad", "Skipping"), 20 | (23, "437154e64daa5673d1f1dfed7b197e5ec3324ebc", "437154e64daa5673d1f1dfed7b197e5ec3324ebc", "Passing tests"), 21 | (22, "3f9fe7b5b308fa539fcd944b245f5057438973bc", "3f9fe7b5b308fa539fcd944b245f5057438973bc", "Tagging"), 22 | (21, "964b22820abc48e24cd40c77376aabbc0e8a1a04", 
"964b22820abc48e24cd40c77376aabbc0e8a1a04", "Empty"), 23 | (20, "86cd41aec1024c33708f567df61bd98596612435", "86cd41aec1024c33708f567df61bd98596612435", "Embedded"), 24 | (19, "35a4859cbb96002f38f34910cf07e70a39fe2489", "35a4859cbb96002f38f34910cf07e70a39fe2489", "Documents"), 25 | (18, "89000e1ca6c885adad34ac26180b48751bc2191e", "89000e1ca6c885adad34ac26180b48751bc2191e", "Lower Suite"), 26 | (17, "6b7661113a2fe813ea92318813929019b1e0a3bb", "6b7661113a2fe813ea92318813929019b1e0a3bb", "Logging"), 27 | (16, "3ba443851e42cd88e8592eb7bf3860e16947dcb8", "3ba443851e42cd88e8592eb7bf3860e16947dcb8", "Failing tests"), 28 | (15, "0363aa40786bcbe4e2b5a047d6f95c393f75cebe", "0363aa40786bcbe4e2b5a047d6f95c393f75cebe", "Data-Driven"), 29 | (14, "ae39ac2af17cc96c78e16f2d77740fd196f3fe90", "ae39ac2af17cc96c78e16f2d77740fd196f3fe90", "Top Suite"), 30 | # (13, "8cc2febd046cba652abd871d359d667f09919f6a", "Random Pass"), 31 | # (12, "cfe075f0ea543b599d50d9f343cb777d0a110327", "Flaky"), 32 | # (11, "98da3249f2df4e6ac55292741b0976108dbe070c", "Bigrandom"), 33 | # (10, "df8c6f0cbe9b443cf5204ffdc148dd25484f7f9c", "Randomized Suite"), 34 | (9, "17c89565ddf7e7933f6520cfb2db67fb71a1b2b9", "485965eadb7125a53463303cda316042414988fe", "Errors"), 35 | (8, "e53f0c275eb32ade2f7d49fd50e0fde642f95ad2", "e53f0c275eb32ade2f7d49fd50e0fde642f95ad2", "While Loops"), 36 | (7, "a977de5415ac04b2b25642c6f2d7e2ee5aac0514", "a977de5415ac04b2b25642c6f2d7e2ee5aac0514", "Try Except"), 37 | (6, "ab1272aaec0bc45693def5fca31932a50925ded0", "ab1272aaec0bc45693def5fca31932a50925ded0", "Other Control Structures"), 38 | (5, "0cf39c21908fcfd2ea69038dbacba78619abfd54", "0cf39c21908fcfd2ea69038dbacba78619abfd54", "If Else"), 39 | (4, "582a72f2fa8a798466f82f820c3a9c2209795e1a", "582a72f2fa8a798466f82f820c3a9c2209795e1a", "Grouped Templates"), 40 | (3, "a642b51f46ca3f3169b431db8208e3af183648f4", "a642b51f46ca3f3169b431db8208e3af183648f4", "For Loops"), 41 | (2, "5a6a1de73088bce20ee3bf7da48c4cf620801888", 
"5a6a1de73088bce20ee3bf7da48c4cf620801888", "Control Structures"), 42 | #(1, "001ad0634fab318d8823fa722535b447e2c641b8", "Tests"), 43 | ) 44 | 45 | class RobotFixtureTests(unittest.TestCase): 46 | 47 | 48 | def check_fixture_suite_fingerprints(self, connection: BaseDatabase, using_listener: bool): 49 | for suite_id, listener_fingerprint, parser_fingerprint, name in EXPECTED_SUITE_FINGERPRINTS: 50 | self.assertEqual(connection.fetch_one_value('suite', 'name', where_data={'id': suite_id}), name) 51 | fingerprint = listener_fingerprint if using_listener else parser_fingerprint 52 | self.assertEqual( 53 | connection.fetch_one_value('suite_result', 'fingerprint', where_data={'suite_id': suite_id}), 54 | fingerprint, f"Suite '{name}' has an unexpected fingerprint") 55 | 56 | def check_fixture_content(self, connection: BaseDatabase, using_listener: bool): 57 | self.assertEqual(connection.get_row_count('test_run'), 1) 58 | self.assertEqual(connection.get_row_count('test_case'), 75) 59 | self.assertEqual(connection.get_row_count('test_result'), 75) 60 | self.check_fixture_suite_fingerprints(connection, using_listener) 61 | 62 | def fetch_full_fixture_fingerprint(self, connection: BaseDatabase) -> str: 63 | # First check that Full fixture suite is suite id 1 64 | self.assertEqual(connection.fetch_one_value('suite', 'name', where_data={'id': 1}), 'Tests') 65 | return connection.fetch_one_value('suite_result', 'fingerprint', where_data={'suite_id': 1}) 66 | 67 | def normal_fixture_run(self): 68 | arguments = [ 69 | "--console=none", 70 | "--pythonpath=robot_tests/libraries:robot_tests/resources", 71 | "--outputdir=robot_tests/normal", 72 | "--exclude=sleep", 73 | "--nostatusrc", 74 | "robot_tests/tests", 75 | ] 76 | robot.run_cli(arguments, exit=False) 77 | 78 | 79 | class RobotFixtureArchivingPostgresTests(RobotFixtureTests): 80 | 81 | def setUp(self): 82 | self.connection: PostgresqlDatabase | None = None 83 | 84 | def tearDown(self): 85 | # pylint: disable=protected-access 
86 | if self.connection: 87 | self.connection._connection.close() 88 | 89 | def _get_postgres_fixture_config(self): 90 | config = Config() 91 | config.resolve(file_config='fixture_config_postgres.json') 92 | return config 93 | 94 | def _clear_postgres_fixture_database(self): 95 | # pylint: disable=protected-access 96 | config = self._get_postgres_fixture_config() 97 | connection = None 98 | try: 99 | connection = get_connection_and_check_schema(config) 100 | connection._execute("DROP OWNED BY current_user;") 101 | connection._connection.commit() 102 | finally: 103 | if connection: 104 | connection._connection.close() 105 | 106 | def test_robot_fixture_with_listener(self): 107 | self._clear_postgres_fixture_database() 108 | arguments = [ 109 | "--listener=test_archiver.ArchiverRobotListener:fixture_config_postgres.json", 110 | "--console=none", 111 | "--pythonpath=robot_tests/libraries:robot_tests/resources:src/", 112 | "--outputdir=robot_tests/listener", 113 | "--exclude=sleep", 114 | "--nostatusrc", 115 | "robot_tests/tests", 116 | ] 117 | robot.run_cli(arguments, exit=False) 118 | 119 | config = self._get_postgres_fixture_config() 120 | self.connection = get_connection_and_check_schema(config) 121 | self.check_fixture_content(self.connection, using_listener=True) 122 | 123 | def test_parsing_robot_fixture(self): 124 | self.normal_fixture_run() 125 | self._clear_postgres_fixture_database() 126 | 127 | config = self._get_postgres_fixture_config() 128 | self.connection = get_connection_and_check_schema(config) 129 | parse_xml("robot_tests/normal/output.xml", 'robot', self.connection, config) 130 | self.check_fixture_content(self.connection, using_listener=False) 131 | 132 | 133 | class RobotFixtureArchivingSqliteTests(RobotFixtureTests): 134 | 135 | def test_robot_fixture_with_listener(self): 136 | config = Config() 137 | with tempfile.TemporaryDirectory() as temp_dir: 138 | arguments = [ 139 | f"--listener=test_archiver.ArchiverRobotListener:{Path(temp_dir) / 
'fixture.db'}:sqlite", 140 | "--console=none", 141 | "--pythonpath=robot_tests/libraries:robot_tests/resources:src/", 142 | "--outputdir=robot_tests/listener", 143 | "--exclude=sleep", 144 | "--nostatusrc", 145 | "robot_tests/tests", 146 | ] 147 | robot.run_cli(arguments, exit=False) 148 | 149 | config.resolve(file_config={"db_engine": "sqlite", "database": Path(temp_dir) / "fixture.db"}) 150 | connection = get_connection(config) 151 | self.check_fixture_content(connection, using_listener=True) 152 | 153 | def test_parsing_robot_fixture(self): 154 | self.normal_fixture_run() 155 | config = Config() 156 | with tempfile.TemporaryDirectory() as temp_dir: 157 | config.resolve(file_config={"db_engine": "sqlite", "database": Path(temp_dir) / "fixture.db"}) 158 | connection = get_connection_and_check_schema(config) 159 | parse_xml("robot_tests/normal/output.xml", 'robot', connection, config) 160 | self.check_fixture_content(connection, using_listener=False) 161 | 162 | def test_listener_and_parser_create_same_fixture_fingerprint(self): 163 | listener_fingerprint = None 164 | parser_fingerprint = None 165 | config = Config() 166 | with tempfile.TemporaryDirectory() as temp_dir: 167 | arguments = [ 168 | f"--listener=test_archiver.ArchiverRobotListener:{Path(temp_dir) / 'fixture.db'}:sqlite", 169 | "--console=none", 170 | "--pythonpath=robot_tests/libraries:robot_tests/resources:src/", 171 | "--outputdir=robot_tests/listener", 172 | "--exclude=sleep", 173 | "--exclude=listener_parser_mismatch", 174 | "--nostatusrc", 175 | "robot_tests/tests", 176 | ] 177 | robot.run_cli(arguments, exit=False) 178 | 179 | config.resolve(file_config={"db_engine": "sqlite", "database": Path(temp_dir) / "fixture.db"}) 180 | connection = get_connection(config) 181 | listener_fingerprint = self.fetch_full_fixture_fingerprint(connection) 182 | 183 | config = Config() 184 | with tempfile.TemporaryDirectory() as temp_dir: 185 | config.resolve(file_config={"db_engine": "sqlite", "database": 
Path(temp_dir) / "fixture.db"}) 186 | connection = get_connection_and_check_schema(config) 187 | parse_xml("robot_tests/listener/output.xml", 'robot', connection, config) 188 | parser_fingerprint = self.fetch_full_fixture_fingerprint(connection) 189 | 190 | self.assertEqual(listener_fingerprint, parser_fingerprint) 191 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /archive_api_server/tests/robot/api/basic.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Library REST http://localhost:${PORT} 3 | 4 | *** Variables *** 5 | ${PORT}= 8888 6 | ${TARGET_SERIES}= 2 # Should be the 'Fixture' series 7 | 8 | *** Test cases *** 9 | 10 | Teams data 11 | GET /data/teams/ 12 | Array $.teams minItems=1 13 | String $.teams[*].name 14 | Valid series data $.teams[*] 15 | 16 | Test series data 17 | GET /data/series/ 18 | Valid series data $ 19 | 20 | Builds data 21 | [Template] Validate json from path with 22 | Valid build object $.builds[*] /data/series/${TARGET_SERIES}/builds/ 23 | Valid build object $.builds[*] /data/series/${TARGET_SERIES}/builds/?build=2 24 | Valid build object $.builds[*] /data/series/${TARGET_SERIES}/builds/?last=10 25 | Valid build object $.builds[*] /data/series/${TARGET_SERIES}/builds/?last=10&offset=5 26 | 27 | Test run results data 28 | GET /data/test_run/2/results/ 29 | Integer response status 200 30 | Valid suite result object 
$.suites[*] 31 | Valid build object $.included_in_builds[*] 32 | 33 | Single test run data 34 | GET /data/test_run/2/ 35 | Integer response status 200 36 | Integer $.id 37 | String $.imported_at 38 | String $.archived_using 39 | String $.archiver_version 40 | String $.generated 41 | String $.generator 42 | String $.start_time 43 | Integer $.elapsed 44 | Boolean $.ignored 45 | Boolean $.rpa 46 | Boolean $.dryrun 47 | 48 | Results data 49 | [Template] Validate json with 50 | Valid results data /data/series/${TARGET_SERIES}/results/ 51 | Valid results data /data/series/${TARGET_SERIES}/results/?build=1 52 | Valid results data /data/series/${TARGET_SERIES}/results/?last=2 53 | Valid results data /data/series/${TARGET_SERIES}/results/?last=2&offset=5 54 | 55 | Single test result data 56 | # if sleep tests are included use test case 58 otherwise 53 57 | GET /data/test_run/2/test_case/53/ 58 | Integer response status 200 59 | Valid suite result object $.suites[*] ${True} 60 | 61 | Keyword tree data 62 | [Template] Validate json from path with 63 | Valid keyword tree object $ /data/keyword_tree/92733cdd5e9d76d0a5108bcb9491aee2fe77e11c/ 64 | Valid keyword tree object $ /data/keyword_tree/34c6b7c644d6d2d0282bf8de915bce9129ab71ef/ 65 | 66 | Keyword tree stats data 67 | [Template] Validate json from path with 68 | Valid keyword stat object $ /data/keyword_tree/92733cdd5e9d76d0a5108bcb9491aee2fe77e11c/stats 69 | Valid keyword stat object $ /data/keyword_tree/34c6b7c644d6d2d0282bf8de915bce9129ab71ef/stats 70 | 71 | Test case pass ratio data 72 | [Template] Validate json with 73 | Valid status ratio data /data/test_status_statistics/ 74 | Valid status ratio data /data/test_status_statistics/?series=${TARGET_SERIES} 75 | Valid status ratio data /data/test_status_statistics/?series=${TARGET_SERIES}&build=1 1 76 | Valid status ratio data /data/test_status_statistics/?series=${TARGET_SERIES}&last=3 3 77 | Valid status ratio data 
/data/test_status_statistics/?series=${TARGET_SERIES}&last=3&offset=2 3 78 | 79 | Valid status ratio data /data/series/${TARGET_SERIES}/test_status_statistics/ 80 | Valid status ratio data /data/series/${TARGET_SERIES}/build/1/test_status_statistics/ 1 81 | Valid status ratio data /data/series/${TARGET_SERIES}/test_status_statistics/?last=3 3 82 | Valid status ratio data /data/series/${TARGET_SERIES}/test_status_statistics/?last=3&offset=2 3 83 | 84 | Suite pass ratio data 85 | [Template] Validate json with 86 | Valid status ratio data /data/suite_status_statistics/ 87 | Valid status ratio data /data/suite_status_statistics/?series=${TARGET_SERIES} 88 | Valid status ratio data /data/suite_status_statistics/?series=${TARGET_SERIES}&build=1 1 89 | Valid status ratio data /data/suite_status_statistics/?series=${TARGET_SERIES}&last=3 3 90 | Valid status ratio data /data/suite_status_statistics/?series=${TARGET_SERIES}&last=3&offset=2 3 91 | 92 | Valid status ratio data /data/series/${TARGET_SERIES}/suite_status_statistics/ 93 | Valid status ratio data /data/series/${TARGET_SERIES}/suite_status_statistics/?last=3 3 94 | Valid status ratio data /data/series/${TARGET_SERIES}/suite_status_statistics/?last=3&offset=2 3 95 | 96 | Recently failing tests return valid data 97 | GET /data/series/${TARGET_SERIES}/recently_failing_tests/ 98 | Integer response status 200 99 | Array $.tests 100 | Object $.tests[*] 101 | Integer $.tests[*].id 102 | Integer $.tests[*].suite_id 103 | String $.tests[*].name 104 | String $.tests[*].full_name 105 | Integer $.tests[*].fails 106 | Number $.tests[*].failiness 107 | 108 | Recently failing suites return valid data 109 | GET /data/series/${TARGET_SERIES}/recently_failing_suites/ 110 | Integer response status 200 111 | Array $.suites 112 | Object $.suites[*] 113 | Integer $.suites[*].id 114 | String $.suites[*].name 115 | String $.suites[*].full_name 116 | Integer $.suites[*].fails 117 | Number $.suites[*].failiness 118 | 119 | Ignoring and 
unignoring test runs should work 120 | GET /data/series/${TARGET_SERIES}/builds/ 121 | Array $.builds[0].test_run_ids maxItems=1 122 | ${test_run_ids}= Integer $.builds[0].test_run_ids[0] 123 | ${test_run_id}= Set Variable ${test_run_ids}[0] 124 | GET /data/test_run/${test_run_id}/ 125 | Boolean $.ignored false 126 | POST /data/test_run/${test_run_id}/ignore/ 127 | Boolean $.ignored true 128 | GET /data/test_run/${test_run_id}/ 129 | Boolean $.ignored true 130 | POST /data/test_run/${test_run_id}/ignore/ 131 | Boolean $.ignored false 132 | GET /data/test_run/${test_run_id}/ 133 | Boolean $.ignored false 134 | 135 | 136 | *** Keywords *** 137 | 138 | Validate json from path with 139 | [Arguments] ${validator} ${field} ${url} @{args} 140 | GET ${url} 141 | Integer response status 200 142 | Run keyword ${validator} ${field} @{args} 143 | 144 | Validate json with 145 | [Arguments] ${validator} ${url} @{args} 146 | GET ${url} 147 | Integer response status 200 148 | Run keyword ${validator} @{args} 149 | 150 | Valid series data 151 | [Arguments] ${path} 152 | Array ${path}.series minItems=1 153 | Object ${path}.series[*] 154 | Integer ${path}.series[*].id 155 | String ${path}.series[*].team 156 | String ${path}.series[*].name 157 | Integer ${path}.series[*].builds 158 | Integer ${path}.series[*].last_build 159 | 160 | Valid results data 161 | Array $.builds 162 | Valid build object $.builds[*] 163 | Array $.builds[*].suites 164 | Valid suite result object $.builds[*].suites[*] 165 | 166 | Valid suite result object 167 | [Arguments] ${path} ${detailed}=${False} 168 | Object ${path} 169 | Integer ${path}.id 170 | String ${path}.name 171 | String ${path}.full_name 172 | String ${path}.repository 173 | 174 | Integer ${path}.test_run_id 175 | String ${path}.status 176 | String ${path}.execution_status 177 | # String ${path}.setup_status 178 | # String ${path}.teardown_status 179 | 180 | String ${path}.fingerprint 181 | # String ${path}.setup_fingerprint 182 | String 
${path}.execution_fingerprint 183 | # String ${path}.teardown_fingerprint 184 | 185 | #Run keyword if ${detailed} Valid keyword tree object ${path}.setup 186 | #Run keyword if ${detailed} Valid keyword tree object ${path}.teardown 187 | 188 | String ${path}.start_time 189 | Integer ${path}.elapsed 190 | # Integer ${path}.elapsed 191 | # Integer ${path}.elapsed 192 | # Integer ${path}.elapsed 193 | 194 | Array ${path}.metadata 195 | Object ${path}.metadata[*] 196 | String ${path}.metadata[*].name 197 | String ${path}.metadata[*].value 198 | 199 | #Run keyword if ${detailed} Valid log message data ${path}.log_messages[*] 200 | 201 | Array ${path}.tests 202 | Valid test result object ${path}.tests[*] ${detailed} 203 | 204 | Valid test result object 205 | [Arguments] ${path} ${detailed}=${False} 206 | Object ${path} 207 | Integer ${path}.id 208 | String ${path}.name 209 | String ${path}.full_name 210 | 211 | Integer ${path}.test_run_id 212 | String ${path}.status FAIL PASS 213 | Run keyword if ${detailed} String ${path}.execution_status 214 | Run keyword if ${detailed} String ${path}.setup_status 215 | Run keyword if ${detailed} String ${path}.teardown_status 216 | 217 | String ${path}.fingerprint 218 | Run keyword if ${detailed} String ${path}.setup_fingerprint 219 | Run keyword if ${detailed} String ${path}.execution_fingerprint 220 | Run keyword if ${detailed} String ${path}.teardown_fingerprint 221 | 222 | Run keyword if ${detailed} Valid keyword tree object ${path}.setup 223 | # Run keyword if ${detailed} Valid keyword tree object ${path}.execution 224 | Run keyword if ${detailed} Valid keyword tree object ${path}.teardown 225 | 226 | String ${path}.start_time 227 | Integer ${path}.elapsed 228 | 229 | Array ${path}.tags 230 | String ${path}.tags[*] 231 | 232 | Run keyword if ${detailed} Valid log message data ${path}.log_messages[*] 233 | 234 | Valid log message data 235 | [Arguments] ${path} 236 | Object ${path} 237 | String ${path}.timestamp 238 | String 
${path}.message 239 | String ${path}.log_level 240 | 241 | Valid keyword tree object 242 | [Arguments] ${path} 243 | Object ${path} 244 | String ${path}.fingerprint 245 | String ${path}.keyword 246 | String ${path}.library 247 | String ${path}.status 248 | Array ${path}.arguments 249 | Array ${path}.children 250 | 251 | Valid keyword stat object 252 | [Arguments] ${path} 253 | Object ${path} 254 | Integer ${path}.calls 255 | Integer ${path}.max_elapsed 256 | Integer ${path}.min_elapsed 257 | Number ${path}.avg_elapsed 258 | Integer ${path}.max_call_depth 259 | 260 | Valid status ratio data 261 | [Arguments] ${max_builds}=${None} 262 | Valid status ratio object $.total 263 | Run keyword if ${max_builds} is not ${None} Array $.per_build maxItems=${max_builds} 264 | Valid status ratio object $.per_build[*] 265 | 266 | Valid status ratio object 267 | [Arguments] ${path} 268 | Object ${path} 269 | Integer ${path}.total 270 | Integer ${path}.passed 271 | Integer ${path}.failed 272 | Number ${path}.pass_ratio 273 | Number ${path}.fail_ratio 274 | 275 | Valid build object 276 | [Arguments] ${path} 277 | Object ${path} 278 | Integer ${path}.build_number 279 | String ${path}.started_at 280 | Array ${path}.test_run_ids 281 | Integer ${path}.test_run_ids[*] 282 | 283 | -------------------------------------------------------------------------------- /tests/unit/test_configs.py: -------------------------------------------------------------------------------- 1 | 2 | import argparse 3 | import unittest 4 | from datetime import date 5 | from unittest import mock 6 | 7 | from test_archiver import configs 8 | 9 | 10 | # pylint: disable=protected-access 11 | 12 | 13 | class TestHelperFunctions(unittest.TestCase): 14 | 15 | def test_parse_key_value_pairs(self): 16 | self.assertEqual(configs.parse_key_value_pairs(None), {}) 17 | self.assertEqual(configs.parse_key_value_pairs([]), {}) 18 | value_list = ['FOO:BAR', 'bar:1:2'] 19 | expected = {'FOO': 'BAR', 'bar': '1:2'} 20 | 
self.assertEqual(configs.parse_key_value_pairs(value_list), expected) 21 | self.assertEqual(configs.parse_key_value_pairs({'a': 1, 'b': 2}), {'a': 1, 'b': 2}) 22 | 23 | 24 | FAKE_CONFIG_FILE_DATA = {'database': 'archive.db', 'user': 'worker_user', 'port': 1234, 25 | 'metadata': {'version': '1.2.3', 'environment': 'integration'}} 26 | 27 | FAKE_CHANGES_FILE_DATA_1 = { 28 | "context": "Integration", 29 | "changes": [ 30 | { 31 | "name": "/path/to/file.py", 32 | "repository": "RepoA", 33 | "item_type": "my_item_type", 34 | "subtype": "my_subtype" 35 | } 36 | ] 37 | } 38 | 39 | FAKE_CHANGES_FILE_DATA_2 = { 40 | "changes": [ 41 | { 42 | "name": "/path/to/file.py", 43 | "repository": "RepoA", 44 | "item_type": "my_item_type", 45 | "subtype": "my_sub_item_type" 46 | } 47 | ] 48 | } 49 | 50 | 51 | class TestConfig(unittest.TestCase): 52 | 53 | def test_resolve_option(self): 54 | # pylint: disable=protected-access 55 | config = configs.Config() 56 | config._cli_args = argparse.Namespace() 57 | self.assertEqual(config.resolve_option('foo_option'), None) 58 | 59 | config._cli_args = argparse.Namespace() 60 | self.assertEqual(config.resolve_option('foo_option', default='bar'), 'bar') 61 | 62 | config._cli_args = argparse.Namespace(foo_option='100') 63 | self.assertEqual(config.resolve_option('foo_option', default=10, cast_as=int), 100) 64 | 65 | config._cli_args = argparse.Namespace(foo_option=100) 66 | self.assertEqual(config.resolve_option('foo_option', default=10, cast_as=int), 100) 67 | 68 | config._cli_args = argparse.Namespace(foo_option='foo') 69 | with self.assertRaises(ValueError): 70 | config.resolve_option('foo_option', cast_as=int) 71 | 72 | config._cli_args = argparse.Namespace(foo_option='full') 73 | self.assertEqual(config.resolve_option('foo_option', cast_as=configs._log_message_length), 0) 74 | 75 | config._cli_args = argparse.Namespace(foo_option='bar') 76 | with self.assertRaises(ValueError): 77 | config.resolve_option('foo_option', 
cast_as=configs._log_message_length) 78 | 79 | def test_default_configs_are_resolved(self): 80 | config = configs.Config() 81 | config.resolve() 82 | self.assertEqual(config.database, 'test_archive') 83 | self.assertEqual(config.port, 5432) 84 | self.assertEqual(config.require_ssl, True) 85 | self.assertEqual(config.metadata, {}) 86 | 87 | def test_file_configs_are_resolved(self): 88 | config = configs.Config() 89 | config.resolve(file_config=FAKE_CONFIG_FILE_DATA) 90 | self.assertEqual(config.database, 'archive.db') 91 | self.assertEqual(config.port, 1234) 92 | self.assertEqual(config.metadata, {'version': '1.2.3', 'environment': 'integration'}) 93 | 94 | @mock.patch('test_archiver.configs.read_config_file', return_value=FAKE_CONFIG_FILE_DATA) 95 | def test_config_file_is_read(self, fake_read_config_file): 96 | config = configs.Config() 97 | config.resolve(file_config='foobar.json') 98 | fake_read_config_file.asser_called_once() 99 | 100 | def test_cli_configs_are_resolved(self): 101 | fake_cli_args = argparse.Namespace(user='cli_user', port=1234, metadata=['foo:bar']) 102 | config = configs.Config() 103 | config.resolve(cli_args=fake_cli_args) 104 | self.assertEqual(config.database, 'test_archive') 105 | self.assertEqual(config.user, 'cli_user') 106 | self.assertEqual(config.port, 1234) 107 | self.assertEqual(config.metadata, {'foo': 'bar'}) 108 | 109 | def test_cli_configs_have_higher_precedence_than_config_files(self): 110 | fake_cli_args = argparse.Namespace(user='cli_user', port=4321, 111 | metadata=['version:3.2.1', 'cli_data:foobar']) 112 | config = configs.Config() 113 | config.resolve(cli_args=fake_cli_args, file_config=FAKE_CONFIG_FILE_DATA) 114 | self.assertEqual(config.database, 'archive.db') 115 | self.assertEqual(config.user, 'cli_user') 116 | self.assertEqual(config.port, 4321) 117 | self.assertEqual(config.metadata, {'version': '3.2.1', 'cli_data': 'foobar', 118 | 'environment': 'integration'}) 119 | #'metadata': {'version': '1.2.3', 'environment': 
'integration'} 120 | 121 | def test_allowing_major_schema_update_overrides_minor_updates(self): 122 | fake_cli_args = argparse.Namespace(allow_minor_schema_updates=True) 123 | config = configs.Config() 124 | config.resolve(cli_args=fake_cli_args) 125 | self.assertEqual(config.allow_minor_schema_updates, True) 126 | self.assertEqual(config.allow_major_schema_updates, False) 127 | 128 | fake_cli_args = argparse.Namespace(allow_major_schema_updates=True) 129 | config = configs.Config() 130 | config.resolve(cli_args=fake_cli_args) 131 | self.assertEqual(config.allow_minor_schema_updates, True) 132 | self.assertEqual(config.allow_major_schema_updates, True) 133 | 134 | fake_cli_args = argparse.Namespace(allow_major_schema_updates=True, allow_minor_schema_updates=False) 135 | config = configs.Config() 136 | config.resolve(cli_args=fake_cli_args) 137 | self.assertEqual(config.allow_minor_schema_updates, True) 138 | self.assertEqual(config.allow_major_schema_updates, True) 139 | 140 | def test_log_level_ignored(self): 141 | fake_cli_args = argparse.Namespace() 142 | config = configs.Config() 143 | config.resolve(cli_args=fake_cli_args) 144 | self.assertFalse(config.log_level_ignored('TRACE')) 145 | self.assertFalse(config.log_level_ignored('DEBUG')) 146 | self.assertFalse(config.log_level_ignored('INFO')) 147 | self.assertFalse(config.log_level_ignored('WARN')) 148 | self.assertFalse(config.log_level_ignored('ERROR')) 149 | self.assertFalse(config.log_level_ignored('FAIL')) 150 | self.assertFalse(config.log_level_ignored('OTHER_FOOBAR')) 151 | 152 | fake_cli_args = argparse.Namespace(ignore_logs_below='INFO') 153 | config = configs.Config() 154 | config.resolve(cli_args=fake_cli_args) 155 | self.assertTrue(config.log_level_ignored('TRACE')) 156 | self.assertTrue(config.log_level_ignored('DEBUG')) 157 | self.assertFalse(config.log_level_ignored('INFO')) 158 | self.assertFalse(config.log_level_ignored('WARN')) 159 | self.assertFalse(config.log_level_ignored('ERROR')) 160 | 
self.assertFalse(config.log_level_ignored('FAIL')) 161 | self.assertFalse(config.log_level_ignored('OTHER_FOOBAR')) 162 | 163 | fake_cli_args = argparse.Namespace(ignore_logs_below='WARN') 164 | config = configs.Config() 165 | config.resolve(cli_args=fake_cli_args) 166 | self.assertTrue(config.log_level_ignored('TRACE')) 167 | self.assertTrue(config.log_level_ignored('DEBUG')) 168 | self.assertTrue(config.log_level_ignored('INFO')) 169 | self.assertFalse(config.log_level_ignored('WARN')) 170 | self.assertFalse(config.log_level_ignored('ERROR')) 171 | self.assertFalse(config.log_level_ignored('FAIL')) 172 | self.assertFalse(config.log_level_ignored('OTHER_FOOBAR')) 173 | 174 | def test_max_log_message_length_is_handled_correctly(self): 175 | fake_cli_args = argparse.Namespace() 176 | config = configs.Config() 177 | config.resolve(cli_args=fake_cli_args) 178 | self.assertEqual(config.max_log_message_length, 2000) 179 | 180 | fake_cli_args = argparse.Namespace(max_log_message_length=None) 181 | config.resolve(cli_args=fake_cli_args) 182 | self.assertEqual(config.max_log_message_length, 2000) 183 | 184 | fake_cli_args = argparse.Namespace(max_log_message_length=0) 185 | config.resolve(cli_args=fake_cli_args) 186 | self.assertEqual(config.max_log_message_length, 0) 187 | 188 | fake_cli_args = argparse.Namespace(max_log_message_length='full') 189 | config.resolve(cli_args=fake_cli_args) 190 | self.assertEqual(config.max_log_message_length, 0) 191 | 192 | fake_cli_args = argparse.Namespace(max_log_message_length='bar') 193 | with self.assertRaises(ValueError): 194 | config.resolve(cli_args=fake_cli_args) 195 | 196 | fake_cli_args = argparse.Namespace(max_log_message_length=100) 197 | config.resolve(cli_args=fake_cli_args) 198 | self.assertEqual(config.max_log_message_length, 100) 199 | 200 | fake_cli_args = argparse.Namespace(max_log_message_length=-100) 201 | config.resolve(cli_args=fake_cli_args) 202 | self.assertEqual(config.max_log_message_length, -100) 203 | 204 | 
class TestExecutionContext(unittest.TestCase): 205 | 206 | def test_execution_context(self): 207 | fake_cli_args = argparse.Namespace(execution_context='PR') 208 | config = configs.Config() 209 | config.resolve(cli_args=fake_cli_args) 210 | self.assertEqual(config.execution_context, 'PR') 211 | 212 | fake_cli_args = argparse.Namespace() 213 | config = configs.Config() 214 | config.resolve(cli_args=fake_cli_args) 215 | self.assertEqual(config.execution_context, 'default') 216 | 217 | @mock.patch('test_archiver.configs.read_config_file', return_value=FAKE_CHANGES_FILE_DATA_1) 218 | def test_execution_context_and_changes(self, fake_changes_file_data): 219 | fake_cli_args = argparse.Namespace(execution_context='PR', changes='foobar.json') 220 | config = configs.Config() 221 | config.resolve(cli_args=fake_cli_args) 222 | self.assertEqual(config.execution_context, 'PR') 223 | 224 | fake_cli_args = argparse.Namespace(changes='foobar.json') 225 | config = configs.Config() 226 | config.resolve(cli_args=fake_cli_args) 227 | self.assertEqual(config.execution_context, 'Integration') 228 | 229 | @mock.patch('test_archiver.configs.read_config_file', return_value=FAKE_CHANGES_FILE_DATA_2) 230 | def test_execution_context_when_not_set_in_changes(self, fake_changes_file_data): 231 | fake_cli_args = argparse.Namespace(changes='foobar.json') 232 | config = configs.Config() 233 | config.resolve(cli_args=fake_cli_args) 234 | self.assertEqual(config.execution_context, 'default') 235 | 236 | @mock.patch('test_archiver.configs.read_config_file', return_value=FAKE_CHANGES_FILE_DATA_1) 237 | def test_changes(self, fake_changes_file_data): 238 | fake_cli_args = argparse.Namespace(changes='foobar.json') 239 | config = configs.Config() 240 | config.resolve(cli_args=fake_cli_args) 241 | changes = config.changes 242 | self.assertTrue(len(changes) == 1) 243 | self.assertEqual(changes[0]['name'], '/path/to/file.py') 244 | self.assertEqual(changes[0]['repository'], 'RepoA') 245 | 
self.assertEqual(changes[0]['item_type'], 'my_item_type') 246 | self.assertEqual(changes[0]['subtype'], 'my_subtype') 247 | 248 | @mock.patch('test_archiver.configs.read_config_file', return_value={}) 249 | def test_changes_when_no_changes_in_file(self, fake_changes_file_data): 250 | fake_cli_args = argparse.Namespace(changes='foobar.json') 251 | config = configs.Config() 252 | config.resolve(cli_args=fake_cli_args) 253 | self.assertEqual(config.changes, []) 254 | 255 | def test_changes_when_no_changes(self): 256 | fake_cli_args = argparse.Namespace() 257 | config = configs.Config() 258 | config.resolve(cli_args=fake_cli_args) 259 | self.assertEqual(config.changes, []) 260 | 261 | def test_execution_id(self): 262 | fake_cli_args = argparse.Namespace(execution_id='job_name_here') 263 | config = configs.Config() 264 | config.resolve(cli_args=fake_cli_args) 265 | assert config.execution_id == 'job_name_here', 'Execution-id should be correct' 266 | 267 | fake_cli_args = argparse.Namespace() 268 | config = configs.Config() 269 | config.resolve(cli_args=fake_cli_args) 270 | assert config.execution_id == 'Not set', 'Execution-id should be correct' 271 | 272 | 273 | class TestHelperFunctions(unittest.TestCase): 274 | 275 | def test_parse_date(self): 276 | self.assertEqual(configs._parse_date("2024-01-01"), date(2024,1,1)) 277 | with self.assertRaises(ValueError): 278 | configs._parse_date("foo") 279 | 280 | 281 | if __name__ == '__main__': 282 | unittest.main() 283 | -------------------------------------------------------------------------------- /tests/unit/test_archiver_module.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=W0212 2 | 3 | import unittest 4 | from unittest.mock import Mock 5 | 6 | from test_archiver import configs, archiver 7 | 8 | class TestTestItem(unittest.TestCase): 9 | 10 | def setUp(self): 11 | self.mock_db = Mock() 12 | self.config = configs.Config(file_config={}) 13 | self.archiver = 
archiver.Archiver(self.mock_db, self.config) 14 | self.item = archiver.TestItem(self.archiver) 15 | 16 | def test_parent_suite(self): 17 | self.assertEqual(self.item.parent_suite(), None) 18 | 19 | test_run = archiver.TestRun(self.archiver, 'unittests', 'never', 'unittests', None, None) 20 | self.archiver.stack.append(test_run) 21 | self.assertEqual(self.item.parent_suite(), None) 22 | 23 | suite1 = archiver.Suite(self.archiver, 'mock_suite1', 'mock_repo') 24 | self.archiver.stack.append(suite1) 25 | self.assertEqual(self.item.parent_suite(), suite1) 26 | 27 | suite2 = archiver.Suite(self.archiver, 'mock_suite2', 'mock_repo') 28 | self.archiver.stack.append(suite2) 29 | self.assertEqual(self.item.parent_suite(), suite2) 30 | 31 | test = archiver.Test(self.archiver, 'mock_test', None) 32 | self.archiver.stack.append(test) 33 | self.assertEqual(self.item.parent_suite(), suite2) 34 | 35 | def test_parent_test(self): 36 | self.assertEqual(self.item.parent_test(), None) 37 | 38 | test_run = archiver.TestRun(self.archiver, 'unittests', 'never', 'unittests', None, None) 39 | self.archiver.stack.append(test_run) 40 | self.assertEqual(self.item.parent_test(), None) 41 | 42 | suite1 = archiver.Suite(self.archiver, 'mock_suite1', 'mock_repo') 43 | self.archiver.stack.append(suite1) 44 | self.assertEqual(self.item.parent_test(), None) 45 | 46 | suite2 = archiver.Suite(self.archiver, 'mock_suite2', 'mock_repo') 47 | self.archiver.stack.append(suite2) 48 | self.assertEqual(self.item.parent_test(), None) 49 | 50 | test = archiver.Test(self.archiver, 'mock_test', None) 51 | self.archiver.stack.append(test) 52 | self.assertEqual(self.item.parent_test(), test) 53 | 54 | keyword = archiver.Keyword(self.archiver, 'mock_kw', None, None, None) 55 | self.archiver.stack.append(keyword) 56 | self.assertEqual(self.item.parent_test(), test) 57 | 58 | def test_parent_item(self): 59 | self.assertEqual(self.item._parent_item(), None) 60 | 61 | test_run = archiver.TestRun(self.archiver, 
'unittests', 'never', 'unittests', None, None) 62 | self.archiver.stack.append(test_run) 63 | self.assertEqual(self.item._parent_item(), test_run) 64 | 65 | suite1 = archiver.Suite(self.archiver, 'mock_suite1', 'mock_repo') 66 | self.archiver.stack.append(suite1) 67 | self.assertEqual(self.item._parent_item(), suite1) 68 | 69 | suite2 = archiver.Suite(self.archiver, 'mock_suite2', 'mock_repo') 70 | self.archiver.stack.append(suite2) 71 | self.assertEqual(self.item._parent_item(), suite2) 72 | 73 | test = archiver.Test(self.archiver, 'mock_test', None) 74 | self.archiver.stack.append(test) 75 | self.assertEqual(self.item._parent_item(), test) 76 | 77 | keyword = archiver.Keyword(self.archiver, 'mock_kw', None, None, None) 78 | self.archiver.stack.append(keyword) 79 | self.assertEqual(self.item._parent_item(), keyword) 80 | 81 | def test_test_run_id(self): 82 | self.assertEqual(self.item.test_run_id(), None) 83 | 84 | self.mock_db.insert_and_return_id.return_value = 1234 85 | self.archiver.begin_test_run('unittests', 'never', 'unittests', None, None) 86 | self.assertEqual(self.item.test_run_id(), 1234) 87 | 88 | 89 | class SutFingerprintedItem(archiver.FingerprintedItem): 90 | def _execution_path_identifier(self): 91 | return 'sut' 92 | 93 | 94 | class TestFingerprintedItem(unittest.TestCase): 95 | 96 | def setUp(self): 97 | self.mock_db = Mock() 98 | self.config = configs.Config(file_config={}) 99 | self.archiver = archiver.Archiver(self.mock_db, self.config) 100 | self.item = SutFingerprintedItem(self.archiver, 'SUT item') 101 | 102 | def test_child_counter(self): 103 | self.assertEqual(self.item.child_counter('a'), 1) 104 | self.assertEqual(self.item.child_counter('a'), 2) 105 | self.assertEqual(self.item.child_counter('b'), 1) 106 | self.assertEqual(self.item.child_counter('b'), 2) 107 | self.assertEqual(self.item.child_counter('a'), 3) 108 | 109 | def test_execution_path(self): 110 | self.assertEqual(self.item.execution_path(), 'sut1') 111 | 112 | 
self.archiver.stack.append(self.item) 113 | item2 = SutFingerprintedItem(self.archiver, 'SUT item 2') 114 | self.archiver.stack.append(item2) 115 | self.assertEqual(item2.execution_path(), 'sut1-sut1') 116 | self.archiver.stack.pop(1) 117 | 118 | item3 = SutFingerprintedItem(self.archiver, 'SUT item 3') 119 | self.archiver.stack.append(item3) 120 | print(self.archiver.stack) 121 | self.assertEqual(item3.execution_path(), 'sut1-sut2') 122 | 123 | self.item._execution_path = 'path-to-foo' 124 | self.assertEqual(self.item.execution_path(), 'path-to-foo') 125 | 126 | 127 | class TestCase(unittest.TestCase): 128 | 129 | def setUp(self): 130 | self.mock_db = Mock() 131 | 132 | def test_keywords_are_not_archived(self): 133 | config = configs.Config() 134 | config.resolve(file_config={'archive_keywords': False}) 135 | sut_archiver = archiver.Archiver(self.mock_db, config) 136 | sut_archiver.begin_suite('Some suite of tests') 137 | test_case = sut_archiver.begin_test('Some test case') 138 | test_case.subtree_fingerprints = ['abcdef1234567890'] 139 | 140 | keyword = sut_archiver.begin_keyword('Fake kw', 'unittests', 'mock') 141 | keyword.subtree_fingerprints = ['abcdef1234567890'] 142 | keyword.insert_results() 143 | test_case.insert_results() 144 | self.mock_db.insert_or_ignore.assert_not_called() 145 | self.assertEqual(len(sut_archiver.keyword_statistics), 0) 146 | 147 | 148 | class TestKeyword(unittest.TestCase): 149 | 150 | def setUp(self): 151 | self.mock_db = Mock() 152 | 153 | def test_keyword_is_inserted_by_default(self): 154 | config = configs.Config() 155 | config.resolve() 156 | sut_archiver = archiver.Archiver(self.mock_db, config) 157 | sut_archiver.begin_suite('Some suite of tests') 158 | sut_archiver.begin_test('Some test case') 159 | 160 | keyword = sut_archiver.begin_keyword('Fake kw', 'unittests', 'mock') 161 | keyword.insert_results() 162 | self.mock_db.insert_or_ignore.assert_called_once() 163 | self.assertEqual(len(sut_archiver.keyword_statistics), 1) 
164 | 165 | def test_keyword_statistics_are_not_collected(self): 166 | config = configs.Config() 167 | config.resolve(file_config={'archive_keyword_statistics': False}) 168 | sut_archiver = archiver.Archiver(self.mock_db, config) 169 | sut_archiver.begin_suite('Some suite of tests') 170 | sut_archiver.begin_test('Some test case') 171 | 172 | keyword = sut_archiver.begin_keyword('Fake kw', 'unittests', 'mock') 173 | keyword.insert_results() 174 | self.mock_db.insert_or_ignore.assert_called_once() 175 | self.assertEqual(len(sut_archiver.keyword_statistics), 0) 176 | 177 | def test_keywords_are_not_archived(self): 178 | config = configs.Config() 179 | config.resolve(file_config={'archive_keywords': False}) 180 | sut_archiver = archiver.Archiver(self.mock_db, config) 181 | sut_archiver.begin_suite('Some suite of tests') 182 | sut_archiver.begin_test('Some test case') 183 | 184 | keyword = sut_archiver.begin_keyword('Fake kw', 'unittests', 'mock') 185 | keyword.subtree_fingerprints = ['abcdef1234567890'] 186 | keyword.insert_results() 187 | self.mock_db.insert_or_ignore.assert_not_called() 188 | self.assertEqual(len(sut_archiver.keyword_statistics), 0) 189 | 190 | 191 | class TestLogMessage(unittest.TestCase): 192 | 193 | def setUp(self): 194 | self.mock_db = Mock() 195 | 196 | def test_insert_not_ignored_by_default(self): 197 | config = configs.Config() 198 | config.resolve() 199 | sut_archiver = archiver.Archiver(self.mock_db, config) 200 | sut_archiver.begin_suite('Some suite of tests') 201 | 202 | message = archiver.LogMessage(sut_archiver, 'WARN', 'some_timestamp') 203 | message.insert('Some log message') 204 | self.mock_db.insert.assert_called_once() 205 | message = archiver.LogMessage(sut_archiver, 'INFO', 'some_timestamp') 206 | message.insert('Some log message') 207 | self.assertEqual(self.mock_db.insert.call_count, 2) 208 | message = archiver.LogMessage(sut_archiver, 'TRACE', 'some_timestamp') 209 | message.insert('Some log message') 210 | 
self.assertEqual(self.mock_db.insert.call_count, 3) 211 | 212 | def test_insert_adheres_to_log_level_cut_off(self): 213 | config = configs.Config() 214 | config.resolve(file_config={'ignore_logs_below': 'WARN'}) 215 | sut_archiver = archiver.Archiver(self.mock_db, config) 216 | sut_archiver.begin_suite('Some suite of tests') 217 | 218 | message = archiver.LogMessage(sut_archiver, 'WARN', 'some_timestamp') 219 | message.insert('Some log message') 220 | self.mock_db.insert.assert_called_once() 221 | message = archiver.LogMessage(sut_archiver, 'INFO', 'some_timestamp') 222 | message.insert('Some log message') 223 | self.mock_db.insert.assert_called_once() 224 | message = archiver.LogMessage(sut_archiver, 'TRACE', 'some_timestamp') 225 | message.insert('Some log message') 226 | self.mock_db.insert.assert_called_once() 227 | 228 | def test_logs_not_inserted_when_logs_ignored(self): 229 | config = configs.Config() 230 | config.resolve(file_config={'ignore_logs': True}) 231 | sut_archiver = archiver.Archiver(self.mock_db, config) 232 | sut_archiver.begin_suite('Some suite of tests') 233 | 234 | message = archiver.LogMessage(sut_archiver, 'WARN', 'some_timestamp') 235 | message.insert('Some log message') 236 | self.mock_db.insert.assert_not_called() 237 | message = archiver.LogMessage(sut_archiver, 'INFO', 'some_timestamp') 238 | message.insert('Some log message') 239 | self.mock_db.insert.assert_not_called() 240 | message = archiver.LogMessage(sut_archiver, 'TRACE', 'some_timestamp') 241 | message.insert('Some log message') 242 | self.mock_db.insert.assert_not_called() 243 | message = archiver.LogMessage(sut_archiver, 'FOO', 'some_timestamp') 244 | message.insert('Some log message') 245 | self.mock_db.insert.assert_not_called() 246 | 247 | def test_max_log_message_length_is_used(self): 248 | config = configs.Config() 249 | config.resolve(file_config={'max_log_message_length': 10}) 250 | sut_archiver = archiver.Archiver(self.mock_db, config) 251 | 
sut_archiver.begin_suite('Some suite of tests') 252 | message = archiver.LogMessage(sut_archiver, 'WARN', 'some_timestamp') 253 | message.insert('Some log message') 254 | self.assertEqual(self.mock_db.insert.mock_calls[0].args[1]['message'], "Some log m") 255 | 256 | config.resolve(file_config={'max_log_message_length': -10}) 257 | sut_archiver = archiver.Archiver(self.mock_db, config) 258 | sut_archiver.begin_suite('Some suite of tests') 259 | message = archiver.LogMessage(sut_archiver, 'WARN', 'some_timestamp') 260 | message.insert('Some log message') 261 | self.assertEqual(self.mock_db.insert.mock_calls[1].args[1]['message'], "og message") 262 | 263 | config.resolve(file_config={'max_log_message_length': 'full'}) 264 | sut_archiver = archiver.Archiver(self.mock_db, config) 265 | sut_archiver.begin_suite('Some suite of tests') 266 | message = archiver.LogMessage(sut_archiver, 'WARN', 'some_timestamp') 267 | message.insert('Some log message') 268 | self.assertEqual(self.mock_db.insert.mock_calls[2].args[1]['message'], 'Some log message') 269 | 270 | config.resolve(file_config={'max_log_message_length': 0}) 271 | sut_archiver = archiver.Archiver(self.mock_db, config) 272 | sut_archiver.begin_suite('Some suite of tests') 273 | message = archiver.LogMessage(sut_archiver, 'WARN', 'some_timestamp') 274 | message.insert('Some log message') 275 | self.assertEqual(self.mock_db.insert.mock_calls[3].args[1]['message'], 'Some log message') 276 | 277 | 278 | class TestArchiverClass(unittest.TestCase): 279 | 280 | def setUp(self): 281 | self.mock_db = Mock() 282 | self.config = configs.Config(file_config={}) 283 | self.archiver = archiver.Archiver(self.mock_db, self.config) 284 | 285 | def test_suite_execution_paths_are_set_or_generated(self): 286 | suite1 = self.archiver.begin_suite('mock suite 1', execution_path='path-to-s1') 287 | self.assertEqual(suite1.execution_path(), 'path-to-s1') 288 | 289 | suite2 = self.archiver.begin_suite('mock suite 2') 290 | 
self.assertEqual(suite2.execution_path(), 'path-to-s1-s1') 291 | 292 | self.archiver.end_suite() 293 | suite3 = self.archiver.begin_suite('mock suite 3') 294 | self.assertEqual(suite3.execution_path(), 'path-to-s1-s2') 295 | 296 | def test_test_execution_paths_are_set(self): 297 | self.archiver.begin_test_run('unittests', 'never', 'unittests', None, None) 298 | suite1 = self.archiver.begin_suite('mock suite 1', execution_path='path-to-s1') 299 | self.assertEqual(suite1.execution_path(), 'path-to-s1') 300 | 301 | test1 = self.archiver.begin_test('mock test 1', execution_path='path-to-t3') 302 | self.assertEqual(test1.execution_path(), 'path-to-t3') 303 | self.archiver.end_test() 304 | 305 | suite2 = self.archiver.begin_suite('mock suite 2', execution_path='path-to-s11') 306 | self.assertEqual(suite2.execution_path(), 'path-to-s11') 307 | 308 | test2 = self.archiver.begin_test('mock test 2') 309 | self.assertEqual(test2.execution_path(), 'path-to-s11-t1') 310 | 311 | self.archiver.end_test() 312 | suite3 = self.archiver.begin_test('mock test 3') 313 | self.assertEqual(suite3.execution_path(), 'path-to-s11-t2') 314 | 315 | def test_keyword_and_log_message_execution_paths_are_generated(self): 316 | self.archiver.begin_test_run('unittests', 'never', 'unittests', None, None) 317 | suite1 = self.archiver.begin_suite('mock suite 1') 318 | self.assertEqual(suite1.execution_path(), 's1') 319 | 320 | keyword1 = self.archiver.begin_keyword('mock kw', 'unitests', 'setup') 321 | self.assertEqual(keyword1.execution_path(), 's1-k1') 322 | keyword2 = self.archiver.begin_keyword('mock kw', 'unitests', 'kw') 323 | self.assertEqual(keyword2.execution_path(), 's1-k1-k1') 324 | self.archiver.end_keyword() 325 | self.archiver.end_keyword() 326 | 327 | test1 = self.archiver.begin_test('mock test 1') 328 | self.assertEqual(test1.execution_path(), 's1-t1') 329 | 330 | keyword1 = self.archiver.begin_keyword('mock kw', 'unitests', 'setup') 331 | self.assertEqual(keyword1.execution_path(), 
's1-t1-k1') 332 | keyword2 = self.archiver.begin_keyword('mock kw', 'unitests', 'kw') 333 | self.archiver.end_keyword() 334 | keyword3 = self.archiver.begin_keyword('mock kw', 'unitests', 'kw') 335 | self.assertEqual(keyword3.execution_path(), 's1-t1-k1-k2') 336 | self.assertEqual(keyword2.execution_path(), 's1-t1-k1-k1') 337 | 338 | def test_execution_context(self): 339 | self.assertEqual(self.archiver.execution_context, 'default') 340 | 341 | def test_changes(self): 342 | self.assertEqual(self.archiver.changes, []) 343 | 344 | def test_execution_id(self): 345 | assert self.archiver.execution_id == "Not set" 346 | 347 | 348 | if __name__ == '__main__': 349 | unittest.main() 350 | -------------------------------------------------------------------------------- /src/test_archiver/configs.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import sys 4 | from collections import defaultdict 5 | from datetime import datetime 6 | 7 | from . 
import version 8 | 9 | 10 | def read_config_file(file_name): 11 | with open(file_name, 'r', encoding='utf-8') as config_file: 12 | return json.load(config_file) 13 | 14 | 15 | def parse_key_value_pairs(values): 16 | if isinstance(values, dict): 17 | return values.copy() 18 | pairs = {} 19 | for item in values or []: 20 | try: 21 | name, value = item.split(':', 1) 22 | pairs[name] = value 23 | except Exception as ex: 24 | raise ValueError(f"Unsupported format for key-value pair: '{item}' use NAME:VALUE") from ex 25 | return pairs 26 | 27 | def _log_message_length(value): 28 | try: 29 | return int(value) 30 | except ValueError as error: 31 | if value == 'full': 32 | return 0 33 | raise error 34 | 35 | def _parse_date(date_string): 36 | return datetime.strptime(date_string, '%Y-%m-%d').date() 37 | 38 | 39 | LOG_LEVEL_MAP = defaultdict(lambda: 100) 40 | LOG_LEVEL_MAP[None] = 0 41 | LOG_LEVEL_MAP["TRACE"] = 1 42 | LOG_LEVEL_MAP["DEBUG"] = 10 43 | LOG_LEVEL_MAP["INFO"] = 20 44 | LOG_LEVEL_MAP["WARN"] = 30 45 | LOG_LEVEL_MAP["SKIP"] = 30 46 | LOG_LEVEL_MAP["ERROR"] = 40 47 | LOG_LEVEL_MAP["FAIL"] = 50 48 | 49 | LOG_LEVEL_CUT_OFF_OPTIONS = ('TRACE', 'DEBUG', 'INFO', 'WARN') 50 | 51 | 52 | class Singleton(type): 53 | _instance = {} 54 | 55 | def __call__(cls, *args, **kwargs): 56 | if cls not in cls._instance: 57 | cls._instance[cls] = super(Singleton, cls).__call__(*args, **kwargs) 58 | return cls._instance[cls] 59 | 60 | 61 | class Config(metaclass=Singleton): 62 | # pylint: disable=attribute-defined-outside-init 63 | 64 | def __init__(self): 65 | self._changes = 'changes' 66 | self._default = 'default' 67 | self._cli_args = None 68 | self._file_config = None 69 | self.resolve() 70 | 71 | def resolve(self, *, cli_args=None, file_config=None): 72 | self._cli_args = cli_args 73 | file_config = file_config or (getattr(cli_args, 'config_file', None) if cli_args else None) 74 | if isinstance(file_config, str): 75 | self._file_config = read_config_file(file_config) 76 | else: 
77 | self._file_config = file_config or {} 78 | self._resolve_options() 79 | return self 80 | 81 | def _resolve_options(self): 82 | # Database connection 83 | self.database = self.resolve_option('database', default='test_archive') 84 | self.user = self.resolve_option('user') 85 | self.password = self.resolve_option('password') 86 | self.host = self.resolve_option('host') 87 | self.port = self.resolve_option('port', default=5432, cast_as=int) 88 | self.db_engine = self.resolve_option('db_engine', default='sqlite') 89 | self.require_ssl = self.resolve_option('require_ssl', default=True, cast_as=bool) 90 | 91 | # Test metadata 92 | self.team = self.resolve_option('team') 93 | self.repository = self.resolve_option('repository', default='default repo') 94 | self.series = self.resolve_list_option('series') 95 | self.metadata = self.resolve_map_option('metadata') 96 | 97 | # Schema updates 98 | self.allow_major_schema_updates = self.resolve_option('allow_major_schema_updates', 99 | default=False, cast_as=bool) 100 | self.allow_minor_schema_updates = self.resolve_option('allow_minor_schema_updates', 101 | default=False, cast_as=bool) 102 | # If major updates are allowed then minor ones are as well 103 | self.allow_minor_schema_updates = self.allow_major_schema_updates or self.allow_minor_schema_updates 104 | 105 | # Cleaning history 106 | self.keep_builds = self.resolve_option('keep_builds', default=0, cast_as=int) 107 | self.keep_months = self.resolve_option('keep_months', default=0, cast_as=int) 108 | self.keep_after = self.resolve_option('keep_after', default=None, cast_as=_parse_date) 109 | self.clean_team = self.resolve_option('clean_team', default=None) 110 | self.clean_logs = self.resolve_option('clean_logs', default=False, cast_as=bool) 111 | self.clean_logs_below = self.resolve_option('clean_logs_below', default=None) 112 | self.clean_keyword_stats = self.resolve_option('clean_keyword_stats', default=False, cast_as=bool) 113 | 114 | # Limit archived data 115 | 
self.archive_keywords = self.resolve_option('archive_keywords', default=True, cast_as=bool) 116 | self.archive_keyword_statistics = self.resolve_option('archive_keyword_statistics', default=True, 117 | cast_as=bool) 118 | self.ignore_logs = self.resolve_option('ignore_logs', default=False, cast_as=bool) 119 | self.ignore_logs_below = self.resolve_option('ignore_logs_below', default=None) 120 | self.max_log_message_length = self.resolve_option('max_log_message_length', 121 | cast_as=_log_message_length, default=2000) 122 | 123 | # Adjust timestamps 124 | self.time_adjust_secs = self.resolve_option('time_adjust_secs', default=0, cast_as=int) 125 | self.time_adjust_with_system_timezone = self.resolve_option('time_adjust_with_system_timezone', 126 | default=False, cast_as=bool) 127 | # ChangeEngine listener 128 | self.change_engine_url = self.resolve_option('change_engine_url') 129 | self.execution_context = self.resolve_execution_context() 130 | self.changes = self.resolve_changes() 131 | self.execution_id = self.resolve_option('execution_id', default='Not set') 132 | 133 | def resolve_option(self, name, default=None, cast_as=str): 134 | if self._cli_args and name in self._cli_args and getattr(self._cli_args, name) is not None: 135 | value = getattr(self._cli_args, name) 136 | else: 137 | value = self._file_config.get(name, default) 138 | if value is None: 139 | value = default 140 | try: 141 | return value if value is None else cast_as(value) 142 | except ValueError as value_error: 143 | print(f"Error: incompatible value for option '{name}'") 144 | raise value_error 145 | 146 | def resolve_execution_context(self): 147 | execution_context = self.resolve_option('execution_context') 148 | if execution_context is None: 149 | changes = self.resolve_option(self._changes) 150 | if changes is None: 151 | execution_context = self._default 152 | else: 153 | data = read_config_file(changes) 154 | execution_context = data.get('context', self._default) 155 | return 
execution_context 156 | 157 | def resolve_changes(self): 158 | changes_file = self.resolve_option(self._changes) 159 | if changes_file is None: 160 | return [] 161 | data = read_config_file(changes_file) 162 | return data.get(self._changes, []) 163 | 164 | def resolve_list_option(self, name): 165 | values = self._file_config.get(name, []) 166 | if self._cli_args and name in self._cli_args and getattr(self._cli_args, name) is not None: 167 | values.extend(getattr(self._cli_args, name)) 168 | return values 169 | 170 | def resolve_map_option(self, name): 171 | values = parse_key_value_pairs(self._file_config.get(name, [])) 172 | if self._cli_args and name in self._cli_args and getattr(self._cli_args, name) is not None: 173 | values.update(parse_key_value_pairs(getattr(self._cli_args, name))) 174 | return values 175 | 176 | def log_level_ignored(self, log_level): 177 | return LOG_LEVEL_MAP[log_level] < LOG_LEVEL_MAP[self.ignore_logs_below] 178 | 179 | 180 | def base_argument_parser(description): 181 | parser = argparse.ArgumentParser(description=description) 182 | parser.add_argument('--version', '-v', action='version', 183 | version=f'%(prog)s {version.ARCHIVER_VERSION}') 184 | parser.add_argument('--config', dest='config_file', 185 | help=('Path to JSON config file containing database credentials and other ' 186 | 'configurations. 
Options given on command line will override options ' 187 | 'set in a config file.')) 188 | 189 | group = parser.add_argument_group('Database connection') 190 | group.add_argument('--dbengine', dest='db_engine', 191 | help='Database engine, postgresql or sqlite (default)') 192 | group.add_argument('--database', help='database name') 193 | group.add_argument('--host', help='database host name', default=None) 194 | group.add_argument('--user', help='database user') 195 | group.add_argument('--pw', '--password', dest='password', help='database password') 196 | group.add_argument('--port', help='database port (default: 5432)') 197 | group.add_argument('--dont-require-ssl', dest='require_ssl', action='store_false', default=None, 198 | help='Disable the default behavior to require ssl from the target database.') 199 | 200 | group = parser.add_argument_group('Schema updates') 201 | group.add_argument('--allow-minor-schema-updates', action='store_true', default=None, 202 | help=('Allow TestArchiver to perform MINOR (backwards compatible) schema ' 203 | 'updates the test archive')) 204 | group.add_argument('--allow-major-schema-updates', action='store_true', default=None, 205 | help=('Allow TestArchiver to perform MAJOR (backwards incompatible) schema ' 206 | 'updates the test archive')) 207 | 208 | group = parser.add_argument_group( 209 | 'Clean history', 210 | description=('If any of the following options are used the archiver will delete the oldest ' 211 | 'result data. What data is kept can be controlled with these --keep-X options ' 212 | 'that can also be mixed. If deletion targets are not specified with --clean-X ' 213 | 'options all test run data is cleaned. --keep-X options should be used when ' 214 | 'using --clean-X options otherwise entire history is cleared. 
' 215 | 'It is recomended to run the cleaning operations separately from result ' 216 | "archiving with python3 -m test_archiver.database")) 217 | group.add_argument('--keep-builds', default=None, 218 | help=('Delete old result data but keep the data for at least given ' 219 | 'number of builds in each series.')) 220 | group.add_argument('--keep-months', default=None, 221 | help=('Delete old result data but keep data that was archived more recently than ' 222 | 'given number of months ago')) 223 | group.add_argument('--keep-after', default=None, 224 | help=('Delete old result data but keep data that was archived more recently than ' 225 | 'given date in ISO 8601 format yyyy-mm-dd.')) 226 | group.add_argument('--clean-team', default=None, 227 | help=('Delete results only archived under given team name. ' 228 | 'Use this with --keep-X options. Otherwise will delete entire history')) 229 | group.add_argument('--clean-logs', action='store_true', default=None, 230 | help=('Delete oldest log messages but not test results or runs. ' 231 | 'Use this with --keep-X options. Otherwise will delete entire log history')) 232 | group.add_argument('--clean-logs-below', default=None, choices=LOG_LEVEL_CUT_OFF_OPTIONS, 233 | help=('Delete oldest log messages that are bellow given log level ' 234 | 'but not test results or runs. ' 235 | 'Use this with --keep-X options. Otherwise will delete entire log history')) 236 | group.add_argument('--clean-keyword-stats', action='store_true', default=None, 237 | help=('Delete oldest keyword statistics data but not test results or runs. ' 238 | 'Use this with --keep-X options. 
Otherwise will delete entire log history')) 239 | 240 | 241 | group = parser.add_argument_group('Limit archived data') 242 | group.add_argument('--no-keywords', dest='archive_keywords', action='store_false', 243 | default=None, help='Do not archive keyword data') 244 | group.add_argument('--no-keyword-stats', dest='archive_keyword_statistics', action='store_false', 245 | default=None, help='Do not archive keyword statistics') 246 | group.add_argument('--ignore-logs-below', default=None, choices=LOG_LEVEL_CUT_OFF_OPTIONS, 247 | help=('Sets a cut off level for archived log messages. ' 248 | 'By default archives all available log messages.')) 249 | group.add_argument('--ignore-logs', action='store_true', default=None, 250 | help='Do not archive any log messages') 251 | group.add_argument('--max_log_message_length', 252 | help="""Specify how many characters of the log message that is archived. 253 | full or 0: archives the complete log. 254 | positive integers: archives number of characters from the beginning. 255 | negative integers: archives number of characters from the end.""") 256 | 257 | group = parser.add_argument_group('Adjust timestamps') 258 | group.add_argument('--time-adjust-secs', dest='time_adjust_secs', 259 | help='Adjust time in timestamps by given seconds. This can be used to change time ' 260 | 'to utc before writing the results to database, especially if the test system ' 261 | 'uses local time, such as robot framework. ' 262 | 'For example if test were run in Finland (GMT+3) in summer (+1hr), calculate ' 263 | 'total hours by minutes and seconds and invert to adjust in correct direction,' 264 | ' i.e. -(3+1)*60*60, so --time-adjust-secs -14400. ' 265 | 'This option is useful if you are archiving in a different location to where ' 266 | 'tests are run.' 267 | 'If you are running tests and archiving in same timezone, ' 268 | 'time-adjust-with-system-timezone may be a better option. 
' 269 | 'This option may be used in conjunction with ' 270 | '--time-adjust-with-system-timezone if desired.') 271 | group.add_argument('--time-adjust-with-system-timezone', dest='time_adjust_with_system_timezone', 272 | default=None, action='store_true', 273 | help='Adjust the time in timestamps by the system timezone (including daylight ' 274 | 'savings adjust). If you are archiving tests in the same timezone as you are ' 275 | 'running tests, setting this option will ensure time written to the database ' 276 | 'is in UTC/GMT time. This assumes that if multiple computers are used that ' 277 | 'their timezone and daylight savings settings are identical. ' 278 | 'Take care also that you do not run tests just before a daylight savings time ' 279 | 'adjust and archive just after, as times will be out by one hour. This could ' 280 | 'easily happen if long running tests cross a timezone adjust boundary. ' 281 | 'This option may be used in conjunction with --time-adjust-secs.') 282 | return parser 283 | 284 | 285 | def configuration(argument_parser): 286 | if sys.version_info[0] < 3: 287 | sys.exit('Unsupported Python version (' + str(sys.version_info.major) + '). 
Please use version 3.') 288 | 289 | args = argument_parser().parse_args() 290 | config = Config() 291 | config.resolve(cli_args=args) 292 | return config, args 293 | -------------------------------------------------------------------------------- /tests/unit/test_database.py: -------------------------------------------------------------------------------- 1 | 2 | # pylint: disable=W0212 3 | 4 | import os 5 | import shutil 6 | import unittest 7 | from unittest.mock import Mock 8 | 9 | from test_archiver import database, configs 10 | 11 | 12 | class TestSchemaCheckingAndUpdatesWithMockDatabase(unittest.TestCase): 13 | 14 | def setUp(self): 15 | class MockDatabase(database.BaseDatabase): 16 | def _db_engine_identifier(self): 17 | return 'mock' 18 | 19 | MockDatabase._connect = Mock() 20 | MockDatabase._latest_update_applied = Mock() 21 | MockDatabase._initialize_schema = Mock() 22 | MockDatabase._run_script = Mock() 23 | MockDatabase.fetch_one_value = Mock() 24 | self.mock_db_class = MockDatabase 25 | 26 | def test_check_and_update_schema_initializes_schema(self): 27 | config = configs.Config().resolve() 28 | mock_db = self.mock_db_class(config) 29 | mock_db._latest_update_applied.return_value = None 30 | mock_db._initialize_schema.return_value = True 31 | 32 | mock_db.check_and_update_schema() 33 | mock_db._initialize_schema.assert_called_once() 34 | mock_db._run_script.assert_not_called() 35 | 36 | def test_check_and_update_schema_runs_updates_on_v1_schema_when_allowed(self): 37 | config = configs.Config() 38 | config.resolve(file_config={'allow_major_schema_updates': True}) 39 | mock_db = self.mock_db_class(config) 40 | mock_db._latest_update_applied.return_value = None 41 | mock_db._initialize_schema.return_value = False 42 | 43 | mock_db.check_and_update_schema() 44 | mock_db._initialize_schema.assert_called_once() 45 | self.assertEqual(mock_db._run_script.call_count, len(database.SCHEMA_UPDATES)) 46 | 47 | def 
test_check_and_update_schema_runs_updates_on_v2_schema_when_allowed(self): 48 | config = configs.Config(file_config={'allow_major_schema_updates': True}) 49 | mock_db = self.mock_db_class(config) 50 | mock_db._latest_update_applied.return_value = 1 51 | mock_db._initialize_schema.return_value = False 52 | mock_db._schema_updates = ((1001, False, 'major update'),) 53 | 54 | mock_db.check_and_update_schema() 55 | mock_db._initialize_schema.assert_not_called() 56 | self.assertEqual(mock_db._run_script.call_count, 1) 57 | 58 | def test_check_and_update_schema_does_not_run_any_updates_without_permission(self): 59 | mock_db = self.mock_db_class(configs.Config()) 60 | mock_db._latest_update_applied.return_value = 0 61 | mock_db._initialize_schema.return_value = False 62 | 63 | mock_db._schema_updates = ((1001, False, 'major_update.sql'),) 64 | with self.assertRaises(database.ArchiverSchemaException): 65 | mock_db.check_and_update_schema() 66 | mock_db._run_script.assert_not_called() 67 | 68 | mock_db._schema_updates = ((1001, True, 'minor_update.sql'),) 69 | with self.assertRaises(database.ArchiverSchemaException): 70 | mock_db.check_and_update_schema() 71 | mock_db._run_script.assert_not_called() 72 | 73 | def test_check_and_update_schema_does_not_run_major_updates_without_permission(self): 74 | mock_db = self.mock_db_class(configs.Config(file_config={'allow_minor_schema_updates': True})) 75 | mock_db._latest_update_applied.return_value = 0 76 | mock_db._schema_updates = ((1001, False, 'major_update.sql'),) 77 | 78 | with self.assertRaises(database.ArchiverSchemaException): 79 | mock_db.check_and_update_schema() 80 | 81 | def test_check_and_update_schema_fails_when_schema_is_too_new(self): 82 | mock_db = self.mock_db_class(configs.Config(file_config={'allow_major_schema_updates': True})) 83 | mock_db._latest_update_applied.return_value = 10002 84 | mock_db.fetch_one_value.return_value = 'a.b.c' 85 | mock_db._schema_updates = ((1001, False, 'major_update.sql'),) 86 | 87 | 
with self.assertRaises(database.ArchiverSchemaException): 88 | mock_db.check_and_update_schema() 89 | mock_db._run_script.assert_not_called() 90 | 91 | 92 | class TestSqliteDatabaseTemplate(unittest.TestCase): 93 | 94 | @classmethod 95 | def setUpClass(cls): 96 | cls.dir_path = os.path.join(os.path.dirname(__file__), 'temp_sqlite_dbs') 97 | try: 98 | shutil.rmtree(cls.dir_path) 99 | except FileNotFoundError: 100 | pass 101 | os.mkdir(cls.dir_path) 102 | 103 | @classmethod 104 | def tearDownClass(cls): 105 | # Comment this if you need to inspect the databases 106 | shutil.rmtree(cls.dir_path) 107 | 108 | def setUp(self): 109 | # Create database file for each test case 110 | temp_db = '{}.{}.db'.format(self.__class__.__name__, self._testMethodName) 111 | full_path = os.path.join(self.__class__.dir_path, temp_db) 112 | config = configs.Config() 113 | config.resolve(file_config={'database': full_path}) 114 | self.database = database.SQLiteDatabase(config) 115 | self.assertTrue(self.database._initialize_schema()) 116 | 117 | def tearDown(self): 118 | self.database._connection.commit() 119 | 120 | class TestSqliteDatabaseMethods(TestSqliteDatabaseTemplate): 121 | 122 | def test_initialize_schema(self): 123 | inital_update = self.database.fetch_one_value('schema_updates', 'initial_update', {'id': 1}) 124 | self.assertTrue(inital_update) 125 | self.assertFalse(self.database._initialize_schema()) 126 | 127 | def test_check_and_update_schema_with_newer_schema_already_applied(self): 128 | # Insert newer schema version 129 | self.database.insert('schema_updates', {"schema_version": self.database.current_schema_version() + 1, 130 | "applied_by": 'future version'}) 131 | with self.assertRaises(database.ArchiverSchemaException): 132 | self.database.check_and_update_schema() 133 | 134 | def test_return_id_or_insert_and_return_id(self): 135 | data = {'name': 'First suite', 'full_name': 'First suite', 'repository': 'foo repo'} 136 | returned_id_1 = 
self.database.return_id_or_insert_and_return_id('suite', data, ['full_name']) 137 | returned_id_2 = self.database.return_id_or_insert_and_return_id('suite', data, ['full_name']) 138 | self.assertEqual(returned_id_1, returned_id_2) 139 | data = {'name': 'Second suite', 'full_name': 'Second suite', 'repository': 'foo repo'} 140 | returned_id_3 = self.database.return_id_or_insert_and_return_id('suite', data, ['full_name']) 141 | self.assertNotEqual(returned_id_1, returned_id_3) 142 | 143 | def test_insert_and_return_id(self): 144 | data = {'name': 'First suite', 'full_name': 'First suite', 'repository': 'foo repo'} 145 | returned_id_1 = self.database.insert_and_return_id('suite', data, ['full_name']) 146 | with self.assertRaises(database.IntegrityError): 147 | self.database.insert_and_return_id('suite', data, ['full_name']) 148 | data = {'name': 'Second suite', 'full_name': 'Second suite', 'repository': 'foo repo'} 149 | returned_id_2 = self.database.return_id_or_insert_and_return_id('suite', data, ['full_name']) 150 | self.assertNotEqual(returned_id_1, returned_id_2) 151 | 152 | def test_insert_or_ignore(self): 153 | data = {'fingerprint': '1234567890123456789012345678901234567890', 'status': 'PASS'} 154 | self.database.insert_or_ignore('keyword_tree', data, ['fingerprint']) 155 | self.database.insert_or_ignore('keyword_tree', data, ['fingerprint']) 156 | data = {'fingerprint': '0987654321098765432109876543210987654321', 'status': 'FAIL'} 157 | self.database.insert_or_ignore('keyword_tree', data, ['fingerprint']) 158 | row_count = self.database.fetch_one_value('keyword_tree', 'count(*)') 159 | self.assertEqual(row_count, 2) 160 | 161 | def test_update(self): 162 | data = {'fingerprint': '1234567890123456789012345678901234567890', 'status': 'PASS'} 163 | self.database.insert_or_ignore('keyword_tree', data, ['fingerprint']) 164 | self.database.update('keyword_tree', {'status': 'FAIL'}, 165 | {'fingerprint': '1234567890123456789012345678901234567890'}) 166 | row_count = 
self.database.fetch_one_value('keyword_tree', 'count(*)') 167 | self.assertEqual(row_count, 1) 168 | updated = self.database.fetch_one_value('keyword_tree', 'status', 169 | {'fingerprint': '1234567890123456789012345678901234567890'}) 170 | self.assertEqual(updated, 'FAIL') 171 | 172 | def test_insert(self): 173 | data = {'fingerprint': '1234567890123456789012345678901234567890', 'status': 'PASS'} 174 | self.database.insert('keyword_tree', data) 175 | with self.assertRaises(database.IntegrityError): 176 | self.database.insert('keyword_tree', data) 177 | data = {'fingerprint': '0987654321098765432109876543210987654321', 'status': 'FAIL'} 178 | self.database.insert('keyword_tree', data) 179 | row_count = self.database.fetch_one_value('keyword_tree', 'count(*)') 180 | self.assertEqual(row_count, 2) 181 | 182 | def test_applying_schema_updates(self): 183 | latest_update = self.database._latest_update_applied() 184 | self.assertTrue(latest_update < 10001) 185 | 186 | self.database._schema_updates = ((10001, True, 'testing/10001-minor_test_update1.sql'),) 187 | with self.assertRaises(database.ArchiverSchemaException): 188 | self.database.check_and_update_schema() 189 | self.assertTrue(latest_update < 10001) 190 | 191 | self.database.allow_minor_schema_updates = True 192 | self.database.check_and_update_schema() 193 | latest_update = self.database._latest_update_applied() 194 | self.assertEqual(latest_update, 10001) 195 | 196 | self.database._schema_updates = ((10001, True, 'testing/10001-minor_test_update1.sql'), 197 | (10002, True, 'testing/10002-minor_test_update2.sql')) 198 | self.database.check_and_update_schema() 199 | latest_update = self.database._latest_update_applied() 200 | self.assertEqual(latest_update, 10002) 201 | 202 | self.database._schema_updates = ((10001, True, 'testing/10001-minor_test_update1.sql'), 203 | (10002, True, 'testing/10002-minor_test_update2.sql'), 204 | (10003, False, 'testing/10003-major_test_update.sql')) 205 | with 
        # NOTE(review): this excerpt begins mid-statement -- the leading "with "
        # of this assertRaises context manager lies before the visible chunk.
        self.assertRaises(database.ArchiverSchemaException):
            self.database.check_and_update_schema()
        # With major updates disallowed, only the minor migrations were applied
        # (10002 = '10002-minor_test_update2.sql' is the newest minor update).
        latest_update = self.database._latest_update_applied()
        self.assertEqual(latest_update, 10002)

        # Allowing major schema updates lets the major migration (10003) run as well.
        self.database.allow_major_schema_updates = True
        self.database.check_and_update_schema()
        latest_update = self.database._latest_update_applied()
        self.assertEqual(latest_update, 10003)

    def test_delete(self):
        """delete() removes only rows matched by where_query, or all rows when it is omitted."""
        data = {'fingerprint': '123456789012345678901234567890123456789A', 'status': 'PASS'}
        self.database.insert('keyword_tree', data)
        data = {'fingerprint': '123456789012345678901234567890123456789B', 'status': 'PASS'}
        self.database.insert('keyword_tree', data)
        row_count = self.database.fetch_one_value('keyword_tree', 'count(*)')
        self.assertEqual(row_count, 2)

        # Targeted delete: the '?' placeholder is bound to the first fingerprint,
        # so exactly one of the two rows is removed.
        self.database.delete('keyword_tree', ('123456789012345678901234567890123456789A',),
                             where_query="WHERE fingerprint=?")
        row_count = self.database.fetch_one_value('keyword_tree', 'count(*)')
        self.assertEqual(row_count, 1)

        # No where_query: the whole table is emptied.
        self.database.delete('keyword_tree')
        row_count = self.database.fetch_one_value('keyword_tree', 'count(*)')
        self.assertEqual(row_count, 0)


class TestSqliteDatabaseCleaning(TestSqliteDatabaseTemplate):
    """Tests for delete_history() pruning of runs, log messages and keyword statistics."""

    def _generate_simple_archive(self):
        """Populate the archive with two series that share one suite and one test case.

        Creates four test runs in total:
          * build 1: one run, mapped into BOTH series
          * build 2: two runs, mapped only into 'Series with all test runs'
          * build 3: one run, mapped only into 'Series with all test runs'
        """
        series_with_first_id = self.database.return_id_or_insert_and_return_id(
            'test_series', {'name': 'Series with only first run', 'team': 'Team 1'}, ['team', 'name'])
        series_with_all_id = self.database.return_id_or_insert_and_return_id(
            'test_series', {'name': 'Series with all test runs', 'team': 'Team 2'}, ['team', 'name'])
        suite_id = self.database.return_id_or_insert_and_return_id(
            'suite', {'full_name': 'Mock suite', 'name': 'Mock suite', 'repository': 'mock repo'},
            ['full_name'])
        # NOTE(review): the test case reuses the suite's name as its own full_name;
        # presumably intentional for this mock data -- confirm.
        test_id = self.database.return_id_or_insert_and_return_id(
            'test_case',
            {'full_name': 'Mock suite', 'name': 'Mock suite', 'suite_id': suite_id},
            ['full_name'])
        kw_fingerprint = '1234567890123456789012345678901234567890'
        self.database.insert_or_ignore('keyword_tree', {'fingerprint': kw_fingerprint})

        # First run in both series
        test_run_id = self._generate_test_run(suite_id, test_id, kw_fingerprint)
        self.database.insert('test_series_mapping', {'series': series_with_first_id,
                                                     'test_run_id': test_run_id, 'build_number': '1'})
        self.database.insert('test_series_mapping', {'series': series_with_all_id,
                                                     'test_run_id': test_run_id, 'build_number': '1'})
        # Two runs in the second build of all runs series
        test_run_id = self._generate_test_run(suite_id, test_id, kw_fingerprint)
        self.database.insert('test_series_mapping', {'series': series_with_all_id,
                                                     'test_run_id': test_run_id, 'build_number': '2'})
        test_run_id = self._generate_test_run(suite_id, test_id, kw_fingerprint)
        self.database.insert('test_series_mapping', {'series': series_with_all_id,
                                                     'test_run_id': test_run_id, 'build_number': '2'})
        # One more in the third build of all runs series
        test_run_id = self._generate_test_run(suite_id, test_id, kw_fingerprint)
        self.database.insert('test_series_mapping', {'series': series_with_all_id,
                                                     'test_run_id': test_run_id, 'build_number': '3'})

    def _generate_test_run(self, suite_id, test_id, kw_fingerprint):
        """Insert one test run plus its per-run rows; return the new test_run id.

        Each run contributes: 1 suite_result, 1 test_result, 2 log_message rows
        (one WARN, one DEBUG), 1 suite_metadata, 1 test_tag and 1 keyword_statistics row.
        """
        test_run_id = self.database.insert_and_return_id(
            'test_run', {'archived_using': 'unittests',
                         'schema_version': self.database.current_schema_version()})
        self.database.insert('suite_result', {'suite_id': suite_id, 'test_run_id': test_run_id})
        self.database.insert('test_result', {'test_id': test_id, 'test_run_id': test_run_id})
        self.database.insert('log_message', {'test_run_id': test_run_id, 'suite_id': suite_id,
                                             'log_level': 'WARN', 'message': 'Warning'})
        self.database.insert('log_message', {'test_run_id': test_run_id, 'suite_id': suite_id,
                                             'log_level': 'DEBUG', 'message': 'Debug...'})
        self.database.insert('suite_metadata', {'test_run_id': test_run_id, 'suite_id': suite_id,
                                                'name': 'VERSION', 'value': '1.2.3'})
        self.database.insert('test_tag', {'test_run_id': test_run_id, 'test_id': test_id, 'tag': 'mock tag'})
        self.database.insert('keyword_statistics',
                             {'test_run_id': test_run_id, 'fingerprint': kw_fingerprint})
        return test_run_id

    def assert_number_of_rows(self, table, expected_rows):
        """Assert that `table` contains exactly `expected_rows` rows."""
        row_count = self.database.fetch_one_value(table, 'count(*)')
        self.assertEqual(row_count, expected_rows)

    def test_delete_history_no_cleaning(self):
        """With no criteria given, delete_history() must not even query for run ids to clean."""
        self.database._run_ids_to_clean_query = Mock()
        self.database.delete_history(None, None, None, None, None, None, None)
        self.database._run_ids_to_clean_query.assert_not_called()

    def test_delete_history_completely(self):
        """A far-future cut-off date wipes every run and all dependent rows."""
        self._generate_simple_archive()
        self.database.delete_history(None, None, None, '2222-01-01', None, None, None)
        self.assert_number_of_rows('test_run', 0)
        self.assert_number_of_rows('test_result', 0)
        self.assert_number_of_rows('log_message', 0)
        self.assert_number_of_rows('test_series_mapping', 0)

    def test_delete_history_keeping_last_builds(self):
        """Keeping the last N builds per series removes only runs of older builds."""
        self._generate_simple_archive()
        # All 3 builds fit within the 'keep 3' limit: nothing removed (4 runs, 2 logs each).
        self.database.delete_history(None, 3, None, None, None, None, None)
        self.assert_number_of_rows('test_run', 4)
        self.assert_number_of_rows('test_result', 4)
        self.assert_number_of_rows('log_message', 2*4)

        # Keep only the newest build of each series: run 1 (sole build of the
        # first-run series) and run 4 (build 3 of the all-runs series) survive.
        self.database.delete_history(None, 1, None, None, None, None, None)
        self.assert_number_of_rows('test_run', 2)
        self.assert_number_of_rows('test_result', 2)
        self.assert_number_of_rows('log_message', 2*2)

    def test_delete_history_logs_only(self):
        """Logs-only mode strips log_message rows but keeps runs and results."""
        self._generate_simple_archive()
        # Runs outside the newest build of each series lose their 2 logs each (8 -> 4).
        self.database.delete_history(None, 1, None, None, True, None, None)
        self.assert_number_of_rows('test_run', 4)
        self.assert_number_of_rows('test_result', 4)
        self.assert_number_of_rows('log_message', 4)

        # Without a keep-limit all log messages are removed; runs remain intact.
        self.database.delete_history(None, None, None, None, True, None, None)
        self.assert_number_of_rows('test_run', 4)
        self.assert_number_of_rows('test_result', 4)
        self.assert_number_of_rows('log_message', 0)

    def test_delete_history_logs_only_below_warn(self):
        """Level-threshold mode deletes only messages below WARN (the DEBUG ones)."""
        self._generate_simple_archive()
        # The two runs outside the kept builds each lose their single DEBUG message (8 -> 6).
        self.database.delete_history(None, 1, None, None, None, 'WARN', None)
        self.assert_number_of_rows('test_run', 4)
        self.assert_number_of_rows('test_result', 4)
        self.assert_number_of_rows('log_message', 6)

        # Applied to every run: all 4 DEBUG messages go, the 4 WARN messages stay.
        self.database.delete_history(None, None, None, None, None, 'WARN', None)
        self.assert_number_of_rows('test_run', 4)
        self.assert_number_of_rows('test_result', 4)
        self.assert_number_of_rows('log_message', 4)

    def test_delete_history_kw_stats_only(self):
        """Keyword-statistics-only mode removes keyword_statistics rows, nothing else."""
        self._generate_simple_archive()
        # The two runs outside the kept builds lose their keyword statistics (4 -> 2).
        self.database.delete_history(None, 1, None, None, None, None, True)
        self.assert_number_of_rows('test_run', 4)
        self.assert_number_of_rows('test_result', 4)
        self.assert_number_of_rows('keyword_statistics', 2)

        # With no keep-limit every keyword statistics row is removed.
        self.database.delete_history(None, None, None, None, None, None, True)
        self.assert_number_of_rows('test_run', 4)
        self.assert_number_of_rows('test_result', 4)
        self.assert_number_of_rows('keyword_statistics', 0)


if __name__ == '__main__':
    unittest.main()