From 1e6676697363731826660902b3e784c44d7b61d7 Mon Sep 17 00:00:00 2001 From: Daniel Date: Mon, 11 Dec 2023 14:29:18 +0100 Subject: [PATCH] add TestResult, all test executions return TestResult --- pytest_abra/coordinator.py | 8 +++++--- pytest_abra/runner.py | 40 ++++++++++++++++++------------------- pytest_abra/shared_types.py | 16 +++++++++++++++ 3 files changed, 41 insertions(+), 23 deletions(-) create mode 100644 pytest_abra/shared_types.py diff --git a/pytest_abra/coordinator.py b/pytest_abra/coordinator.py index a8dc94f..5149787 100644 --- a/pytest_abra/coordinator.py +++ b/pytest_abra/coordinator.py @@ -10,6 +10,7 @@ from pytest_abra.dir_manager import DirManager from pytest_abra.env_manager import EnvFile, EnvManager from pytest_abra.html_helper import merge_html_reports from pytest_abra.runner import Runner +from pytest_abra.shared_types import TestResult from pytest_abra.utils import generate_random_string, load_json_to_environ, rmtree @@ -42,12 +43,13 @@ class Coordinator: def run_tests(self) -> None: logger.info("calling run_tests()") self.runners: list[Runner] = self._load_runners(self.ENV.env_files) + status_list: list[TestResult] = [] for runner in self.runners: - runner.run_setups() + status_list.extend(runner.run_setups()) for runner in self.runners: - runner.run_tests() + status_list.extend(runner.run_tests()) for runner in self.runners: - runner.run_cleanups() + status_list.extend(runner.run_cleanups()) logger.info("run_tests() finished") def _load_runners(self, env_files: list[EnvFile]) -> list[Runner]: diff --git a/pytest_abra/runner.py b/pytest_abra/runner.py index 1951cce..894283c 100644 --- a/pytest_abra/runner.py +++ b/pytest_abra/runner.py @@ -6,6 +6,8 @@ from typing import TYPE_CHECKING, Callable, NamedTuple import pytest from loguru import logger +from pytest_abra.shared_types import STATUS, TestResult + if TYPE_CHECKING: from pytest_abra import Coordinator, DirManager, EnvFile @@ -41,29 +43,28 @@ class Runner: logger.info(f"creating 
instance of {self.__class__.__name__}") - def run_setups(self): + def run_setups(self) -> list[TestResult]: """runs the setup scripts if available""" - self._execute_tests_list(self.setups) + return self._execute_tests_list(self.setups) - def run_tests(self): + def run_tests(self) -> list[TestResult]: """runs the test scripts if available""" - self._execute_tests_list(self.tests) + return self._execute_tests_list(self.tests) - def run_cleanups(self): + def run_cleanups(self) -> list[TestResult]: """runs the cleanup scripts if available""" - self._execute_tests_list(self.cleanups) + return self._execute_tests_list(self.cleanups) - def _execute_tests_list(self, test_list: list[Test]): + def _execute_tests_list(self, test_list: list[Test]) -> list[TestResult]: """Runs all tests given in the list. If condition is defined, it is also checked.""" # check if required dependencies have passed if not self._dependencies_passed(): logger.warning(f"skipping run_tests() of {self.env_type} (one or more dependencies have not passed)") - return + return [TestResult("skipped_dep", test.test_file) for test in test_list] - for test in test_list: - self._run_test_with_checks(test) + return [self._run_test_with_checks(test) for test in test_list] - def _run_test_with_checks(self, test: Test): + def _run_test_with_checks(self, test: Test) -> TestResult: identifier_string = self.combine_names(self.env_type, test.test_file) test_files = list(self._tests_path.rglob(test.test_file)) @@ -76,20 +77,22 @@ class Runner: logger.info(f"continuing {identifier_string} (passed before but prevent_skip=True)") else: logger.info(f"skipping {identifier_string} (test has passed)") - return + return TestResult("skipped_pas", test.test_file) if test.condition: condition_result = self._call_condition_function(test.condition) if not condition_result: # test condition is defined but not met logger.info(f"skipping {identifier_string} (test condition is not met)") - self._create_status_file(self.DIR, 
status="skipped", identifier_string=identifier_string) - return + self._create_status_file(self.DIR, status="skipped_con", identifier_string=identifier_string) + return TestResult("skipped_con", test.test_file) # test condition is undefined or not met logger.info(f"running {identifier_string}") exit_code = self._call_pytest(full_test_path) - self._create_status_file(self.DIR, status=exit_code, identifier_string=identifier_string) + status = self.exit_code_to_str(exit_code) + self._create_status_file(self.DIR, status=status, identifier_string=identifier_string) + return TestResult(status, test.test_file) def _call_condition_function(self, condition_function: Callable[[ConditionArgs], bool]): """run the test condition function with multiple arguments""" @@ -105,14 +108,11 @@ class Runner: def _create_status_file( cls, DIR: "DirManager", - status: int | str, + status: STATUS, identifier_string: str, ): """create result file to indicated passed/failed/skipped test""" - if isinstance(status, int): - status = cls.exit_code_to_str(status) - # remove matching files for status_file in cls._get_status_files(DIR, identifier_string): status_file.unlink() @@ -203,7 +203,7 @@ class Runner: return all(results) @staticmethod - def exit_code_to_str(result_int: int) -> str: + def exit_code_to_str(result_int: int) -> STATUS: """converts the pytest exit code (int) into a meaningful string""" match result_int: case 0: diff --git a/pytest_abra/shared_types.py b/pytest_abra/shared_types.py new file mode 100644 index 0000000..ff0e147 --- /dev/null +++ b/pytest_abra/shared_types.py @@ -0,0 +1,16 @@ +from typing import Literal, NamedTuple + +""" +passed: test passed +failed: test failed +skipped_con: test skipped because condition was not met +skipped_dep: test skipped because one or more dependencies did not pass +skipped_pas: test skipped because it passed before +""" + +STATUS = Literal["passed", "failed", "skipped_con", "skipped_dep", "skipped_pas"] + + +class TestResult(NamedTuple): + status: 
STATUS + test_name: str