add TestResult, all test executions return TestResult

parent 616fe8a491
commit 1e66766973

3 changed files with 41 additions and 23 deletions
@@ -10,6 +10,7 @@ from pytest_abra.dir_manager import DirManager
 from pytest_abra.env_manager import EnvFile, EnvManager
 from pytest_abra.html_helper import merge_html_reports
 from pytest_abra.runner import Runner
+from pytest_abra.shared_types import TestResult
 from pytest_abra.utils import generate_random_string, load_json_to_environ, rmtree


@@ -42,12 +43,13 @@ class Coordinator:
     def run_tests(self) -> None:
         logger.info("calling run_tests()")
         self.runners: list[Runner] = self._load_runners(self.ENV.env_files)
+        status_list: list[TestResult] = []
         for runner in self.runners:
-            runner.run_setups()
+            status_list.extend(runner.run_setups())
         for runner in self.runners:
-            runner.run_tests()
+            status_list.extend(runner.run_tests())
         for runner in self.runners:
-            runner.run_cleanups()
+            status_list.extend(runner.run_cleanups())
         logger.info("run_tests() finished")

     def _load_runners(self, env_files: list[EnvFile]) -> list[Runner]:
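The hunk above only shows run_tests() collecting each runner's results into status_list; what the Coordinator does with that list afterwards is not part of this commit. As a minimal sketch, assuming a hypothetical reporting helper (the function name and log format are illustrative, not taken from the source), the collected TestResult entries could be summarized like this:

# Hypothetical follow-up, not part of this commit: summarizing the collected results.
from collections import Counter

from loguru import logger

from pytest_abra.shared_types import TestResult


def summarize(status_list: list[TestResult]) -> None:
    """Log a count per status and list the names of failed tests."""
    counts = Counter(result.status for result in status_list)
    for status, count in counts.items():
        logger.info(f"{status}: {count} test(s)")
    failed = [result.test_name for result in status_list if result.status == "failed"]
    if failed:
        logger.warning(f"failed tests: {', '.join(failed)}")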
@@ -6,6 +6,8 @@ from typing import TYPE_CHECKING, Callable, NamedTuple
 import pytest
 from loguru import logger

+from pytest_abra.shared_types import STATUS, TestResult
+
 if TYPE_CHECKING:
     from pytest_abra import Coordinator, DirManager, EnvFile

@@ -41,29 +43,28 @@ class Runner:

         logger.info(f"creating instance of {self.__class__.__name__}")

-    def run_setups(self):
+    def run_setups(self) -> list[TestResult]:
         """runs the setup scripts if available"""
-        self._execute_tests_list(self.setups)
+        return self._execute_tests_list(self.setups)

-    def run_tests(self):
+    def run_tests(self) -> list[TestResult]:
         """runs the test scripts if available"""
-        self._execute_tests_list(self.tests)
+        return self._execute_tests_list(self.tests)

-    def run_cleanups(self):
+    def run_cleanups(self) -> list[TestResult]:
         """runs the cleanup scripts if available"""
-        self._execute_tests_list(self.cleanups)
+        return self._execute_tests_list(self.cleanups)

-    def _execute_tests_list(self, test_list: list[Test]):
+    def _execute_tests_list(self, test_list: list[Test]) -> list[TestResult]:
         """Runs all tests given in the list. If condition is defined, it is also checked."""
         # check if required dependencies have passed
         if not self._dependencies_passed():
             logger.warning(f"skipping run_tests() of {self.env_type} (one or more dependencies have not passed)")
-            return
+            return [TestResult("skipped_dep", test.test_file) for test in test_list]

-        for test in test_list:
-            self._run_test_with_checks(test)
+        return [self._run_test_with_checks(test) for test in test_list]

-    def _run_test_with_checks(self, test: Test):
+    def _run_test_with_checks(self, test: Test) -> TestResult:
         identifier_string = self.combine_names(self.env_type, test.test_file)

         test_files = list(self._tests_path.rglob(test.test_file))
@@ -76,20 +77,22 @@ class Runner:
                 logger.info(f"continuing {identifier_string} (passed before but prevent_skip=True)")
             else:
                 logger.info(f"skipping {identifier_string} (test has passed)")
-                return
+                return TestResult("skipped_pas", test.test_file)

         if test.condition:
             condition_result = self._call_condition_function(test.condition)
             if not condition_result:
                 # test condition is defined but not met
                 logger.info(f"skipping {identifier_string} (test condition is not met)")
-                self._create_status_file(self.DIR, status="skipped", identifier_string=identifier_string)
-                return
+                self._create_status_file(self.DIR, status="skipped_con", identifier_string=identifier_string)
+                return TestResult("skipped_con", test.test_file)

         # test condition is undefined or not met
         logger.info(f"running {identifier_string}")
         exit_code = self._call_pytest(full_test_path)
-        self._create_status_file(self.DIR, status=exit_code, identifier_string=identifier_string)
+        status = self.exit_code_to_str(exit_code)
+        self._create_status_file(self.DIR, status=status, identifier_string=identifier_string)
+        return TestResult(status, test.test_file)

     def _call_condition_function(self, condition_function: Callable[[ConditionArgs], bool]):
         """run the test condition function with multiple arguments"""
@@ -105,14 +108,11 @@ class Runner:
     def _create_status_file(
         cls,
         DIR: "DirManager",
-        status: int | str,
+        status: STATUS,
         identifier_string: str,
     ):
         """create result file to indicated passed/failed/skipped test"""

-        if isinstance(status, int):
-            status = cls.exit_code_to_str(status)
-
         # remove matching files
         for status_file in cls._get_status_files(DIR, identifier_string):
             status_file.unlink()
@@ -203,7 +203,7 @@ class Runner:
         return all(results)

     @staticmethod
-    def exit_code_to_str(result_int: int) -> str:
+    def exit_code_to_str(result_int: int) -> STATUS:
         """converts the pytest exit code (int) into a meaningful string"""
         match result_int:
             case 0:
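The hunk is truncated right after "case 0:", so the full exit-code mapping is not visible here. A minimal sketch of what the body could look like, assuming the standard pytest convention that exit code 0 means all tests passed and treating every other code as a failure (the real method may distinguish more cases):

# A plausible sketch of the full method; only "case 0" is visible in the hunk above.
from typing import Literal

STATUS = Literal["passed", "failed", "skipped_con", "skipped_dep", "skipped_pas"]


class Runner:  # stub class for illustration only
    @staticmethod
    def exit_code_to_str(result_int: int) -> STATUS:
        """converts the pytest exit code (int) into a meaningful string"""
        match result_int:
            case 0:
                return "passed"  # pytest exit code 0: all tests passed
            case _:
                return "failed"  # assumption: any non-zero code is reported as failed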
pytest_abra/shared_types.py (new file, 16 lines)

@@ -0,0 +1,16 @@
+from typing import Literal, NamedTuple
+
+"""
+passed: test passed
+failed: test failed
+skipped_con: test skipped because condition was not met
+skipped_dep: test skipped because dependencies did not finish
+skipped_pas: test skipped because it passed before
+"""
+
+STATUS = Literal["passed", "failed", "skipped_con", "skipped_dep", "skipped_pas"]
+
+
+class TestResult(NamedTuple):
+    status: STATUS
+    test_name: str
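For reference, a short usage example of the new types, assuming the package import path shown elsewhere in this diff; the test file names are made up:

# Illustrative usage of the new types; the sample file names are hypothetical.
from pytest_abra.shared_types import STATUS, TestResult

result = TestResult(status="passed", test_name="test_login.py")
assert result.status == "passed"      # fields are accessible by name (NamedTuple)
assert result[1] == "test_login.py"   # ...and by index, like a plain tuple

results: list[TestResult] = [result, TestResult("skipped_dep", "test_cleanup.py")]
failed = [r.test_name for r in results if r.status == "failed"]  # -> []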