e2e_tests/pytest_abra/runner.py
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Callable, NamedTuple

import pytest
from loguru import logger

if TYPE_CHECKING:
    from pytest_abra.coordinator import Coordinator
    from pytest_abra.env_manager import EnvFile


class ConditionArgs(NamedTuple):
    env_config: dict[str, str]
    runner_index: int
    env_files: list["EnvFile"]


@dataclass
class Test:
    test_file: str
    condition: Callable[[ConditionArgs], bool] | None = None
    prevent_skip: bool = False


class Runner:
    env_type: str = ""
    setups: list[Test] = []
    tests: list[Test] = []
    cleanups: list[Test] = []
    dependencies: list[str] = []
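
    # the class attributes above act as per-environment configuration and are
    # meant to be overridden by concrete runner subclasses; a hypothetical
    # example (names are for illustration only, not taken from the real recipes):
    #
    #   class WordpressRunner(Runner):
    #       env_type = "wordpress"
    #       setups = [Test("setup_wordpress.py")]
    #       tests = [Test("test_wordpress_email.py", prevent_skip=True)]
    #       dependencies = ["mailserver"]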

    def __init__(self, coordinator: "Coordinator", runner_index: int):
        self.coordinator = coordinator
        self.runner_index = runner_index
        self.DIR = coordinator.DIR
        self.ENV = coordinator.ENV
        self.RUNNER_DICT = coordinator.RUNNER_DICT
        logger.info(f"creating instance of {self.__class__.__name__}")

    def run_setups(self):
        """runs the setup scripts if available"""
        self._execute_test_list(self.setups)

    def run_tests(self):
        """runs the test scripts if available"""
        self._execute_test_list(self.tests)

    def run_cleanups(self):
        """runs the cleanup scripts if available"""
        self._execute_test_list(self.cleanups)

    def _execute_test_list(self, test_list: list[Test]):
        """runs every test in the given list, provided its running condition (if any) is met"""
        # skip the whole list if any required dependency has not passed
        if not self._dependencies_passed():
            logger.warning(f"skipping test list of {self.env_type}, because some dependencies have not passed")
            return
        for test in test_list:
            self._run_test_with_checks(test)

    def _run_test_with_checks(self, test: Test):
        # decision factors for running a test:
        # dependency passed: true / false
        # already_passed: true / false
        # prevent_skip: true / false
        # condition_available: true / false
        # condition_met: true / false
        identifier_string = self.combine_names(self.env_type, test.test_file)
        results = list(self.DIR.RECIPES.rglob(test.test_file))
        assert len(results) == 1, f"{test.test_file} should exist exactly once, but was found {len(results)} times"
        full_test_path = results[0]
        # check if the test already passed
        if self._is_test_passed(identifier_string, remove_existing=True):
            if test.prevent_skip:
                logger.info(f"continuing, test {identifier_string} has passed but prevent_skip=True")
            else:
                logger.info(f"skipping {identifier_string}, test has passed")
                return
        if test.condition:
            condition_result = self._run_condition(test.condition)
            if not condition_result:
                # test condition is defined but not met
                logger.info(f"skipping {identifier_string}, test condition is not met")
                return
        # test condition is undefined or met
        logger.info(f"running {identifier_string}")
        result = self._call_pytest(full_test_path)
        self._create_result_file(result=result, identifier_string=identifier_string)

    def _run_condition(self, condition_function: Callable[[ConditionArgs], bool]):
        """run the test condition function with multiple arguments"""
        # more arguments can be added later without changing the function signature
        condition_args = ConditionArgs(
            env_files=self.ENV.env_files,
            runner_index=self.runner_index,
            env_config=self.ENV.env_files[self.runner_index].env_config,
        )
        return condition_function(condition_args)
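
    # a condition is any callable taking ConditionArgs and returning bool,
    # e.g. (hypothetical check, for illustration) only run on the first runner:
    #   Test("some_test.py", condition=lambda args: args.runner_index == 0)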

    def _is_test_passed(self, identifier_string: str, remove_existing: bool = False) -> bool:
        """returns True if the test matching identifier_string has already passed

        This is determined by the presence of a result file in the RESULTS folder
        whose name matches identifier_string.
        remove_existing: If True, result files matching identifier_string with a status
        other than 'passed' will be deleted
        """
        already_passed = False
        for result in self.DIR.RESULTS.glob("*"):
            if identifier_string in result.name:
                # process any result file (passed / failed / skipped) if it exists
                if "passed" in result.name:
                    already_passed = True
                elif remove_existing:
                    result.unlink()
        return already_passed
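
    # result files are named "<status>-<env_type>-<test_file>" by
    # _create_result_file(), so a plain substring check against
    # identifier_string is sufficient here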

    def _call_pytest(self, full_test_path: Path) -> int:
        """runs pytest programmatically on a specific file

        all tests in the file [full_test_path] will be run, with the command line
        arguments assembled below"""
        command_arguments = []
        # command_arguments.append("--traceconfig")
        command_arguments.append("-v")
        # command_arguments.append("-rx")
        command_arguments.append(str(full_test_path))
        command_arguments.append("--runner_index")
        command_arguments.append(str(self.runner_index))
        # set the root dir for test output (used in DirManager). this is our custom argument
        command_arguments.append("--output_dir")
        command_arguments.append(str(self.DIR.OUTPUT_DIR))
        command_arguments.append("--session_id")
        command_arguments.append(self.DIR.session_id)
        command_arguments.append("--timeout")
        command_arguments.append(str(self.coordinator.TIMEOUT))
        # artifacts dir from pytest-playwright
        # warning: https://github.com/microsoft/playwright-pytest/issues/111
        # --output only works with the given context and page fixtures
        # the folder needs to be unique! traces will not appear if every pytest run has the same output dir
        command_arguments.append("--output")
        command_arguments.append(str(self.DIR.RECORDS / "traces" / full_test_path.stem))
        # tracing
        command_arguments.append("--tracing")
        command_arguments.append("retain-on-failure")
        # command_arguments.append("on")
        # Disable capturing. With -s set, prints will go to the console as if pytest were not there.
        # command_arguments.append("-s")
        # headed
        # command_arguments.append("--headed")
        # html report. Will be combined into one file later.
        command_arguments.append(f"--html={self.DIR.RECORDS / 'html' / full_test_path.with_suffix('.html').name}")
        return pytest.main(command_arguments)
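
    # the pytest.main() call above is roughly equivalent to this command line:
    #   pytest -v <test_file> --runner_index <n> --output_dir <dir> --session_id <id> \
    #       --timeout <seconds> --output <traces_dir> --tracing retain-on-failure --html=<report>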

    def _create_result_file(self, result: int, identifier_string: str):
        """create a result file to indicate a passed, failed, or skipped test"""
        full_name = self.combine_names(self.result_int_to_str(result), identifier_string)
        file_path = self.DIR.RESULTS / full_name
        file_path.touch()  # create an empty marker file; only the name carries information

    def _dependencies_passed(self) -> bool:
        """returns True if all setups of each dependency have passed"""
        # todo: what about conditional setups?
        passed_tests = [r.name for r in self.DIR.RESULTS.glob("*") if "passed" in r.name]
        results = []
        for dependency in self.dependencies:
            dependency_runner = self.coordinator.RUNNER_DICT[dependency]
            for setup in dependency_runner.setups:
                dependency_identifier = self.combine_names(dependency_runner.env_type, setup.test_file)
                results.append(any(dependency_identifier in f for f in passed_tests))
        return all(results)  # all([]) is True, so runners without dependencies always pass

    @staticmethod
    def result_int_to_str(result_int: int) -> str:
        """converts the pytest exit code (int) into a meaningful string"""
        # pytest.main() itself returns 0 (all passed) or a non-zero ExitCode;
        # -1 is presumably a project-internal marker for skipped runs
        match result_int:
            case -1:
                return "skipped"
            case 0:
                return "passed"
            case _:
                return "failed"

    @staticmethod
    def combine_names(*names: str) -> str:
        """joins name parts into a single '-'-separated identifier"""
        return "-".join(names)