rework-output-and-test-logic (#3)
* fix flaky tests in authentik / wordpress
* make it possible to rerun tests partially -> passed tests will be skipped, failed tests will be repeated
* improve organization of all outputs (moving, renaming, keeping multiple versions etc.)
* add html reports, replace .txt tracebacks
* combine all html reports into one
* add demo runner with comments for documentation purposes

Reviewed-on: local-it-infrastructure/e2e_tests#3
Co-authored-by: Daniel <d.brummerloh@gmail.com>
Co-committed-by: Daniel <d.brummerloh@gmail.com>
parent d2cd6ba47f
commit 8172f685de
24 changed files with 588 additions and 418 deletions
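The central change in src/runner.py below is that a Runner no longer fires pytest unconditionally: every setup, main test and sub-test gets an identifier string, a result marker file, and is skipped on a rerun if a matching "passed" marker already exists. As a rough orientation before the diff, here is a minimal sketch of how a concrete runner could be declared against the reworked base class; the class, directory and file names are illustrative assumptions and not part of this commit.

# Illustrative sketch only -- all names below are assumptions, not part of this commit.
from src.runner import Runner, SubTest


class AuthentikRunner(Runner):
    name = "authentik"
    test_dir_name = "authentik"
    main_setup_name = "test_setup.py"


class WordpressRunner(Runner):
    name = "wordpress"                       # prefix used in result marker file names
    test_dir_name = "wordpress"              # folder with the pytest files for this service
    main_setup_name = "test_setup.py"        # runs first; other runners can depend on it
    main_test_name = "test_main.py"
    dependencies = [AuthentikRunner]         # their main setup must have passed before
    sub_tests = [
        SubTest(
            # condition now receives the parsed dotenv config instead of the dotenv path
            condition=lambda config: config.get("WORDPRESS_ENABLE_PLUGINS") == "true",
            test_file="test_plugins.py",
        ),
    ]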
src/runner.py: 125 changed lines
@@ -3,23 +3,24 @@ from typing import Callable, Optional, TypedDict
 import pytest
 from dotenv import dotenv_values
 from icecream import ic
 from loguru import logger
 
 from src.dirmanager import DirManager
 
 
 class SubTest(TypedDict):
-    condition: Callable[[Path], bool]
+    condition: Callable[[dict[str, str]], bool]
     test_file: str
 
 
 class Runner:
-    name: Optional[str] = None
-    test_dir_name: Optional[str] = None
+    name: str = ""
+    test_dir_name: str = ""
     main_setup_name: Optional[str] = None
     main_test_name: Optional[str] = None
+    dependencies: list[type["Runner"]] = []
     sub_tests: list[SubTest] = []
-    dependencies: list[str] = []
+    prevent_skip = False
 
     def __init__(self, dotenv_path: Path, output_dir: Path, session_id: str):
        self.dotenv_path = dotenv_path
@@ -28,24 +29,54 @@ class Runner:
         self.session_id = session_id
         self.DIRS = DirManager(output_dir, session_id)
 
-        ic(f"creating instance of {self.__class__.__name__}")
-        assert self.test_dir_name is not None
+        logger.info(f"creating instance of {self.__class__.__name__}")
+        assert self.test_dir_name
         self.root_dir = Path(__file__).parent
 
-    def _run_main_test(self):
+    def _run_main_setup_and_test(self):
         if isinstance(self.main_setup_name, str):
-            full_path = self.root_dir / self.test_dir_name / self.main_setup_name
-            self._run_pytest(full_path)
+            self._run_test_if_required(
+                identifier_string=self.combine_names(self.name, self.main_setup_name),
+                test_path=self.root_dir / self.test_dir_name / self.main_setup_name,
+            )
 
         if isinstance(self.main_test_name, str):
-            full_path = self.root_dir / self.test_dir_name / self.main_test_name
-            self._run_pytest(full_path)
+            self._run_test_if_required(
+                identifier_string=self.combine_names(self.name, self.main_test_name),
+                test_path=self.root_dir / self.test_dir_name / self.main_test_name,
+            )
 
-    def _run_pytest(self, full_test_path: Path):
-        """runs pytest programmatically
-
-        will run all tests in the file at full_test_path with some command line arguments"""
-
-        ic(f"running test: {full_test_path}")
+    def _run_test_if_required(self, identifier_string: str, test_path: Path):
+        if not self.prevent_skip and self._test_already_passed(identifier_string, remove_existing=True):
+            logger.info(f"skipping {identifier_string}")
+        else:
+            logger.info(f"running {identifier_string}")
+            result = self._call_pytest(test_path)
+            self._create_result_file(result=result, identifier_string=identifier_string)
+
+    def _test_already_passed(self, identifier_string: str, remove_existing: bool = False) -> bool:
+        """returns True if the selected test (matching test_name + sub_test_name) already passed
+
+        This is determined by the presence of a specific output file in the RESULTS folder that
+        matches identifier_string
+
+        remove_existing: If True, result files matching test_name + sub_test_name with a status
+        other than 'passed' will be deleted"""
+
+        already_passed = False
+        for result in self.DIRS.RESULTS.glob("*"):
+            if identifier_string in result.name:
+                # process any result file (passed / failed / skipped) if it exists
+                if "passed" in result.name:
+                    already_passed = True
+                elif remove_existing:
+                    result.unlink()
+        return already_passed
+
+    def _call_pytest(self, full_test_path: Path) -> int:
+        """runs pytest programmatically on a specific file
+
+        all tests in the file [full_test_path] will be run along with command line arguments"""
 
         command_arguments = []
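The rerun logic above keys entirely off empty marker files in DIRS.RESULTS whose names contain a status plus the identifier string. A standalone sketch of the same decision, with assumed paths and names, to make the convention concrete:

# Assumed paths and names -- for illustration of _test_already_passed only.
from pathlib import Path

results = Path("output/2024-01-01_12-00/results")   # stands in for self.DIRS.RESULTS
identifier = "wordpress-test_main.py"                # combine_names(runner.name, test file)

already_passed = False
for marker in results.glob("*"):
    if identifier in marker.name:
        if "passed" in marker.name:        # e.g. "passed-wordpress-test_main.py"
            already_passed = True          # -> _run_test_if_required skips the rerun
        else:                              # e.g. "failed-wordpress-test_main.py"
            marker.unlink()                # stale non-passed marker removed, test reruns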
@@ -66,9 +97,8 @@ class Runner:
         # warning: https://github.com/microsoft/playwright-pytest/issues/111
         # --output only works with the given context and page fixture
         # folder needs to be unique! traces will not appear, if every pytest run has same output dir
-        output = self.DIRS.RESULTS / full_test_path.stem
         command_arguments.append("--output")
-        command_arguments.append(str(output))
+        command_arguments.append(str(self.DIRS.RECORDS / "traces" / full_test_path.stem))
 
         # tracing
         command_arguments.append("--tracing")
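The --output change matters because of the playwright-pytest issue referenced in the comment: traces are only written when every pytest invocation gets its own output folder, so the folder is now derived from the test file's stem under DIRS.RECORDS. Roughly, for one test file the assembled arguments end up looking like this; the paths are assumed, and the value for --tracing comes from unchanged lines not shown in this hunk.

# Assumed example of the arguments assembled by _call_pytest for one test file.
from pathlib import Path

full_test_path = Path("src/wordpress/test_main.py")
records = Path("output/2024-01-01_12-00/records")    # stands in for self.DIRS.RECORDS

command_arguments = [
    str(full_test_path),
    # unique per test file, otherwise Playwright silently drops the traces
    "--output", str(records / "traces" / full_test_path.stem),
    "--tracing", "on",                                # actual value set in unchanged lines
]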
@@ -81,27 +111,56 @@ class Runner:
         # headed
         # command_arguments.append("--headed")
 
-        pytest.main(command_arguments)
+        # html report. Will be combined into one file later.
+        command_arguments.append(f"--html={self.DIRS.RECORDS / 'html' / full_test_path.with_suffix('.html').name}")
+
+        return pytest.main(command_arguments)
 
     def run_tests(self):
-        self._check_dependencies_finished()
-        self._run_main_test()
+        self._assert_dependencies_passed()
+        self._run_main_setup_and_test()
         for sub_test in self.sub_tests:
             condition_function = sub_test["condition"]
-            if condition_function(self.dotenv_path):
-                test_name = sub_test["test_file"]
-                full_test_path = self.root_dir / self.test_dir_name / test_name
-                self._run_pytest(full_test_path)
-        self._create_progress_file()
+            sub_test_name = sub_test["test_file"]
+            identifier_string = self.combine_names(self.name, sub_test_name)
+            if condition_function(self.config):
+                test_path = self.root_dir / self.test_dir_name / sub_test_name
+                self._run_test_if_required(identifier_string=identifier_string, test_path=test_path)
+            else:
+                self._create_result_file(result=-1, identifier_string=identifier_string)
 
-    def _create_progress_file(self):
-        """create progress file to indicated finished test"""
-        file_path = self.DIRS.PROGRESS / self.name
+    def _create_result_file(
+        self,
+        result: int,
+        identifier_string: str,
+    ):
+        """create result file to indicated passed/failed or skipped test"""
+
+        full_name = self.combine_names(self.result_int_to_str(result), identifier_string)
+        file_path = self.DIRS.RESULTS / full_name
         with open(file_path, "w") as _:
             pass # create empty file
 
-    def _check_dependencies_finished(self):
-        """look for progress file of dependencies to confirm they have ran"""
-        finished_tests = [result.name for result in self.DIRS.PROGRESS.glob("*")]
+    @staticmethod
+    def result_int_to_str(result_int: int) -> str:
+        match result_int:
+            case -1:
+                return "skipped"
+            case 0:
+                return "passed"
+            case _:
+                return "failed"
+
+    @staticmethod
+    def combine_names(*names: str) -> str:
+        return "-".join(names)
+
+    def _assert_dependencies_passed(self):
+        """assert that all dependencie setups passed before"""
+
+        passed_tests = [r.name for r in self.DIRS.RESULTS.glob("*") if "passed" in r.name]
         for dependencie in self.dependencies:
-            assert dependencie in finished_tests
+            dependencie_identifier = self.combine_names(dependencie.name, dependencie.main_setup_name)
+            assert any(
+                dependencie_identifier in f for f in passed_tests
+            ), f"could not run {self.name} because {dependencie} did not run before"
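Taken together, the new helpers encode everything in the marker file name: status first, then the runner name and the test file. A small worked example of the names that end up in DIRS.RESULTS and what the dependency check looks for; the runner and file names are assumed.

# Assumed names -- illustrates result_int_to_str, combine_names and the marker files.
Runner.result_int_to_str(0)    # "passed"  (pytest.ExitCode.OK)
Runner.result_int_to_str(1)    # "failed"  (any other pytest exit code)
Runner.result_int_to_str(-1)   # "skipped" (sub-test condition evaluated to False)

Runner.combine_names("passed", "wordpress", "test_setup.py")
# -> "passed-wordpress-test_setup.py"  (created as an empty file in DIRS.RESULTS)

# _assert_dependencies_passed then only requires that, for each dependency class,
# some file name in DIRS.RESULTS contains "passed" and "<dep.name>-<dep.main_setup_name>".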