e2e_tests/src/runner.py
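"""Base runner for the end-to-end tests.

A Runner subclass describes one service's test suite: it loads the service's
dotenv configuration, runs pytest on its setup, main, and conditional sub-test
files, and records each outcome as an empty marker file (passed / failed /
skipped) so that repeated runs can skip work that already passed and dependent
runners can verify that their prerequisites ran successfully.
"""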

from pathlib import Path
from typing import Callable, Optional, TypedDict

import pytest
from dotenv import dotenv_values
from loguru import logger

from src.dirmanager import DirManager


class SubTest(TypedDict):
    condition: Callable[[dict[str, str]], bool]
    test_file: str


class Runner:
    # Short name of the service under test; used as a prefix in result file names.
    name: str = ""
    # Directory (relative to this file) that contains the pytest files of this runner.
    test_dir_name: str = ""
    # Optional setup test file; dependent runners require it to have passed.
    main_setup_name: Optional[str] = None
    # Optional main test file of this runner.
    main_test_name: Optional[str] = None
    # Runner classes whose main setup must have passed before this runner may run.
    dependencies: list[type["Runner"]] = []
    # Conditional tests, each run only if its condition holds for the loaded config.
    sub_tests: list[SubTest] = []
    # If True, tests are re-run even when a matching "passed" result file exists.
    prevent_skip = False

    def __init__(self, dotenv_path: Path, output_dir: Path, session_id: str):
        self.dotenv_path = dotenv_path
        self.config: dict[str, str] = dotenv_values(dotenv_path)  # type: ignore
        self.output_dir = output_dir
        self.session_id = session_id
        self.DIRS = DirManager(output_dir, session_id)
        logger.info(f"creating instance of {self.__class__.__name__}")
        assert self.test_dir_name
        self.root_dir = Path(__file__).parent

    def run_tests(self):
        # check if required dependencies have passed
        self._assert_dependencies_passed()
        # run main setup if available
        if isinstance(self.main_setup_name, str):
            self._run_or_skip_test(
                identifier_string=self.combine_names(self.name, self.main_setup_name),
                test_path=self.root_dir / self.test_dir_name / self.main_setup_name,
            )
        # run main test if available
        if isinstance(self.main_test_name, str):
            self._run_or_skip_test(
                identifier_string=self.combine_names(self.name, self.main_test_name),
                test_path=self.root_dir / self.test_dir_name / self.main_test_name,
            )
        # run sub tests if conditions are met
        for sub_test in self.sub_tests:
            condition_function = sub_test["condition"]
            sub_test_name = sub_test["test_file"]
            identifier_string = self.combine_names(self.name, sub_test_name)
            if condition_function(self.config):
                test_path = self.root_dir / self.test_dir_name / sub_test_name
                self._run_or_skip_test(identifier_string=identifier_string, test_path=test_path)
            else:
                self._create_result_file(result=-1, identifier_string=identifier_string)

    def _run_or_skip_test(self, identifier_string: str, test_path: Path):
        if not self.prevent_skip and self._is_test_passed(identifier_string, remove_existing=True):
            logger.info(f"skipping {identifier_string}")
        else:
            logger.info(f"running {identifier_string}")
            result = self._call_pytest(test_path)
            self._create_result_file(result=result, identifier_string=identifier_string)

    def _is_test_passed(self, identifier_string: str, remove_existing: bool = False) -> bool:
"""returns True if the selected test (matching test_name + sub_test_name) already passed
This is determined by the presence of a specific output file in the RESULTS folder that
matches identifier_string
remove_existing: If True, result files matching test_name + sub_test_name with a status
other than 'passed' will be deleted"""
already_passed = False
for result in self.DIRS.RESULTS.glob("*"):
if identifier_string in result.name:
# process any result file (passed / failed / skipped) if it exists
if "passed" in result.name:
already_passed = True
elif remove_existing:
result.unlink()
return already_passed
def _call_pytest(self, full_test_path: Path) -> int:
"""runs pytest programmatically on a specific file
all tests in the file [full_test_path] will be run along with command line arguments"""
command_arguments = []
# command_arguments.append("-v")
# command_arguments.append("-rx")
command_arguments.append(str(full_test_path))
command_arguments.append("--env_file")
command_arguments.append(str(self.dotenv_path))
command_arguments.append("--output_dir")
command_arguments.append(str(self.DIRS.OUTPUT))
command_arguments.append("--session_id")
command_arguments.append(self.session_id)
# artifacts dir
# warning: https://github.com/microsoft/playwright-pytest/issues/111
# --output only works with the given context and page fixture
# folder needs to be unique! traces will not appear, if every pytest run has same output dir
command_arguments.append("--output")
command_arguments.append(str(self.DIRS.RECORDS / "traces" / full_test_path.stem))
# tracing
command_arguments.append("--tracing")
command_arguments.append("retain-on-failure")
# command_arguments.append("on")
# Disable capturing. With -s set, prints will go to console as if pytest is not there.
# command_arguments.append("-s")
# headed
# command_arguments.append("--headed")
# html report. Will be combined into one file later.
command_arguments.append(f"--html={self.DIRS.RECORDS / 'html' / full_test_path.with_suffix('.html').name}")
return pytest.main(command_arguments)
def _create_result_file(
        self,
        result: int,
        identifier_string: str,
    ):
        """Create a result file indicating a passed, failed, or skipped test.

        The file is empty; its name is the status joined with identifier_string,
        e.g. "passed-<name>-<test_file>".
        """
        full_name = self.combine_names(self.result_int_to_str(result), identifier_string)
        file_path = self.DIRS.RESULTS / full_name
        with open(file_path, "w") as _:
            pass  # create empty file

    def _assert_dependencies_passed(self):
"""assert that all dependencie setups passed before"""
passed_tests = [r.name for r in self.DIRS.RESULTS.glob("*") if "passed" in r.name]
for dependencie in self.dependencies:
dependencie_identifier = self.combine_names(dependencie.name, dependencie.main_setup_name)
assert any(
dependencie_identifier in f for f in passed_tests
), f"could not run {self.name} because {dependencie} did not run before"
@staticmethod
    def result_int_to_str(result_int: int) -> str:
        match result_int:
            case -1:
                return "skipped"
            case 0:
                return "passed"
            case _:
                return "failed"

    @staticmethod
    def combine_names(*names: str) -> str:
        return "-".join(names)