# e2e_tests/src/runner.py

from dataclasses import dataclass
from pathlib import Path
from typing import Callable

import pytest
from dotenv import dotenv_values
from loguru import logger

from src.dir_manager import DirManager
from src.tests_authentik.runner_authentik import RunnerAuthentik
from src.tests_nextcloud.runner_nextcloud import RunnerNextcloud
from src.tests_wordpress.runner_wordpress import RunnerWordpress

# Register all runners here. Each .env file with TYPE=authentik will be run with RunnerAuthentik.
RUNNER_DICT: dict[str, type["Runner"]] = {
    "authentik": RunnerAuthentik,
    "wordpress": RunnerWordpress,
    "nextcloud": RunnerNextcloud,
}
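
# Illustrative sketch (not part of this module, names assumed): a dispatching caller
# presumably reads TYPE from each .env file and instantiates the matching runner:
#
#   config = dotenv_values(dotenv_path)
#   runner = RUNNER_DICT[config["TYPE"]](dotenv_path, output_dir, session_id)
#   runner.run_setups(); runner.run_tests(); runner.run_cleanups()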


@dataclass
class Test:
    """describes a single pytest file to run, plus an optional run condition on the .env config"""

    test_file: str
    condition: Callable[[dict[str, str]], bool] | None = None
    prevent_skip: bool = False


class Runner:
    """base class for service-specific e2e test runners (see RUNNER_DICT)"""

    # Subclasses override these class attributes to describe their test suite.
    name: str = ""
    test_dir_name: str = ""
    setups: list[Test] = []
    tests: list[Test] = []
    cleanups: list[Test] = []
    # Runners whose setups must have passed before this runner's tests execute.
    dependencies: list[type["Runner"]] = []
    prevent_skip: bool = False

    def __init__(self, dotenv_path: Path, output_dir: Path, session_id: str):
        self.dotenv_path = dotenv_path
        self.config: dict[str, str] = dotenv_values(dotenv_path)  # type: ignore
        self.output_dir = output_dir
        self.session_id = session_id
        self.DIRS = DirManager(output_dir, session_id)
        logger.info(f"creating instance of {self.__class__.__name__}")
        assert self.test_dir_name
        self.root_dir = Path(__file__).parent

    def run_setups(self):
        """runs the setup scripts if available"""
        self._execute_test_list(self.setups)

    def run_tests(self):
        """runs the test scripts if available"""
        self._execute_test_list(self.tests)

    def run_cleanups(self):
        """runs the cleanup scripts if available"""
        self._execute_test_list(self.cleanups)

    def _execute_test_list(self, test_list: list[Test]):
        """runs every test in test_list, skipping the whole list if a dependency has not passed"""
        # check if required dependencies have passed
        if not self._dependencies_passed():
            logger.warning(f"skipping run_tests() of {self.name}, because some dependencies have not passed")
            return
        for test in test_list:
            self._run_test_with_checks(test)

    def _run_test_with_checks(self, test: Test):
        # Factors that decide whether the test runs:
        #   already_passed: a "passed" result file for this test exists
        #   prevent_skip:   run the test even if it already passed
        #   condition:      optional predicate on the .env config; skip if defined and not met
        identifier_string = self.combine_names(self.name, test.test_file)
        test_path = self.root_dir / self.test_dir_name / test.test_file
        # check if test already passed
        if self._is_test_passed(identifier_string, remove_existing=True):
            if test.prevent_skip:
                logger.info(f"continuing, test {identifier_string} has passed but prevent_skip=True")
            else:
                logger.info(f"skipping {identifier_string}, test has passed")
                return
        if test.condition and not test.condition(self.config):
            # test condition is defined but not met
            logger.info(f"skipping {identifier_string}, test condition is not met")
            return
        # test condition is undefined or met
        logger.info(f"running {identifier_string}")
        result = self._call_pytest(test_path)
        self._create_result_file(result=result, identifier_string=identifier_string)

    def _is_test_passed(self, identifier_string: str, remove_existing: bool = False) -> bool:
        """returns True if the test matching identifier_string has already passed

        This is determined by the presence of a result file in the RESULTS folder whose
        name contains identifier_string.
        remove_existing: if True, result files matching identifier_string with a status
        other than 'passed' are deleted"""
        already_passed = False
        for result in self.DIRS.RESULTS.glob("*"):
            if identifier_string in result.name:
                # process any result file (passed / failed / skipped) if it exists
                if "passed" in result.name:
                    already_passed = True
                elif remove_existing:
                    result.unlink()
        return already_passed

    def _call_pytest(self, full_test_path: Path) -> int:
        """runs pytest programmatically on a single file

        all tests inside [full_test_path] are collected and run with the command line
        arguments assembled below"""
        command_arguments = []
        command_arguments.append("-v")
        # command_arguments.append("-rx")
        command_arguments.append(str(full_test_path))
        command_arguments.append("--env_file")
        command_arguments.append(str(self.dotenv_path))
        # set root dir for tests output (used in DirManager). this is our custom argument
        command_arguments.append("--output_dir")
        command_arguments.append(str(self.DIRS.OUTPUT_DIR))
        command_arguments.append("--session_id")
        command_arguments.append(self.session_id)
        # artifacts dir from pytest-playwright
        # warning: https://github.com/microsoft/playwright-pytest/issues/111
        # --output only works with the given context and page fixtures
        # folder needs to be unique! traces will not appear if every pytest run has the same output dir
        command_arguments.append("--output")
        command_arguments.append(str(self.DIRS.RECORDS / "traces" / full_test_path.stem))
        # tracing (pytest-playwright): keep traces only for failed tests
        command_arguments.append("--tracing")
        command_arguments.append("retain-on-failure")
        # command_arguments.append("on")
        # Disable capturing. With -s set, prints will go to console as if pytest is not there.
        # command_arguments.append("-s")
        # headed
        # command_arguments.append("--headed")
        # html report (pytest-html). Will be combined into one file later.
        command_arguments.append(f"--html={self.DIRS.RECORDS / 'html' / full_test_path.with_suffix('.html').name}")
        return pytest.main(command_arguments)

    def _create_result_file(
        self,
        result: int,
        identifier_string: str,
    ):
        """creates an empty marker file whose name indicates a passed, failed or skipped test"""
        full_name = self.combine_names(self.result_int_to_str(result), identifier_string)
        file_path = self.DIRS.RESULTS / full_name
        with open(file_path, "w") as _:
            pass  # create empty file

    def _dependencies_passed(self):
        """returns True if all setups of every dependency have passed"""
        # todo: what about conditional setups?
        passed_tests = [r.name for r in self.DIRS.RESULTS.glob("*") if "passed" in r.name]
        results = []
        for dependency_runner in self.dependencies:
            for setup_test in dependency_runner.setups:
                dependency_identifier = self.combine_names(dependency_runner.name, setup_test.test_file)
                results.append(any(dependency_identifier in f for f in passed_tests))
        return all(results)

    @staticmethod
    def result_int_to_str(result_int: int) -> str:
        """converts the pytest exit code (int) into a meaningful string"""
        match result_int:
            case -1:
                return "skipped"
            case 0:
                return "passed"
            case _:
                return "failed"

    @staticmethod
    def combine_names(*names: str) -> str:
        return "-".join(names)