new-features (#5)
* refactoring and rework: runner now has setups / tests / cleanups as lists
* add nextcloud runner
* add email testing prototype with imap fixture
* add dependency resolution (sort env files in input so that test order is correct)

Reviewed-on: local-it-infrastructure/e2e_tests#5
Co-authored-by: Daniel <d.brummerloh@gmail.com>
Co-committed-by: Daniel <d.brummerloh@gmail.com>
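For orientation, under the reworked API a concrete runner declares its scripts as lists of Test objects instead of a single main test plus sub_tests. The sketch below is hypothetical: class names and file names are made up, and only the Runner/Test attributes come from the diff of src/runner.py further down.

    # illustrative sketch only, not part of this commit
    class NextcloudRunner(Runner):                       # hypothetical subclass
        name = "nextcloud"
        test_dir_name = "nextcloud"                       # joined onto the runner's root_dir
        setups = [Test(test_file="setup_nextcloud.py")]
        tests = [Test(test_file="test_login.py")]
        cleanups = [Test(test_file="cleanup_nextcloud.py")]
        dependencies = [KeycloakRunner]                   # hypothetical runner whose setups must pass first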
This commit is contained in:
parent 2e33f8f014
commit d3dc0f942a
32 changed files with 573 additions and 247 deletions
115 src/runner.py
@@ -1,5 +1,6 @@
from dataclasses import dataclass
from pathlib import Path
from typing import Callable, Optional, TypedDict
from typing import Callable

import pytest
from dotenv import dotenv_values
@@ -8,18 +9,20 @@ from loguru import logger
from src.dirmanager import DirManager


class SubTest(TypedDict):
    condition: Callable[[dict[str, str]], bool]
@dataclass
class Test:
    test_file: str
    condition: Callable[[dict[str, str]], bool] | None = None
    prevent_skip: bool = False


class Runner:
    name: str = ""
    test_dir_name: str = ""
    main_setup_name: Optional[str] = None
    main_test_name: Optional[str] = None
    setups: list[Test] = []
    tests: list[Test] = []
    cleanups: list[Test] = []
    dependencies: list[type["Runner"]] = []
    sub_tests: list[SubTest] = []
    prevent_skip = False

    def __init__(self, dotenv_path: Path, output_dir: Path, session_id: str):
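The former SubTest/sub_tests mechanism is folded into the Test dataclass above: condition is an optional callable over the config dict (loaded from the env file via dotenv), and prevent_skip now applies per test. A hedged usage sketch, with a made-up env variable and file names:

    # hypothetical entries for a runner's tests list
    tests = [
        Test(test_file="test_basic.py"),                                   # always considered
        Test(
            test_file="test_oidc_login.py",
            condition=lambda config: config.get("ENABLE_OIDC") == "true",  # run only if the env file enables it
            prevent_skip=True,                                             # re-run even if a passed result exists
        ),
    ]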
@@ -33,50 +36,63 @@ class Runner:
        assert self.test_dir_name
        self.root_dir = Path(__file__).parent

    def run_setups(self):
        """runs the setup scripts if available"""
        self._execute_test_list(self.setups)

    def run_tests(self):
        """runs the test scripts if available"""
        self._execute_test_list(self.tests)

    def run_cleanups(self):
        """runs the cleanup scripts if available"""
        self._execute_test_list(self.cleanups)

    def _execute_test_list(self, test_list: list[Test]):
        """runs the main test script and if available and sub test scripts if their running condition is met"""
        # check if required dependencies have passed
        self._assert_dependencies_passed()
        if not self._dependencies_passed():
            logger.warning(f"skipping run_tests() of {self.name}, because some dependencies have not passed")
            return

        # run main setup if available
        if isinstance(self.main_setup_name, str):
            self._run_or_skip_test(
                identifier_string=self.combine_names(self.name, self.main_setup_name),
                test_path=self.root_dir / self.test_dir_name / self.main_setup_name,
            )
        for test in test_list:
            self._run_test_with_checks(test)

        # run main test if available
        if isinstance(self.main_test_name, str):
            self._run_or_skip_test(
                identifier_string=self.combine_names(self.name, self.main_test_name),
                test_path=self.root_dir / self.test_dir_name / self.main_test_name,
            )
    def _run_test_with_checks(self, test: Test):
        # dependency passed: true / false
        # already_passed: true / false
        # prevent_skip: true / false
        # condition_available: true / pass
        # condition_met: true / false

        # run sub tests if conditions are met
        for sub_test in self.sub_tests:
            condition_function = sub_test["condition"]
            sub_test_name = sub_test["test_file"]
            identifier_string = self.combine_names(self.name, sub_test_name)
            if condition_function(self.config):
                test_path = self.root_dir / self.test_dir_name / sub_test_name
                self._run_or_skip_test(identifier_string=identifier_string, test_path=test_path)
        identifier_string = self.combine_names(self.name, test.test_file)
        test_path = self.root_dir / self.test_dir_name / test.test_file

        # check if test aleady passed
        if self._is_test_passed(identifier_string, remove_existing=True):
            if test.prevent_skip:
                logger.info(f"continuing , test {identifier_string} has passed but prevent_skip=True")
            else:
                self._create_result_file(result=-1, identifier_string=identifier_string)
                logger.info(f"skipping {identifier_string}, test has passed")
                return

    def _run_or_skip_test(self, identifier_string: str, test_path: Path):
        if not self.prevent_skip and self._is_test_passed(identifier_string, remove_existing=True):
            logger.info(f"skipping {identifier_string}")
        else:
            logger.info(f"running {identifier_string}")
            result = self._call_pytest(test_path)
            self._create_result_file(result=result, identifier_string=identifier_string)
        if test.condition and not test.condition(self.config):
            # test condition is defined but not met
            logger.info(f"skipping {identifier_string}, test condition is not met")
            return

        # test condition is undefined or not met
        logger.info(f"running {identifier_string}")
        result = self._call_pytest(test_path)
        self._create_result_file(result=result, identifier_string=identifier_string)

    def _is_test_passed(self, identifier_string: str, remove_existing: bool = False) -> bool:
        """returns True if the selected test (matching test_name + sub_test_name) already passed
        """returns True if the selected test matching identifier_string already passed

        This is determined by the presence of a specific output file in the RESULTS folder that
        matches identifier_string

        remove_existing: If True, result files matching test_name + sub_test_name with a status
        remove_existing: If True, result files matching identifier_string with a status
        other than 'passed' will be deleted"""

        already_passed = False
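Read together, the checks in _run_test_with_checks above boil down to the following order: skip if the test already passed (unless prevent_skip), skip if a condition is defined but not met, otherwise run pytest and record the result. A schematic restatement, not code from the commit:

    # schematic restatement of the flow in _run_test_with_checks
    def decide(test: Test, already_passed: bool, config: dict[str, str]) -> str:
        if already_passed and not test.prevent_skip:
            return "skip: already passed (a 'skipped' result file is written)"
        if test.condition is not None and not test.condition(config):
            return "skip: condition not met"
        return "run: call pytest and write a result file"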
@@ -96,20 +112,21 @@ class Runner:

        command_arguments = []

        # command_arguments.append("-v")
        command_arguments.append("-v")
        # command_arguments.append("-rx")
        command_arguments.append(str(full_test_path))

        command_arguments.append("--env_file")
        command_arguments.append(str(self.dotenv_path))

        # set root dir for tests output (used in DirManager). this is our custom argument
        command_arguments.append("--output_dir")
        command_arguments.append(str(self.DIRS.OUTPUT))
        command_arguments.append(str(self.DIRS.OUTPUT_DIR))

        command_arguments.append("--session_id")
        command_arguments.append(self.session_id)

        # artifacts dir
        # artifacts dir from pytest
        # warning: https://github.com/microsoft/playwright-pytest/issues/111
        # --output only works with the given context and page fixture
        # folder needs to be unique! traces will not appear, if every pytest run has same output dir
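The argument list assembled above is presumably handed to pytest.main() (the call itself lies outside this hunk), so a run is roughly equivalent to the sketch below. --env_file, --output_dir and --session_id are the project's own options (the comment calls --output_dir "our custom argument"), and all concrete values are placeholders:

    # rough equivalent of the assembled invocation, placeholder values only
    import pytest

    pytest.main([
        "-v",
        "src/nextcloud/test_login.py",       # full_test_path (placeholder)
        "--env_file", "envs/nextcloud.env",  # placeholder dotenv path
        "--output_dir", "output/",           # custom option, consumed by DirManager
        "--session_id", "session-0001",      # placeholder session id
    ])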
@@ -144,18 +161,22 @@ class Runner:
        with open(file_path, "w") as _:
            pass # create empty file

    def _assert_dependencies_passed(self):
        """assert that all dependencie setups passed before"""
    def _dependencies_passed(self):
        """returns true if all setups of each dependency have passed"""

        # todo: what about conditional setups?

        passed_tests = [r.name for r in self.DIRS.RESULTS.glob("*") if "passed" in r.name]
        for dependencie in self.dependencies:
            dependencie_identifier = self.combine_names(dependencie.name, dependencie.main_setup_name)
            assert any(
                dependencie_identifier in f for f in passed_tests
            ), f"could not run {self.name} because {dependencie} did not run before"
        results = []
        for dependencie_runner in self.dependencies:
            for setup_name in dependencie_runner.setups:
                dependencie_identifier = self.combine_names(dependencie_runner.name, setup_name.test_file)
                results.append(any(dependencie_identifier in f for f in passed_tests))
        return all(results)

    @staticmethod
    def result_int_to_str(result_int: int) -> str:
        """converts the pytest exit code (int) into a meaningful string"""
        match result_int:
            case -1:
                return "skipped"
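result_int_to_str is cut off by the hunk boundary after the -1 case, so the remaining branches are not visible here. For orientation only: pytest's documented exit codes are 0 (all passed), 1 (tests failed), 2 (interrupted), 3 (internal error), 4 (usage error) and 5 (no tests collected); a mapping in that spirit could look like the sketch below, which is not necessarily the commit's actual wording:

    # illustrative only: -1 is the runner's own "skipped" sentinel, the rest follow pytest exit codes
    def result_int_to_str(result_int: int) -> str:
        match result_int:
            case -1:
                return "skipped"
            case 0:
                return "passed"
            case 1:
                return "failed"
            case _:
                return f"error (pytest exit code {result_int})"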