refactor for independent test dirs (#7)
* make it so that the actual tests can be moved anywhere, for example into abra recipe repos -> major refactoring with pytest test discovery magic
* create RUNNER_DICT dynamically with importlib -> none of the tests are hardcoded; more tests can be added by placing a folder
* autoload fixtures with pytest plugins
* add URL fixture to navigate web pages; includes a URL parser based on Python's urllib to generate correct links
* fix nextcloud setups and tests
* add email groundwork with imbox

Reviewed-on: local-it-infrastructure/e2e_tests#7
Co-authored-by: Daniel <d.brummerloh@gmail.com>
Co-committed-by: Daniel <d.brummerloh@gmail.com>
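The dynamic RUNNER_DICT described above is not part of this file; a minimal sketch of the idea, assuming one runner.py module per recipe folder (the module path and attribute names are illustrative, not taken from this diff):

import importlib
from pathlib import Path

RUNNER_DICT: dict[str, type] = {}
for recipe_dir in Path("recipes").iterdir():
    if not (recipe_dir / "runner.py").is_file():
        continue
    # import e.g. recipes.nextcloud.runner and register its Runner subclass
    module = importlib.import_module(f"recipes.{recipe_dir.name}.runner")
    RUNNER_DICT[recipe_dir.name] = module.TestRunner  # attribute name assumed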
parent 3fa10aaa69
commit f9c21c6e6b
45 changed files with 373 additions and 228 deletions
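The URL fixture mentioned in the commit message also lives outside this file; a minimal sketch of a urllib-based link builder (the fixture name and .env key are assumptions):

from urllib.parse import urljoin, urlparse

import pytest


@pytest.fixture
def url(config):  # 'config' assumed to be a fixture exposing the parsed .env values
    base = config["DOMAIN"]
    if not urlparse(base).scheme:
        base = f"https://{base}"  # ensure an absolute base URL

    def build(path: str = "") -> str:
        # resolve the path against the base, avoiding doubled or missing slashes
        return urljoin(base + "/", path.lstrip("/"))

    return build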
190  abratest/runner.py  Normal file
@@ -0,0 +1,190 @@
from dataclasses import dataclass
from pathlib import Path
from typing import Callable

import pytest
from dotenv import dotenv_values
from loguru import logger

from abratest.dir_manager import DirManager

@dataclass
class Test:
    test_file: str
    condition: Callable[[dict[str, str]], bool] | None = None
    prevent_skip: bool = False

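# Hypothetical usage (not part of this diff): a setup that only runs when an
# SMTP host is configured in the recipe's .env:
#   Test("setup_mail.py", condition=lambda cfg: bool(cfg.get("SMTP_HOST")))
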
class Runner:
    name: str = ""
    test_dir_name: str = ""
    setups: list[Test] = []
    tests: list[Test] = []
    cleanups: list[Test] = []
    dependencies: list[str] = []
    _dependency_runners: list[type["Runner"]] = []

    def __init__(self, dotenv_path: Path, DIR: DirManager):
        self.dotenv_path = dotenv_path
        self.config: dict[str, str] = dotenv_values(dotenv_path)  # type: ignore
        self.DIR = DIR

        logger.info(f"creating instance of {self.__class__.__name__}")
        assert self.test_dir_name
        self.root_dir = Path(__file__).parent

    def run_setups(self):
        """runs the setup scripts if available"""
        self._execute_test_list(self.setups)

    def run_tests(self):
        """runs the test scripts if available"""
        self._execute_test_list(self.tests)

    def run_cleanups(self):
        """runs the cleanup scripts if available"""
        self._execute_test_list(self.cleanups)

    def _execute_test_list(self, test_list: list[Test]):
        """runs every script in the list, delegating the per-test skip checks to _run_test_with_checks"""
        # check if required dependencies have passed
        if not self._dependencies_passed():
            logger.warning(f"skipping test list of {self.name}, because some dependencies have not passed")
            return

        for test in test_list:
            self._run_test_with_checks(test)

    def _run_test_with_checks(self, test: Test):
        # dependency_passed: true / false
        # already_passed: true / false
        # prevent_skip: true / false
        # condition_available: true / false
        # condition_met: true / false

        identifier_string = self.combine_names(self.name, test.test_file)
        full_test_path = self.DIR.RECIPES / self.name / self.test_dir_name / test.test_file

        # check if test already passed
        if self._is_test_passed(identifier_string, remove_existing=True):
            if test.prevent_skip:
                logger.info(f"continuing, test {identifier_string} has passed but prevent_skip=True")
            else:
                logger.info(f"skipping {identifier_string}, test has passed")
                return

        if test.condition and not test.condition(self.config):
            # test condition is defined but not met
            logger.info(f"skipping {identifier_string}, test condition is not met")
            return

        # test condition is undefined or met
        logger.info(f"running {identifier_string}")
        result = self._call_pytest(full_test_path)
        self._create_result_file(result=result, identifier_string=identifier_string)

    def _is_test_passed(self, identifier_string: str, remove_existing: bool = False) -> bool:
        """returns True if the selected test matching identifier_string already passed

        This is determined by the presence of a specific output file in the RESULTS folder that
        matches identifier_string

        remove_existing: If True, result files matching identifier_string with a status
        other than 'passed' will be deleted"""

        already_passed = False
        for result in self.DIR.RESULTS.glob("*"):
            if identifier_string in result.name:
                # process any result file (passed / failed / skipped) if it exists
                if "passed" in result.name:
                    already_passed = True
                elif remove_existing:
                    result.unlink()
        return already_passed

    def _call_pytest(self, full_test_path: Path) -> int:
        """runs pytest programmatically on a specific file

        all tests in the file [full_test_path] will be run along with command line arguments"""

        command_arguments = []

        # command_arguments.append("--traceconfig")

        command_arguments.append("-v")
        # command_arguments.append("-rx")
        command_arguments.append(str(full_test_path))

        command_arguments.append("--env_file")
        command_arguments.append(str(self.dotenv_path))

        # set root dir for tests output (used in DirManager). this is our custom argument
        command_arguments.append("--output_dir")
        command_arguments.append(str(self.DIR.OUTPUT_DIR))

        command_arguments.append("--session_id")
        command_arguments.append(self.DIR.session_id)

        # artifacts dir from pytest
        # warning: https://github.com/microsoft/playwright-pytest/issues/111
        # --output only works with the given context and page fixture
        # folder needs to be unique! traces will not appear if every pytest run has the same output dir
        command_arguments.append("--output")
        command_arguments.append(str(self.DIR.RECORDS / "traces" / full_test_path.stem))

        # tracing
        command_arguments.append("--tracing")
        command_arguments.append("retain-on-failure")
        # command_arguments.append("on")

        # Disable capturing. With -s set, prints will go to console as if pytest is not there.
        # command_arguments.append("-s")

        # headed
        # command_arguments.append("--headed")

        # html report. Will be combined into one file later.
        command_arguments.append(f"--html={self.DIR.RECORDS / 'html' / full_test_path.with_suffix('.html').name}")

        return pytest.main(command_arguments)

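    # For reference, the arguments assembled above are roughly equivalent to
    # running (paths illustrative):
    #   pytest -v <recipe>/<test_dir>/<file> --env_file <.env> --output_dir <out> \
    #          --session_id <id> --output <records>/traces/<file> \
    #          --tracing retain-on-failure --html=<records>/html/<file>.html
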
    def _create_result_file(
        self,
        result: int,
        identifier_string: str,
    ):
        """create a result file to indicate a passed, failed or skipped test"""

        full_name = self.combine_names(self.result_int_to_str(result), identifier_string)
        file_path = self.DIR.RESULTS / full_name
        file_path.touch()  # create empty marker file

    def _dependencies_passed(self):
        """returns True if all setups of each dependency have passed"""

        # todo: what about conditional setups?

        passed_tests = [r.name for r in self.DIR.RESULTS.glob("*") if "passed" in r.name]
        results = []
        for dependency_runner in self._dependency_runners:
            for setup in dependency_runner.setups:
                dependency_identifier = self.combine_names(dependency_runner.name, setup.test_file)
                results.append(any(dependency_identifier in f for f in passed_tests))
        return all(results)

    @staticmethod
    def result_int_to_str(result_int: int) -> str:
        """converts the pytest exit code (int) into a meaningful string"""
        match result_int:
            case -1:
                return "skipped"
            case 0:
                return "passed"
            case _:
                return "failed"

    @staticmethod
    def combine_names(*names: str) -> str:
        return "-".join(names)
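A hypothetical recipe runner built on this class might look like the following (recipe name, file names, and the DirManager construction are illustrative, not taken from this diff):

from pathlib import Path

from abratest.dir_manager import DirManager
from abratest.runner import Runner, Test


class NextcloudRunner(Runner):
    name = "nextcloud"
    test_dir_name = "tests"
    setups = [Test("setup.py")]
    tests = [
        Test("test_login.py"),
        # only attempted when SMTP is configured in the recipe's .env
        Test("test_mail.py", condition=lambda cfg: bool(cfg.get("SMTP_HOST"))),
    ]
    cleanups = [Test("cleanup.py")]


DIR = DirManager()  # constructor signature not shown in this diff
runner = NextcloudRunner(Path("recipes/nextcloud/.env"), DIR)
runner.run_setups()
runner.run_tests()
runner.run_cleanups()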