refactor for independent test dirs #7

Merged
dan merged 49 commits from independent-test-dirs into dev 2023-12-05 21:41:46 +01:00
4 changed files with 35 additions and 21 deletions
Showing only changes of commit 52922a5be3 - Show all commits

12
main.py
View file

@ -8,6 +8,13 @@ from src.coordinator import Coordinator
from src.dir_manager import DirManager
from src.utils import get_session_id
# --------------------------- set all env variables -------------------------- #
# add abra-testing dir
os.environ["PYTEST_PLUGINS"] = "src.plugin-abra" # "src.plugin,src.other"
# ----------------------------- lookup env files ----------------------------- #
@ -31,6 +38,7 @@ ENV_FILES = [
OUTPUT_DIR = Path("./test-output").resolve()
RECIPES_DIR = Path("./src-tests").resolve()
# -------------------------- enable playwright debug ------------------------- #
@ -67,7 +75,9 @@ logger.add(log_file)
# ---------------------------- initialize and run ---------------------------- #
coordinator = Coordinator(ENV_FILES, output_dir=OUTPUT_DIR, session_id=session_id)
coordinator = Coordinator(
env_paths_list=ENV_FILES, output_dir=OUTPUT_DIR, session_id=session_id, recipes_dir=RECIPES_DIR
)
coordinator.setup_test()
coordinator.run_test()
coordinator.combine_html()

View file

@ -11,14 +11,14 @@ from src.utils import rmtree
class Coordinator:
def __init__(self, env_paths_list: list[Path], output_dir: Path, session_id: str) -> None:
def __init__(self, env_paths_list: list[Path], output_dir: Path, session_id: str, recipes_dir: Path) -> None:
# logging
out_string = "".join([e.name + "\n" for e in env_paths_list])
out_string += f"output_dir = {output_dir}\n"
out_string += f"session_id = {session_id}"
logger.info(f"initialize Coordinator instance with\nenv_paths_list =\n{out_string}")
self.DIR = DirManager(output_dir=output_dir, session_id=session_id)
self.DIR = DirManager(output_dir=output_dir, session_id=session_id, recipes_dir=recipes_dir)
self.ENV = EnvManager(env_paths_list)
def setup_test(self) -> None:
@ -45,9 +45,7 @@ class Coordinator:
dependency_classes: list[type[Runner]] = []
for dependency in RunnerClass.dependencies:
dependency_classes.append(RUNNER_DICT[dependency])
runner_instance = RunnerClass(
dotenv_path=env_file.env_path, output_dir=self.DIR.output_dir, session_id=self.DIR.session_id
)
runner_instance = RunnerClass(dotenv_path=env_file.env_path, DIR=self.DIR)
runner_instance._dependency_runners = dependency_classes
runners.append(runner_instance)
return runners

View file

@ -16,12 +16,14 @@ class DirManager:
...
"""
def __init__(self, output_dir: Path | str, session_id: str):
# root test dir
def __init__(self, output_dir: Path | str, session_id: str, recipes_dir: Path | str = ""):
if isinstance(output_dir, str):
output_dir = Path(output_dir)
self.output_dir = output_dir.resolve()
self.session_id = session_id
if isinstance(recipes_dir, str):
recipes_dir = Path(recipes_dir)
self.recipes_dir = recipes_dir
def create_all_dirs(self) -> None:
dirs: list[Path] = [
@ -63,3 +65,7 @@ class DirManager:
@property
def RESULTS(self):
return self.SESSION / "results"
@property
def RECIPES(self):
return self.recipes_dir

View file

@ -25,12 +25,10 @@ class Runner:
dependencies: list[str] = []
_dependency_runners: list[type["Runner"]] = []
def __init__(self, dotenv_path: Path, output_dir: Path, session_id: str):
def __init__(self, dotenv_path: Path, DIR: DirManager):
self.dotenv_path = dotenv_path
self.config: dict[str, str] = dotenv_values(dotenv_path) # type: ignore
self.output_dir = output_dir
self.session_id = session_id
self.DIRS = DirManager(output_dir, session_id)
self.DIR = DIR
logger.info(f"creating instance of {self.__class__.__name__}")
assert self.test_dir_name
@ -66,7 +64,7 @@ class Runner:
# condition_met: true / false
identifier_string = self.combine_names(self.name, test.test_file)
test_path = self.root_dir / self.test_dir_name / test.test_file
full_test_path = self.DIR.RECIPES / self.test_dir_name / test.test_file
# check if test already passed
if self._is_test_passed(identifier_string, remove_existing=True):
@ -83,7 +81,7 @@ class Runner:
# test condition is undefined or not met
logger.info(f"running {identifier_string}")
result = self._call_pytest(test_path)
result = self._call_pytest(full_test_path)
self._create_result_file(result=result, identifier_string=identifier_string)
def _is_test_passed(self, identifier_string: str, remove_existing: bool = False) -> bool:
@ -96,7 +94,7 @@ class Runner:
other than 'passed' will be deleted"""
already_passed = False
for result in self.DIRS.RESULTS.glob("*"):
for result in self.DIR.RESULTS.glob("*"):
if identifier_string in result.name:
# process any result file (passed / failed / skipped) if it exists
if "passed" in result.name:
@ -112,6 +110,8 @@ class Runner:
command_arguments = []
# command_arguments.append("--traceconfig")
command_arguments.append("-v")
# command_arguments.append("-rx")
command_arguments.append(str(full_test_path))
@ -121,17 +121,17 @@ class Runner:
# set root dir for tests output (used in DirManager). this is our custom argument
command_arguments.append("--output_dir")
command_arguments.append(str(self.DIRS.OUTPUT_DIR))
command_arguments.append(str(self.DIR.OUTPUT_DIR))
command_arguments.append("--session_id")
command_arguments.append(self.session_id)
command_arguments.append(self.DIR.session_id)
# artifacts dir from pytest
# warning: https://github.com/microsoft/playwright-pytest/issues/111
# --output only works with the given context and page fixture
# folder needs to be unique! traces will not appear if every pytest run has the same output dir
command_arguments.append("--output")
command_arguments.append(str(self.DIRS.RECORDS / "traces" / full_test_path.stem))
command_arguments.append(str(self.DIR.RECORDS / "traces" / full_test_path.stem))
# tracing
command_arguments.append("--tracing")
@ -145,7 +145,7 @@ class Runner:
# command_arguments.append("--headed")
# html report. Will be combined into one file later.
command_arguments.append(f"--html={self.DIRS.RECORDS / 'html' / full_test_path.with_suffix('.html').name}")
command_arguments.append(f"--html={self.DIR.RECORDS / 'html' / full_test_path.with_suffix('.html').name}")
return pytest.main(command_arguments)
@ -157,7 +157,7 @@ class Runner:
"""create result file to indicated passed/failed or skipped test"""
full_name = self.combine_names(self.result_int_to_str(result), identifier_string)
file_path = self.DIRS.RESULTS / full_name
file_path = self.DIR.RESULTS / full_name
with open(file_path, "w") as _:
pass # create empty file
@ -166,7 +166,7 @@ class Runner:
# todo: what about conditional setups?
passed_tests = [r.name for r in self.DIRS.RESULTS.glob("*") if "passed" in r.name]
passed_tests = [r.name for r in self.DIR.RESULTS.glob("*") if "passed" in r.name]
results = []
for dependency_runner in self._dependency_runners:
for setup_name in dependency_runner.setups: