authentik setup and tracing (#2)
* authentik sessions created successfully during setup without breaking tracing * setup works on EN and DE localization by using regex patterns * automated tracing with pytest --trace option, manual hook no longer needed Reviewed-on: local-it-infrastructure/e2e_tests#2 Co-authored-by: Daniel <d.brummerloh@gmail.com> Co-committed-by: Daniel <d.brummerloh@gmail.com>
This commit is contained in:
parent
97ed87c79f
commit
d2cd6ba47f
22 changed files with 519 additions and 304 deletions
|
|
@ -2,8 +2,11 @@ from pathlib import Path
|
|||
from typing import Callable, Optional, TypedDict
|
||||
|
||||
import pytest
|
||||
from dotenv import dotenv_values
|
||||
from icecream import ic
|
||||
|
||||
from src.dirmanager import DirManager
|
||||
|
||||
|
||||
class SubTest(TypedDict):
|
||||
condition: Callable[[Path], bool]
|
||||
|
|
@ -11,23 +14,31 @@ class SubTest(TypedDict):
|
|||
|
||||
|
||||
class Runner:
|
||||
test_dir_name: Optional[Path] = None
|
||||
name: Optional[str] = None
|
||||
test_dir_name: Optional[str] = None
|
||||
main_setup_name: Optional[str] = None
|
||||
main_test_name: Optional[str] = None
|
||||
sub_tests: list[SubTest] = []
|
||||
dependencies: list[str] = []
|
||||
|
||||
def __init__(self, dotenv_path: Path, tests_dir: Path, session_id: str):
|
||||
def __init__(self, dotenv_path: Path, output_dir: Path, session_id: str):
|
||||
self.dotenv_path = dotenv_path
|
||||
self.tests_dir = tests_dir
|
||||
self.config: dict[str, str] = dotenv_values(dotenv_path) # type: ignore
|
||||
self.output_dir = output_dir
|
||||
self.session_id = session_id
|
||||
self.DIRS = DirManager(output_dir, session_id)
|
||||
|
||||
ic(f"creating instance of {self.__class__.__name__}")
|
||||
assert self.test_dir_name is not None
|
||||
self.root_dir = Path(__file__).parent
|
||||
|
||||
def _run_main_test(self):
|
||||
if isinstance(self.main_setup_name, str):
|
||||
full_path = self.root_dir / self.test_dir_name / self.main_setup_name
|
||||
self._run_pytest(full_path)
|
||||
if isinstance(self.main_test_name, str):
|
||||
full_test_path = self.root_dir / self.test_dir_name / self.main_test_name
|
||||
self._run_pytest(full_test_path)
|
||||
full_path = self.root_dir / self.test_dir_name / self.main_test_name
|
||||
self._run_pytest(full_path)
|
||||
|
||||
def _run_pytest(self, full_test_path: Path):
|
||||
"""runs pytest programmatically
|
||||
|
|
@ -35,24 +46,45 @@ class Runner:
|
|||
will run all tests in the file at full_test_path with some command line arguments"""
|
||||
|
||||
ic(f"running test: {full_test_path}")
|
||||
pytest.main(
|
||||
[
|
||||
"-v",
|
||||
"-rp",
|
||||
str(full_test_path),
|
||||
"--env_file",
|
||||
str(self.dotenv_path),
|
||||
"--tests_dir",
|
||||
str(self.tests_dir),
|
||||
"--session_id",
|
||||
self.session_id,
|
||||
]
|
||||
)
|
||||
|
||||
def show_files(self):
|
||||
ic(list(self.root_dir.glob("*")))
|
||||
command_arguments = []
|
||||
|
||||
# command_arguments.append("-v")
|
||||
# command_arguments.append("-rx")
|
||||
command_arguments.append(str(full_test_path))
|
||||
|
||||
command_arguments.append("--env_file")
|
||||
command_arguments.append(str(self.dotenv_path))
|
||||
|
||||
command_arguments.append("--output_dir")
|
||||
command_arguments.append(str(self.DIRS.OUTPUT))
|
||||
|
||||
command_arguments.append("--session_id")
|
||||
command_arguments.append(self.session_id)
|
||||
|
||||
# artifacts dir
|
||||
# warning: https://github.com/microsoft/playwright-pytest/issues/111
|
||||
# --output only works with the given context and page fixture
|
||||
# folder needs to be unique! traces will not appear, if every pytest run has same output dir
|
||||
output = self.DIRS.RESULTS / full_test_path.stem
|
||||
command_arguments.append("--output")
|
||||
command_arguments.append(str(output))
|
||||
|
||||
# tracing
|
||||
command_arguments.append("--tracing")
|
||||
command_arguments.append("retain-on-failure")
|
||||
# command_arguments.append("on")
|
||||
|
||||
# Disable capturing. With -s set, prints will go to console as if pytest is not there.
|
||||
# command_arguments.append("-s")
|
||||
|
||||
# headed
|
||||
# command_arguments.append("--headed")
|
||||
|
||||
pytest.main(command_arguments)
|
||||
|
||||
def run_tests(self):
|
||||
self._check_dependencies_finished()
|
||||
self._run_main_test()
|
||||
for sub_test in self.sub_tests:
|
||||
condition_function = sub_test["condition"]
|
||||
|
|
@ -60,3 +92,16 @@ class Runner:
|
|||
test_name = sub_test["test_file"]
|
||||
full_test_path = self.root_dir / self.test_dir_name / test_name
|
||||
self._run_pytest(full_test_path)
|
||||
self._create_progress_file()
|
||||
|
||||
def _create_progress_file(self):
|
||||
"""create progress file to indicate the test has finished"""
|
||||
file_path = self.DIRS.PROGRESS / self.name
|
||||
with open(file_path, "w") as _:
|
||||
pass # create empty file
|
||||
|
||||
def _check_dependencies_finished(self):
|
||||
"""look for progress file of dependencies to confirm they have run"""
|
||||
finished_tests = [result.name for result in self.DIRS.PROGRESS.glob("*")]
|
||||
for dependencie in self.dependencies:
|
||||
assert dependencie in finished_tests
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue