various (#16)
* add full integration test of cli / pytest_abra with all tests
* save path of runner_*.py in runner subclass to improve test discovery -> allows for the same test name in two different runners
* reorganize output dir names
* use URL fixture everywhere
* rework coordinator interface
* add --session_id to cli args
* add log results table
* plenty of refactoring
* add assert messages
* add plenty of tests
* add /docs dir with plenty of documentation
* fix authentik setup
* add authentik cleanup, remove test user
* add random test user credential generation and integrate into the test routine; random credentials are saved to STATES

Reviewed-on: local-it-infrastructure/e2e_tests#16
Co-authored-by: Daniel <d.brummerloh@gmail.com>
Co-committed-by: Daniel <d.brummerloh@gmail.com>
parent 016b88a68d
commit 2dd765a974
36 changed files with 1145 additions and 432 deletions
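One item in the commit message, the random test-user credential generation whose credentials are saved to STATES, is not part of the file shown below. A minimal sketch of how such a helper could look; the function name, file name, and STATES layout are assumptions for illustration, not code from this commit:

import json
import secrets
from pathlib import Path


def generate_test_user_credentials(states_dir: Path) -> dict[str, str]:
    """Generate throwaway credentials for the Authentik test user and persist them."""
    creds = {
        "username": f"e2e-test-{secrets.token_hex(4)}",
        "password": secrets.token_urlsafe(24),
    }
    # hypothetical file name; where pytest_abra actually stores the credentials is not shown here
    (states_dir / "test_user_credentials.json").write_text(json.dumps(creds))
    return creds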
@@ -6,9 +6,10 @@ from typing import TYPE_CHECKING, Callable, NamedTuple
import pytest
from loguru import logger

from pytest_abra.shared_types import STATUS, TestResult

if TYPE_CHECKING:
    from pytest_abra.coordinator import Coordinator
    from pytest_abra.env_manager import EnvFile
    from pytest_abra import Coordinator, DirManager, EnvFile


class ConditionArgs(NamedTuple):
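The hunk above imports STATUS and TestResult from pytest_abra.shared_types, whose definitions are not part of this diff. A sketch that is consistent with how they are used in the hunks below (status strings such as "passed", "skipped_dep", "skipped_pas", "skipped_con"); the exact members and field names are assumptions:

from typing import Literal, NamedTuple

# assumed shape of pytest_abra/shared_types.py
STATUS = Literal["passed", "failed", "skipped", "skipped_dep", "skipped_pas", "skipped_con"]


class TestResult(NamedTuple):
    status: STATUS
    test_file: str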
@@ -30,6 +31,7 @@ class Runner:
    tests: list[Test] = []
    cleanups: list[Test] = []
    dependencies: list[str] = []
    _tests_path: Path = Path()

    def __init__(self, coordinator: "Coordinator", runner_index: int):
        self.coordinator = coordinator
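The new _tests_path class attribute is what the commit message means by saving the path of runner_*.py in the runner subclass: each subclass points test discovery at its own directory, so two runners can use the same test file name. A plausible sketch of a subclass; the subclass name and values are assumptions:

from pathlib import Path


class AuthentikRunner(Runner):
    # directory of this runner_*.py file; rglob() in _run_test_with_checks searches below it
    _tests_path: Path = Path(__file__).parent
    dependencies: list[str] = ["traefik"]  # illustrative dependency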
@@ -41,62 +43,58 @@ class Runner:
        logger.info(f"creating instance of {self.__class__.__name__}")

    def run_setups(self):
    def run_setups(self) -> list[TestResult]:
        """runs the setup scripts if available"""
        self._execute_test_list(self.setups)
        return self._execute_tests_list(self.setups)

    def run_tests(self):
    def run_tests(self) -> list[TestResult]:
        """runs the test scripts if available"""
        self._execute_test_list(self.tests)
        return self._execute_tests_list(self.tests)

    def run_cleanups(self):
    def run_cleanups(self) -> list[TestResult]:
        """runs the cleanup scripts if available"""
        self._execute_test_list(self.cleanups)
        return self._execute_tests_list(self.cleanups)

    def _execute_test_list(self, test_list: list[Test]):
        """runs the main test script and, if available, any sub test scripts whose running condition is met"""
    def _execute_tests_list(self, test_list: list[Test]) -> list[TestResult]:
        """Runs all tests given in the list. If a condition is defined, it is also checked."""
        # check if required dependencies have passed
        if not self._dependencies_passed():
            logger.warning(f"skipping run_tests() of {self.env_type} (one or more dependencies have not passed)")
            return
            return [TestResult("skipped_dep", test.test_file) for test in test_list]

        for test in test_list:
            self._run_test_with_checks(test)

    def _run_test_with_checks(self, test: Test):
        # dependency passed: true / false
        # already_passed: true / false
        # prevent_skip: true / false
        # condition_available: true / false
        # condition_met: true / false
        return [self._run_test_with_checks(test) for test in test_list]

    def _run_test_with_checks(self, test: Test) -> TestResult:
        identifier_string = self.combine_names(self.env_type, test.test_file)

        results = list(self.DIR.RECIPES.rglob(test.test_file))
        assert len(results) == 1, f"{test.test_file} should exist exactly 1 time, but found {len(results)} times"
        full_test_path = results[0]
        test_files = list(self._tests_path.rglob(test.test_file))
        assert len(test_files) == 1, f"{test.test_file} should exist exactly once, but found {len(test_files)} times"
        full_test_path = test_files[0]

        # check if test already passed
        if self._is_test_passed(identifier_string, remove_existing=True):
        if self._is_test_passed(self.DIR, identifier_string):
            if test.prevent_skip:
                logger.info(f"continuing {identifier_string} (passed before but prevent_skip=True)")
            else:
                logger.info(f"skipping {identifier_string} (test has passed)")
                return
                return TestResult("skipped_pas", test.test_file)

        if test.condition:
            condition_result = self._run_condition(test.condition)
            condition_result = self._call_condition_function(test.condition)
            if not condition_result:
                # test condition is defined but not met
                logger.info(f"skipping {identifier_string} (test condition is not met)")
                return
                self._create_status_file(self.DIR, status="skipped_con", identifier_string=identifier_string)
                return TestResult("skipped_con", test.test_file)

        # test condition is undefined or met
        logger.info(f"running {identifier_string}")
        result = self._call_pytest(full_test_path)
        self._create_result_file(result=result, identifier_string=identifier_string)
        exit_code = self._call_pytest(full_test_path)
        status = self.exit_code_to_str(exit_code)
        self._create_status_file(self.DIR, status=status, identifier_string=identifier_string)
        return TestResult(status, test.test_file)

    def _run_condition(self, condition_function: Callable[[ConditionArgs], bool]):
    def _call_condition_function(self, condition_function: Callable[[ConditionArgs], bool]):
        """run the test condition function with multiple arguments"""
        # more arguments can be added later without changing the function signature
        conditon_args = ConditionArgs(
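Because run_setups/run_tests/run_cleanups now return list[TestResult], the caller can build the results table mentioned in the commit message. A minimal sketch of such a summary, assuming TestResult is a (status, test_file) tuple as used in the hunk above; the real table layout on the coordinator side is not shown in this diff:

from collections import Counter

from loguru import logger

from pytest_abra.shared_types import TestResult


def log_results_table(results: list[TestResult]) -> None:
    # one line per test, then a tally per status
    for status, test_file in results:
        logger.info(f"{status:<12} {test_file}")
    for status, count in Counter(status for status, _ in results).items():
        logger.info(f"{status}: {count}")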
@@ -106,24 +104,40 @@ class Runner:
        )
        return condition_function(conditon_args)

    def _is_test_passed(self, identifier_string: str, remove_existing: bool = False) -> bool:
        """returns True if the selected test matching identifier_string already passed
    @classmethod
    def _create_status_file(
        cls,
        DIR: "DirManager",
        status: STATUS,
        identifier_string: str,
    ):
        """create status file to indicate a passed/failed/skipped test"""

        This is determined by the presence of a specific output file in the RESULTS folder that
        matches identifier_string
        # remove matching files
        for status_file in cls._get_status_files(DIR, identifier_string):
            status_file.unlink()

        remove_existing: If True, result files matching identifier_string with a status
        other than 'passed' will be deleted"""
        full_name = cls.combine_names(status, identifier_string)
        file_path = DIR.STATUS / full_name
        with open(file_path, "w") as _:
            pass  # create empty file

        already_passed = False
        for result in self.DIR.RESULTS.glob("*"):
            if identifier_string in result.name:
                # process any result file (passed / failed / skipped) if it exists
                if "passed" in result.name:
                    already_passed = True
                elif remove_existing:
                    result.unlink()
        return already_passed
    @staticmethod
    def _get_status_files(DIR: "DirManager", identifier_string: str) -> list[Path]:
        return [f for f in DIR.STATUS.glob("*") if identifier_string in f.name]

    @classmethod
    def _is_test_passed(cls, DIR: "DirManager", identifier_string: str) -> bool:
        """returns True if the selected test matching identifier_string already passed"""

        matching_files = cls._get_status_files(DIR, identifier_string)
        if len(matching_files) == 1:
            status_file = matching_files[0]
            if "passed" in status_file.name:
                return True
        elif len(matching_files) > 1:
            logger.warning("more than one matching status file found")
        return False

    def _call_pytest(self, full_test_path: Path) -> int:
        """runs pytest programmatically with a specific file
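The status files created above are empty marker files whose names encode both the status and the test identifier, and _is_test_passed simply looks for them again later. combine_names is not shown in this diff, so the separator used below is an assumption:

from pathlib import Path

status_dir = Path("output/STATUS")             # stands in for DIR.STATUS
status_dir.mkdir(parents=True, exist_ok=True)
identifier = "authentik__test_login.py"        # stands in for combine_names(env_type, test_file)

# roughly what _create_status_file does
(status_dir / f"passed__{identifier}").touch()

# roughly what _get_status_files / _is_test_passed do
matches = [f for f in status_dir.glob("*") if identifier in f.name]
already_passed = len(matches) == 1 and "passed" in matches[0].name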
@@ -155,7 +169,7 @@ class Runner:
        # --output only works with the given context and page fixture
        # folder needs to be unique! traces will not appear if every pytest run has the same output dir
        command_arguments.append("--output")
        command_arguments.append(str(self.DIR.RECORDS / "traces" / full_test_path.stem))
        command_arguments.append(str(self.DIR.RESULTS / "traces" / full_test_path.stem))

        # tracing
        command_arguments.append("--tracing")  # "on", "off", "retain-on-failure"
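The arguments assembled in this method are the standard pytest-playwright and pytest-html command line options, only passed to pytest.main() in-process instead of via the shell. Roughly the equivalent invocation for a single test file (paths are illustrative; requires the pytest-playwright and pytest-html plugins):

import pytest

exit_code = pytest.main([
    "tests/authentik/test_login.py",                  # full_test_path
    "--output", "output/RESULTS/traces/test_login",   # unique per test, or traces get overwritten
    "--tracing", "retain-on-failure",                 # or "on" / "off"
    "--html=output/RESULTS/html/test_login.html",     # per-test report, merged later
])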
@@ -170,28 +184,16 @@ class Runner:
        # command_arguments.append("--headed")

        # html report. Will be combined into one file later.
        command_arguments.append(f"--html={self.DIR.RECORDS / 'html' / full_test_path.with_suffix('.html').name}")
        command_arguments.append(f"--html={self.DIR.RESULTS / 'html' / full_test_path.with_suffix('.html').name}")

        return pytest.main(command_arguments)

    def _create_result_file(
        self,
        result: int,
        identifier_string: str,
    ):
        """create result file to indicated passed/failed or skipped test"""

        full_name = self.combine_names(self.result_int_to_str(result), identifier_string)
        file_path = self.DIR.RESULTS / full_name
        with open(file_path, "w") as _:
            pass  # create empty file

    def _dependencies_passed(self):
        """returns true if all setups of each dependency have passed"""

        # todo: what about conditional setups?

        passed_tests = [r.name for r in self.DIR.RESULTS.glob("*") if "passed" in r.name]
        passed_tests = [r.name for r in self.DIR.STATUS.glob("*") if "passed" in r.name]
        results = []
        for dependency in self.dependencies:
            dependency_runner = self.coordinator.RUNNER_DICT[dependency]
@@ -201,11 +203,9 @@ class Runner:
        return all(results)

    @staticmethod
    def result_int_to_str(result_int: int) -> str:
    def exit_code_to_str(result_int: int) -> STATUS:
        """converts the pytest exit code (int) into a meaningful string"""
        match result_int:
            case -1:
                return "skipped"
            case 0:
                return "passed"
            case _:
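exit_code_to_str translates the return value of pytest.main(); the remaining cases are cut off by the truncated hunk and presumably map everything else to a failure status. pytest exposes the same codes as the pytest.ExitCode enum (OK = 0, TESTS_FAILED = 1, INTERRUPTED = 2, INTERNAL_ERROR = 3, USAGE_ERROR = 4, NO_TESTS_COLLECTED = 5). A sketch of an equivalent mapping, with the fallback branch being an assumption:

import pytest


def exit_code_to_str(result_int: int) -> str:
    if result_int == -1:
        return "skipped"  # presumably a project-internal sentinel, not a real pytest exit code
    if result_int == pytest.ExitCode.OK:
        return "passed"
    return "failed"  # assumed fallback; the original match statement is truncated above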