- remove demo runner
- improve docs
- rename all tests to test_* (previously, setup_* and cleanup_* also existed) to improve stability, since it is not guaranteed that pytest.ini is loaded
- improve logging formatting
- improve the full integration test

Reviewed-on: local-it-infrastructure/e2e_tests#18
Co-authored-by: Daniel <d.brummerloh@gmail.com>
Co-committed-by: Daniel <d.brummerloh@gmail.com>

import os
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Callable, NamedTuple

import pytest
from loguru import logger

from pytest_abra.shared_types import STATUS, TestResult

if TYPE_CHECKING:
    from pytest_abra import Coordinator, DirManager, EnvFile


class ConditionArgs(NamedTuple):
    """Arguments passed to a Test's condition callable."""

    env_config: dict[str, str]
    runner_index: int
    env_files: list["EnvFile"]
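

# Illustrative sketch (assumption, not part of the original module): a condition
# callable receives ConditionArgs and returns True when its test should run.
# The "feature_x" env_config key is a hypothetical example value.
def example_feature_x_enabled(args: ConditionArgs) -> bool:
    return args.env_config.get("feature_x", "false") == "true"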


@dataclass
class Test:
    # keep pytest from collecting this helper class (its name matches the default Test* pattern)
    __test__ = False

    test_file: str
    condition: Callable[[ConditionArgs], bool] | None = None
    prevent_skip: bool = False
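

# Example entry (illustration only, using the hypothetical condition above):
# run "test_feature_x.py" only when feature_x is enabled, and rerun it even
# if it has already passed.
example_test = Test(
    test_file="test_feature_x.py",
    condition=example_feature_x_enabled,
    prevent_skip=True,
)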


class Runner:
    # subclasses override these class attributes to describe their environment
    env_type: str = ""
    setups: list[Test] = []
    tests: list[Test] = []
    cleanups: list[Test] = []
    dependencies: list[str] = []
    _tests_path: Path = Path()

    def __init__(self, coordinator: "Coordinator", runner_index: int):
        self.coordinator = coordinator
        self.runner_index = runner_index

        self.DIR = coordinator.DIR
        self.ENV = coordinator.ENV
        self.RUNNER_DICT = coordinator.RUNNER_DICT

        logger.info(f"creating instance of {self.__class__.__name__}")

    def run_setups(self) -> list[TestResult]:
        """runs the setup scripts if available"""
        return self._execute_tests_list(self.setups)

    def run_tests(self) -> list[TestResult]:
        """runs the test scripts if available"""
        return self._execute_tests_list(self.tests)

    def run_cleanups(self) -> list[TestResult]:
        """runs the cleanup scripts if available"""
        return self._execute_tests_list(self.cleanups)

    def _execute_tests_list(self, test_list: list[Test]) -> list[TestResult]:
        """Runs all tests given in the list. If a condition is defined, it is checked as well."""
        # check if the required dependencies have passed
        if not self._dependencies_passed():
            logger.warning(f"skipping tests of {self.env_type} (one or more dependencies have not passed)")
            return [TestResult("skipped_dep", test.test_file) for test in test_list]

        return [self._run_test_with_checks(test) for test in test_list]

    def _run_test_with_checks(self, test: Test) -> TestResult:
        identifier_string = self.combine_names(self.env_type, test.test_file)

        test_files = list(self._tests_path.rglob(test.test_file))
        assert len(test_files) == 1, f"{test.test_file} should exist exactly once, but was found {len(test_files)} times"
        full_test_path = test_files[0]

        # check if the test already passed
        if self._is_test_passed(self.DIR, identifier_string):
            if test.prevent_skip:
                logger.info(f"continuing {identifier_string} (passed before but prevent_skip=True)")
            else:
                logger.info(f"skipping {identifier_string} (test has passed)")
                return TestResult("skipped_pas", test.test_file)

        if test.condition:
            condition_result = self._call_condition_function(test.condition)
            if not condition_result:
                # test condition is defined but not met
                logger.info(f"skipping {identifier_string} (test condition is not met)")
                self._create_status_file(self.DIR, status="skipped_con", identifier_string=identifier_string)
                return TestResult("skipped_con", test.test_file)

        # test condition is undefined or met
        logger.info(f"running {identifier_string}")
        exit_code = self._call_pytest(full_test_path)
        status = self.exit_code_to_str(exit_code)
        self._create_status_file(self.DIR, status=status, identifier_string=identifier_string)
        return TestResult(status, test.test_file)

    def _call_condition_function(self, condition_function: Callable[[ConditionArgs], bool]) -> bool:
        """run the test condition function with multiple arguments"""
        # more arguments can be added later without changing the function signature
        condition_args = ConditionArgs(
            env_files=self.ENV.env_files,
            runner_index=self.runner_index,
            env_config=self.ENV.env_files[self.runner_index].env_config,
        )
        return condition_function(condition_args)

    @classmethod
    def _create_status_file(
        cls,
        DIR: "DirManager",
        status: STATUS,
        identifier_string: str,
    ):
        """create a result file to indicate a passed/failed/skipped test"""

        # remove matching files left over from previous runs
        for status_file in cls._get_status_files(DIR, identifier_string):
            status_file.unlink()

        full_name = cls.combine_names(status, identifier_string)
        file_path = DIR.STATUS / full_name
        file_path.touch()  # create an empty marker file

    @staticmethod
    def _get_status_files(DIR: "DirManager", identifier_string: str) -> list[Path]:
        return [f for f in DIR.STATUS.glob("*") if identifier_string in f.name]
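
    # Status files are empty marker files whose names combine the status and the
    # test identifier via combine_names(), e.g. with a hypothetical env_type
    # "backend" and test file "test_login.py":
    #   passed-backend-test_login.py
    #   skipped_con-backend-test_login.py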

    @classmethod
    def _is_test_passed(cls, DIR: "DirManager", identifier_string: str) -> bool:
        """returns True if the selected test matching identifier_string already passed"""

        matching_files = cls._get_status_files(DIR, identifier_string)
        if len(matching_files) == 1:
            status_file = matching_files[0]
            if "passed" in status_file.name:
                return True
        elif len(matching_files) > 1:
            logger.warning("more than one matching status file found")
        return False

    def _call_pytest(self, full_test_path: Path) -> int:
        """runs pytest programmatically with a specific file

        all tests in [full_test_path] will be run with the command line arguments assembled below"""

        command_arguments = []

        # command_arguments.append("--traceconfig")

        command_arguments.append("-v")

        command_arguments.append(str(full_test_path))

        command_arguments.append("--runner_index")
        command_arguments.append(str(self.runner_index))

        # set the root dir for test output (used in DirManager); this is our custom argument
        command_arguments.append("--output_dir")
        command_arguments.append(str(self.DIR.OUTPUT_DIR))

        command_arguments.append("--session_id")
        command_arguments.append(self.DIR.session_id)

        command_arguments.append("--timeout")
        command_arguments.append(str(self.coordinator.TIMEOUT))

        # artifacts dir from pytest
        # warning: https://github.com/microsoft/playwright-pytest/issues/111
        # --output only works with the given context and page fixtures
        # the folder needs to be unique! traces will not appear if every pytest run has the same output dir
        command_arguments.append("--output")
        command_arguments.append(str(self.DIR.RESULTS / "traces" / full_test_path.stem))

        # tracing: "on", "off" or "retain-on-failure"
        command_arguments.append("--tracing")
        command_arguments.append("retain-on-failure")

        # Disable capturing. With -s set, prints go to the console as if pytest were not there.
        if os.environ.get("PWDEBUG") == "1":
            command_arguments.append("-s")

        # headed
        # command_arguments.append("--headed")

        # html report. Will be combined into one file later.
        command_arguments.append(f"--html={self.DIR.RESULTS / 'html' / full_test_path.with_suffix('.html').name}")

        return pytest.main(command_arguments)
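
    # For reference, the assembled argument list corresponds roughly to this
    # command line (paths and values are placeholders):
    #   pytest -v <test_file> --runner_index 0 --output_dir <dir> --session_id <id>
    #       --timeout <seconds> --output <results>/traces/<test> --tracing retain-on-failure
    #       --html=<results>/html/<test>.html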

    def _dependencies_passed(self) -> bool:
        """returns True if all setups of each dependency have passed"""

        # todo: what about conditional setups?

        passed_tests = [r.name for r in self.DIR.STATUS.glob("*") if "passed" in r.name]
        results = []
        for dependency in self.dependencies:
            dependency_runner = self.coordinator.RUNNER_DICT[dependency]
            for setup in dependency_runner.setups:
                dependency_identifier = self.combine_names(dependency_runner.env_type, setup.test_file)
                results.append(any(dependency_identifier in f for f in passed_tests))
        return all(results)

    @staticmethod
    def exit_code_to_str(result_int: int) -> STATUS:
        """converts the pytest exit code (int) into a meaningful string"""
        match result_int:
            case 0:
                return "passed"
            case _:
                return "failed"

    @staticmethod
    def combine_names(*names: str) -> str:
        return "-".join(names)
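

# A minimal sketch of a concrete runner (illustration only: the env_type, file
# names, dependency, and path below are hypothetical; the real runners are
# defined elsewhere in the project):
class ExampleFrontendRunner(Runner):
    env_type = "frontend"
    setups = [Test("test_setup_frontend.py")]
    tests = [Test("test_feature_x.py", condition=example_feature_x_enabled)]
    cleanups = [Test("test_cleanup_frontend.py")]
    dependencies = ["backend"]  # all setups of the "backend" runner must have passed
    _tests_path = Path("tests/frontend")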