new-features #5
2 changed files with 47 additions and 58 deletions
@@ -74,11 +74,11 @@ class Coordinator:
         logger.info("calling run_test()")
         self.runners: list[Runner] = self._load_runners(self.env_files)
         for runner in self.runners:
-            runner.run_setup()
+            runner.run_setups()
         for runner in self.runners:
             runner.run_tests()
         for runner in self.runners:
-            runner.run_cleanup()
+            runner.run_cleanups()
         logger.info("run_test() finished")
 
     def _load_runners(self, env_files: list[EnvFile]) -> list[Runner]:
src/runner.py
@@ -1,5 +1,6 @@
+from dataclasses import dataclass
 from pathlib import Path
-from typing import Callable, Optional, TypedDict
+from typing import Callable
 
 import pytest
 from dotenv import dotenv_values
@@ -8,19 +9,20 @@ from loguru import logger
 from src.dirmanager import DirManager
 
 
-class ConditionalTest(TypedDict):
-    test_file: str
-    condition: Callable[[dict[str, str]], bool]
+@dataclass
+class Test:
+    test_file: str
+    condition: Callable[[dict[str, str]], bool] | None = None
+    prevent_skip: bool = False
 
 
 class Runner:
     name: str = ""
     test_dir_name: str = ""
-    main_setup_name: Optional[str] = None
-    main_test_name: Optional[str] = None
-    main_cleanup_name: Optional[str] = None
+    setups: list[Test] = []
+    tests: list[Test] = []
+    cleanups: list[Test] = []
     dependencies: list[type["Runner"]] = []
-    conditional_tests: list[ConditionalTest] = []
-    prevent_skip = False
 
     def __init__(self, dotenv_path: Path, output_dir: Path, session_id: str):
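
Note: a minimal sketch of how a subclass declares its scripts under the new Test dataclass. The runner and file names here are hypothetical, not taken from this PR; the condition callable receives the loaded dotenv config dict.

    class ExampleRunner(Runner):  # hypothetical subclass, for illustration only
        name = "example"
        test_dir_name = "example_tests"
        setups = [Test(test_file="test_setup.py")]
        tests = [
            Test(test_file="test_core.py"),
            # runs only when the loaded config enables the feature
            Test(test_file="test_feature_x.py", condition=lambda cfg: cfg.get("FEATURE_X") == "1"),
            # reruns even if a previous session already passed it
            Test(test_file="test_flaky.py", prevent_skip=True),
        ]
        cleanups = [Test(test_file="test_cleanup.py")]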
@@ -35,69 +37,54 @@ class Runner:
         self.root_dir = Path(__file__).parent
 
-    def run_setup(self):
-        """runs the setup script if available"""
-        if not self._dependencies_passed():
-            logger.warning(f"skipping run_tests() of {self.name}, because some dependencies have not passed")
-            return
-
-        # run main setup if available
-        if isinstance(self.main_setup_name, str):
-            self._run_or_skip_test(
-                identifier_string=self.combine_names(self.name, self.main_setup_name),
-                test_path=self.root_dir / self.test_dir_name / self.main_setup_name,
-            )
+    def run_setups(self):
+        """runs the setup scripts if available"""
+        self._execute_test_list(self.setups)
 
     def run_tests(self):
-        """runs the main test script if available and sub test scripts if their running condition is met"""
-        if not self._dependencies_passed():
-            logger.warning(f"skipping run_tests() of {self.name}, because some dependencies have not passed")
-            return
-
-        # run main setup if available
-        if isinstance(self.main_setup_name, str):
-            self._run_or_skip_test(
-                identifier_string=self.combine_names(self.name, self.main_setup_name),
-                test_path=self.root_dir / self.test_dir_name / self.main_setup_name,
-            )
-
-        # run main test if available
-        if isinstance(self.main_test_name, str):
-            self._run_or_skip_test(
-                identifier_string=self.combine_names(self.name, self.main_test_name),
-                test_path=self.root_dir / self.test_dir_name / self.main_test_name,
-            )
-
-        # run sub tests if conditions are met
-        for cond_test in self.conditional_tests:
-            condition_function = cond_test["condition"]
-            cond_test_name = cond_test["test_file"]
-            identifier_string = self.combine_names(self.name, cond_test_name)
-            if condition_function(self.config):
-                test_path = self.root_dir / self.test_dir_name / cond_test_name
-                self._run_or_skip_test(identifier_string=identifier_string, test_path=test_path)
-
-    def run_cleanup(self):
-        """runs the cleanup script if available"""
-        if not self._dependencies_passed():
-            logger.warning("skipping run_cleanup() of {self.name}, because some dependencies have not passed")
-        if isinstance(self.main_cleanup_name, str):
-            identifier_string = self.combine_names(self.name, self.main_cleanup_name)
-            test_path = self.root_dir / self.test_dir_name / self.main_cleanup_name
-            logger.info(f"running {identifier_string}")
-            result = self._call_pytest(test_path)
-            self._create_result_file(result=result, identifier_string=identifier_string)
-
-    def _run_or_skip_test(self, identifier_string: str, test_path: Path):
-        if not self.prevent_skip and self._is_test_passed(identifier_string, remove_existing=True):
-            logger.info(f"skipping {identifier_string}")
-        else:
-            logger.info(f"running {identifier_string}")
-            result = self._call_pytest(test_path)
-            self._create_result_file(result=result, identifier_string=identifier_string)
+        """runs the test scripts if available"""
+        self._execute_test_list(self.tests)
+
+    def run_cleanups(self):
+        """runs the cleanup scripts if available"""
+        self._execute_test_list(self.cleanups)
+
+    def _execute_test_list(self, test_list: list[Test]):
+        # check if required dependencies have passed
+        if not self._dependencies_passed():
+            logger.warning(f"skipping tests of {self.name}, because some dependencies have not passed")
+            return
+
+        for test in test_list:
+            self._run_test_with_checks(test)
+
+    def _run_test_with_checks(self, test: Test):
+        # dependency passed: true / false
+        # already_passed: true / false
+        # prevent_skip: true / false
+        # condition_available: true / false
+        # condition_met: true / false
+
+        identifier_string = self.combine_names(self.name, test.test_file)
+        test_path = self.root_dir / self.test_dir_name / test.test_file
+
+        # check if test already passed
+        if self._is_test_passed(identifier_string, remove_existing=True):
+            if test.prevent_skip:
+                logger.info(f"continuing, test {identifier_string} has passed but prevent_skip=True")
+            else:
+                self._create_result_file(result=-1, identifier_string=identifier_string)
+                logger.info(f"skipping {identifier_string}, test has passed")
+                return
+
+        if test.condition and not test.condition(self.config):
+            # test condition is defined but not met
+            logger.info(f"skipping {identifier_string}, test condition is not met")
+            return
+
+        # test condition is undefined or met
+        logger.info(f"running {identifier_string}")
+        result = self._call_pytest(test_path)
+        self._create_result_file(result=result, identifier_string=identifier_string)
 
     def _is_test_passed(self, identifier_string: str, remove_existing: bool = False) -> bool:
         """returns True if the selected test matching identifier_string already passed
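
Note: the order of checks in _run_test_with_checks reduces to a small decision function. A sketch for illustration only, not code from this PR; condition_met=None stands for a test without a condition.

    def decide(already_passed: bool, prevent_skip: bool, condition_met: bool | None) -> str:
        # mirrors _run_test_with_checks: an already-passed test skips unless
        # prevent_skip is set, then an unmet condition skips, otherwise it runs
        if already_passed and not prevent_skip:
            return "skip (result file set to -1)"
        if condition_met is False:
            return "skip (condition not met)"
        return "run"

    assert decide(already_passed=True, prevent_skip=False, condition_met=True) == "skip (result file set to -1)"
    assert decide(already_passed=True, prevent_skip=True, condition_met=None) == "run"
    assert decide(already_passed=False, prevent_skip=False, condition_met=False) == "skip (condition not met)"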
@@ -177,6 +164,8 @@ class Runner:
     def _dependencies_passed(self):
         """returns true if the setup of each dependency has passed"""
 
+        # todo: check more than one setup
+
         passed_tests = [r.name for r in self.DIRS.RESULTS.glob("*") if "passed" in r.name]
         results = []
         for dependencie in self.dependencies:
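
Note on the todo above: one possible shape for checking every setup of each dependency. This assumes result files carry the combined identifier plus "passed" in their names; _create_result_file and combine_names are not shown in this diff, so that naming is an assumption.

    def _dependencies_passed(self) -> bool:
        """true only if every setup script of every dependency has a passed result file"""
        passed = {r.name for r in self.DIRS.RESULTS.glob("*") if "passed" in r.name}
        for dep in self.dependencies:
            for setup in dep.setups:
                # assumed naming: result files contain combine_names(dep.name, setup.test_file)
                identifier = self.combine_names(dep.name, setup.test_file)
                if not any(identifier in name for name in passed):
                    return False
        return True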