turn Test object into a dataclass, make setups, tests and cleanups lists

Daniel 2023-12-04 11:25:41 +01:00
parent 92160e0021
commit 05be2c632d
2 changed files with 47 additions and 58 deletions

View file

@@ -74,11 +74,11 @@ class Coordinator:
         logger.info("calling run_test()")
         self.runners: list[Runner] = self._load_runners(self.env_files)
         for runner in self.runners:
-            runner.run_setup()
+            runner.run_setups()
         for runner in self.runners:
             runner.run_tests()
         for runner in self.runners:
-            runner.run_cleanup()
+            runner.run_cleanups()
         logger.info("run_test() finished")
 
     def _load_runners(self, env_files: list[EnvFile]) -> list[Runner]:

View file

@@ -1,5 +1,6 @@
+from dataclasses import dataclass
 from pathlib import Path
-from typing import Callable, Optional, TypedDict
+from typing import Callable
 
 import pytest
 from dotenv import dotenv_values
@@ -8,19 +9,20 @@ from loguru import logger
 from src.dirmanager import DirManager
 
 
-class ConditionalTest(TypedDict):
-    condition: Callable[[dict[str, str]], bool]
-    test_file: str
+@dataclass
+class Test:
+    test_file: str
+    condition: Callable[[dict[str, str]], bool] | None = None
+    prevent_skip: bool = False
 
 
 class Runner:
     name: str = ""
     test_dir_name: str = ""
-    main_setup_name: Optional[str] = None
-    main_test_name: Optional[str] = None
-    main_cleanup_name: Optional[str] = None
+    setups: list[Test] = []
+    tests: list[Test] = []
+    cleanups: list[Test] = []
     dependencies: list[type["Runner"]] = []
-    conditional_tests: list[ConditionalTest] = []
     prevent_skip = False
 
     def __init__(self, dotenv_path: Path, output_dir: Path, session_id: str):
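
For illustration, a concrete Runner configured through the new list-based attributes might look like the sketch below (the subclass, file names, and condition are invented for this example, not part of the commit):

    # hypothetical subclass using the new Test dataclass
    class DatabaseRunner(Runner):
        name = "database"
        test_dir_name = "database_tests"
        setups = [Test(test_file="test_setup_db.py")]
        tests = [
            Test(test_file="test_queries.py"),
            # condition receives the loaded dotenv config (self.config)
            Test(test_file="test_replication.py", condition=lambda cfg: cfg.get("REPLICATION") == "1"),
        ]
        # prevent_skip=True forces the cleanup to rerun even after a pass
        cleanups = [Test(test_file="test_teardown_db.py", prevent_skip=True)]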
@@ -35,69 +37,54 @@ class Runner:
         self.root_dir = Path(__file__).parent
 
-    def run_setup(self):
-        """runs the setup script if available"""
-        if not self._dependencies_passed():
-            logger.warning(f"skipping run_tests() of {self.name}, because some dependencies have not passed")
-            return
-        # run main setup if available
-        if isinstance(self.main_setup_name, str):
-            self._run_or_skip_test(
-                identifier_string=self.combine_names(self.name, self.main_setup_name),
-                test_path=self.root_dir / self.test_dir_name / self.main_setup_name,
-            )
+    def run_setups(self):
+        """runs the setup scripts if available"""
+        self._execute_test_list(self.setups)
 
     def run_tests(self):
-        """runs the main test script and if available and sub test scripts if their running condition is met"""
-        # check if required dependencies have passed
-        if not self._dependencies_passed():
-            logger.warning(f"skipping run_tests() of {self.name}, because some dependencies have not passed")
-            return
-        # run main setup if available
-        if isinstance(self.main_setup_name, str):
-            self._run_or_skip_test(
-                identifier_string=self.combine_names(self.name, self.main_setup_name),
-                test_path=self.root_dir / self.test_dir_name / self.main_setup_name,
-            )
-        # run main test if available
-        if isinstance(self.main_test_name, str):
-            self._run_or_skip_test(
-                identifier_string=self.combine_names(self.name, self.main_test_name),
-                test_path=self.root_dir / self.test_dir_name / self.main_test_name,
-            )
-        # run sub tests if conditions are met
-        for cond_test in self.conditional_tests:
-            condition_function = cond_test["condition"]
-            cond_test_name = cond_test["test_file"]
-            identifier_string = self.combine_names(self.name, cond_test_name)
-            if condition_function(self.config):
-                test_path = self.root_dir / self.test_dir_name / cond_test_name
-                self._run_or_skip_test(identifier_string=identifier_string, test_path=test_path)
-            else:
-                self._create_result_file(result=-1, identifier_string=identifier_string)
+        """runs the test scripts if available"""
+        self._execute_test_list(self.tests)
 
-    def run_cleanup(self):
-        """runs the cleanup script if available"""
-        if not self._dependencies_passed():
-            logger.warning("skipping run_cleanup() of {self.name}, because some dependencies have not passed")
-            return
-        if isinstance(self.main_cleanup_name, str):
-            identifier_string = self.combine_names(self.name, self.main_cleanup_name)
-            test_path = self.root_dir / self.test_dir_name / self.main_cleanup_name
-            logger.info(f"running {identifier_string}")
-            result = self._call_pytest(test_path)
-            self._create_result_file(result=result, identifier_string=identifier_string)
+    def run_cleanups(self):
+        """runs the cleanup scripts if available"""
+        self._execute_test_list(self.cleanups)
 
-    def _run_or_skip_test(self, identifier_string: str, test_path: Path):
-        if not self.prevent_skip and self._is_test_passed(identifier_string, remove_existing=True):
-            logger.info(f"skipping {identifier_string}")
-        else:
-            logger.info(f"running {identifier_string}")
-            result = self._call_pytest(test_path)
-            self._create_result_file(result=result, identifier_string=identifier_string)
+    def _execute_test_list(self, test_list: list[Test]):
+        """runs every test in the given list, provided all required dependencies have passed"""
+        # check if required dependencies have passed
+        if not self._dependencies_passed():
+            logger.warning(f"skipping tests of {self.name}, because some dependencies have not passed")
+            return
+        for test in test_list:
+            self._run_test_with_checks(test)
+
+    def _run_test_with_checks(self, test: Test):
+        # dependency passed: true / false
+        # already_passed: true / false
+        # prevent_skip: true / false
+        # condition_available: true / false
+        # condition_met: true / false
+        identifier_string = self.combine_names(self.name, test.test_file)
+        test_path = self.root_dir / self.test_dir_name / test.test_file
+
+        # check if the test already passed
+        if self._is_test_passed(identifier_string, remove_existing=True):
+            if test.prevent_skip:
+                logger.info(f"continuing, test {identifier_string} has passed but prevent_skip=True")
+            else:
+                logger.info(f"skipping {identifier_string}, test has passed")
+                return
+
+        if test.condition and not test.condition(self.config):
+            # test condition is defined but not met
+            logger.info(f"skipping {identifier_string}, test condition is not met")
+            return
+
+        # test condition is undefined or met
+        logger.info(f"running {identifier_string}")
+        result = self._call_pytest(test_path)
+        self._create_result_file(result=result, identifier_string=identifier_string)
 
     def _is_test_passed(self, identifier_string: str, remove_existing: bool = False) -> bool:
         """returns True if the selected test matching identifier_string already passed
@@ -177,6 +164,8 @@ class Runner:
 
     def _dependencies_passed(self):
         """returns true if the setup of each dependency has passed"""
+        # todo: check more than one setup
        passed_tests = [r.name for r in self.DIRS.RESULTS.glob("*") if "passed" in r.name]
        results = []
        for dependencie in self.dependencies:
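
The todo above flags that only a single setup per dependency is currently verified. With the new setups list, one possible extension (hypothetical; the matching against result-file names is inferred from the surrounding code) could iterate over every setup:

    # hypothetical sketch for the todo: verify every setup of each dependency
    for dependency in self.dependencies:
        for setup in dependency.setups:
            identifier_string = self.combine_names(dependency.name, setup.test_file)
            results.append(any(identifier_string in file_name for file_name in passed_tests))
    return all(results)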