huge refactor of runner functions
parent dd5fe859e8
commit 0c8999b070
1 changed file with 39 additions and 47 deletions
@@ -64,20 +64,14 @@ class Runner:
             self._run_test_with_checks(test)
 
     def _run_test_with_checks(self, test: Test):
-        # dependency passed: true / false
-        # already_passed: true / false
-        # prevent_skip: true / false
-        # condition_available: true / pass
-        # condition_met: true / false
-
         identifier_string = self.combine_names(self.env_type, test.test_file)
 
-        results = list(self._tests_path.rglob(test.test_file))
-        assert len(results) == 1, f"{test.test_file} should exist exactly 1 time, but found {len(results)} times"
-        full_test_path = results[0]
+        test_files = list(self._tests_path.rglob(test.test_file))
+        assert len(test_files) == 1, f"{test.test_file} should exist exactly once, but found {len(test_files)} times"
+        full_test_path = test_files[0]
 
         # check if test already passed
-        if self._is_test_passed(identifier_string, remove_existing=True):
+        if self._is_test_passed(self.DIR, identifier_string):
             if test.prevent_skip:
                 logger.info(f"continuing {identifier_string} (passed before but prevent_skip=True)")
             else:
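A side note on the lookup above, since the hunk only shows its use: Path.rglob recurses through every subdirectory of the tests root, so the exactly-once assertion is what protects against two suites shipping a test file with the same name. A minimal standalone sketch of the pattern (the paths and the file name here are made up):

    from pathlib import Path

    tests_path = Path("tests")  # hypothetical tests root for this sketch
    test_files = list(tests_path.rglob("test_example.py"))
    # rglob matches in every subdirectory, so two suites containing a file
    # with the same name would both be found -- hence the assertion
    assert len(test_files) == 1, f"test_example.py should exist exactly once, but found {len(test_files)} times"
    full_test_path = test_files[0]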
@@ -85,19 +79,19 @@ class Runner:
                 return
 
         if test.condition:
-            condition_result = self._run_condition(test.condition)
+            condition_result = self._call_condition_function(test.condition)
             if not condition_result:
                 # test condition is defined but not met
                 logger.info(f"skipping {identifier_string} (test condition is not met)")
-                self.create_result_file(self.DIR, result="skipped", identifier_string=identifier_string)
+                self._create_status_file(self.DIR, status="skipped", identifier_string=identifier_string)
                 return
 
         # test condition is undefined or met
         logger.info(f"running {identifier_string}")
-        result = self._call_pytest(full_test_path)
-        self.create_result_file(self.DIR, result=result, identifier_string=identifier_string)
+        exit_code = self._call_pytest(full_test_path)
+        self._create_status_file(self.DIR, status=exit_code, identifier_string=identifier_string)
 
-    def _run_condition(self, condition_function: Callable[[ConditionArgs], bool]):
+    def _call_condition_function(self, condition_function: Callable[[ConditionArgs], bool]):
         """run the test condition function with multiple arguments"""
         # more arguments can be added later without changing the function signature
         condition_args = ConditionArgs(
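The comment in _call_condition_function explains the design choice: bundling the arguments into a single ConditionArgs object means new fields can be added later without touching every registered condition function, whose signature stays Callable[[ConditionArgs], bool]. The ConditionArgs definition is not part of this diff; a minimal sketch of how such a container and a condition function could fit together (the field names are assumptions):

    from dataclasses import dataclass

    @dataclass(frozen=True)
    class ConditionArgs:
        # hypothetical fields -- the real definition is not shown in this diff
        env_type: str
        config: dict

    def needs_gpu(args: ConditionArgs) -> bool:
        # condition functions receive only the container, so adding a field
        # to ConditionArgs later never breaks an existing condition's signature
        return args.config.get("gpu_available", False)

    print(needs_gpu(ConditionArgs(env_type="linux", config={"gpu_available": True})))  # True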
@@ -107,24 +101,38 @@ class Runner:
         )
         return condition_function(condition_args)
 
-    def _is_test_passed(self, identifier_string: str, remove_existing: bool = False) -> bool:
-        """returns True if the selected test matching identifier_string already passed
-
-        This is determined by the presence of a specific output file in the RESULTS folder that
-        matches identifier_string
-
-        remove_existing: If True, result files matching identifier_string with a status
-        other than 'passed' will be deleted"""
-
-        already_passed = False
-        for status in self.DIR.STATUS.glob("*"):
-            if identifier_string in status.name:
-                # process any result file (passed / failed / skipped) if it exists
-                if "passed" in status.name:
-                    already_passed = True
-                elif remove_existing:
-                    status.unlink()
-        return already_passed
+    @classmethod
+    def _create_status_file(
+        cls,
+        DIR: "DirManager",
+        status: int | str,
+        identifier_string: str,
+    ):
+        """create result file to indicate a passed/failed/skipped test"""
+
+        if isinstance(status, int):
+            status = cls.exit_code_to_str(status)
+
+        # remove matching files
+        [f.unlink() for f in DIR.STATUS.glob("*") if identifier_string in f.name]
+
+        full_name = cls.combine_names(status, identifier_string)
+        file_path = DIR.STATUS / full_name
+        with open(file_path, "w") as _:
+            pass  # create empty file
+
+    @staticmethod
+    def _is_test_passed(DIR: "DirManager", identifier_string: str) -> bool:
+        """returns True if the selected test matching identifier_string already passed"""
+
+        matching_files = [f for f in DIR.STATUS.glob("*") if identifier_string in f.name]
+        if len(matching_files) == 1:
+            status_file = matching_files[0]
+            if "passed" in status_file.name:
+                return True
+        elif len(matching_files) > 1:
+            logger.warning("more than one matching status file found")
+        return False
 
     def _call_pytest(self, full_test_path: Path) -> int:
         """runs pytest programmatically with a specific file
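The refactored pair above encodes test status purely in file names: _create_status_file drops an empty marker file whose name combines the status with the identifier, and _is_test_passed inspects only names, never file contents. A self-contained sketch of that marker-file pattern, assuming combine_names simply joins its parts with an underscore (the real implementation is not shown in this diff):

    from pathlib import Path

    status_dir = Path("status")  # stand-in for DIR.STATUS
    status_dir.mkdir(exist_ok=True)

    def create_status_file(status: str, identifier_string: str) -> None:
        # remove any previous marker for this test, then create an empty file
        for f in status_dir.glob("*"):
            if identifier_string in f.name:
                f.unlink()
        (status_dir / f"{status}_{identifier_string}").touch()

    def is_test_passed(identifier_string: str) -> bool:
        matching = [f for f in status_dir.glob("*") if identifier_string in f.name]
        return len(matching) == 1 and "passed" in matching[0].name

    create_status_file("passed", "linux_test_example.py")
    print(is_test_passed("linux_test_example.py"))  # True

Because _create_status_file deletes every matching marker before writing the new one, a test can only ever have one status file, which is also why the refactored _is_test_passed no longer needs the old remove_existing flag.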
@@ -175,22 +183,6 @@ class Runner:
         return pytest.main(command_arguments)
 
-    @classmethod
-    def create_result_file(
-        cls,
-        DIR: "DirManager",
-        result: int | str,
-        identifier_string: str,
-    ):
-        """create result file to indicated passed/failed or skipped test"""
-
-        if isinstance(result, int):
-            result = cls.result_int_to_str(result)
-        full_name = cls.combine_names(result, identifier_string)
-        file_path = DIR.STATUS / full_name
-        with open(file_path, "w") as _:
-            pass  # create empty file
-
     def _dependencies_passed(self):
         """returns true if all setups of each dependency have passed"""
 
@@ -206,7 +198,7 @@ class Runner:
         return all(results)
 
     @staticmethod
-    def result_int_to_str(result_int: int) -> str:
+    def exit_code_to_str(result_int: int) -> str:
         """converts the pytest exit code (int) into a meaningful string"""
         match result_int:
             case 0:
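The final hunk is truncated after case 0:, so the full mapping is not part of this diff. Since _create_status_file feeds the return value straight into a status file name, a plausible completion, based on pytest's documented exit codes and on the passed/failed/skipped names used elsewhere in the diff, might look like the following (the exact strings are an assumption):

    def exit_code_to_str(result_int: int) -> str:
        """converts the pytest exit code (int) into a meaningful string"""
        match result_int:
            case 0:  # pytest.ExitCode.OK
                return "passed"
            case 1:  # pytest.ExitCode.TESTS_FAILED
                return "failed"
            case _:  # 2=interrupted, 3=internal error, 4=usage error, 5=no tests collected
                return "failed"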