From 0c8999b070b21304cd145c0a931aacd5271e1aa1 Mon Sep 17 00:00:00 2001 From: Daniel Date: Mon, 11 Dec 2023 11:51:32 +0100 Subject: [PATCH] huge refactor of runner functions --- pytest_abra/runner.py | 86 ++++++++++++++++++++----------------------- 1 file changed, 39 insertions(+), 47 deletions(-) diff --git a/pytest_abra/runner.py b/pytest_abra/runner.py index 99c494f..5667cd9 100644 --- a/pytest_abra/runner.py +++ b/pytest_abra/runner.py @@ -64,20 +64,14 @@ class Runner: self._run_test_with_checks(test) def _run_test_with_checks(self, test: Test): - # dependency passed: true / false - # already_passed: true / false - # prevent_skip: true / false - # condition_available: true / pass - # condition_met: true / false - identifier_string = self.combine_names(self.env_type, test.test_file) - results = list(self._tests_path.rglob(test.test_file)) - assert len(results) == 1, f"{test.test_file} should exist exactly 1 time, but found {len(results)} times" - full_test_path = results[0] + test_files = list(self._tests_path.rglob(test.test_file)) + assert len(test_files) == 1, f"{test.test_file} should exist exactly once, but found {len(test_files)} times" + full_test_path = test_files[0] # check if test aleady passed - if self._is_test_passed(identifier_string, remove_existing=True): + if self._is_test_passed(self.DIR, identifier_string): if test.prevent_skip: logger.info(f"continuing {identifier_string} (passed before but prevent_skip=True)") else: @@ -85,19 +79,19 @@ class Runner: return if test.condition: - condition_result = self._run_condition(test.condition) + condition_result = self._call_condition_function(test.condition) if not condition_result: # test condition is defined but not met logger.info(f"skipping {identifier_string} (test condition is not met)") - self.create_result_file(self.DIR, result="skipped", identifier_string=identifier_string) + self._create_status_file(self.DIR, status="skipped", identifier_string=identifier_string) return # test condition is 
undefined or not met logger.info(f"running {identifier_string}") - result = self._call_pytest(full_test_path) - self.create_result_file(self.DIR, result=result, identifier_string=identifier_string) + exit_code = self._call_pytest(full_test_path) + self._create_status_file(self.DIR, status=exit_code, identifier_string=identifier_string) - def _run_condition(self, condition_function: Callable[[ConditionArgs], bool]): + def _call_condition_function(self, condition_function: Callable[[ConditionArgs], bool]): """run the test condition function with multiple arguments""" # more arguments can be added later without changing the function signature conditon_args = ConditionArgs( @@ -107,24 +101,38 @@ class Runner: ) return condition_function(conditon_args) - def _is_test_passed(self, identifier_string: str, remove_existing: bool = False) -> bool: - """returns True if the selected test matching identifier_string already passed + @classmethod + def _create_status_file( + cls, + DIR: "DirManager", + status: int | str, + identifier_string: str, + ): + """create result file to indicate passed/failed/skipped test""" - This is determined by the presence of a specific output file in the RESULTS folder that - matches identifier_string + if isinstance(status, int): + status = cls.exit_code_to_str(status) - remove_existing: If True, result files matching identifier_string with a status - other than 'passed' will be deleted""" + # remove matching files + [f.unlink() for f in DIR.STATUS.glob("*") if identifier_string in f.name] - already_passed = False - for status in self.DIR.STATUS.glob("*"): - if identifier_string in status.name: - # process any result file (passed / failed / skipped) if it exists - if "passed" in status.name: - already_passed = True - elif remove_existing: - status.unlink() - return already_passed + full_name = cls.combine_names(status, identifier_string) + file_path = DIR.STATUS / full_name + with open(file_path, "w") as _: + pass # create empty file + + 
@staticmethod + def _is_test_passed(DIR: "DirManager", identifier_string: str) -> bool: + """returns True if the selected test matching identifier_string already passed""" + + matching_files = [f for f in DIR.STATUS.glob("*") if identifier_string in f.name] + if len(matching_files) == 1: + status_file = matching_files[0] + if "passed" in status_file.name: + return True + elif len(matching_files) > 1: + logger.warning("more than one matching status file found") + return False def _call_pytest(self, full_test_path: Path) -> int: """runs pytest programmatically with a specific file @@ -175,22 +183,6 @@ class Runner: return pytest.main(command_arguments) - @classmethod - def create_result_file( - cls, - DIR: "DirManager", - result: int | str, - identifier_string: str, - ): - """create result file to indicated passed/failed or skipped test""" - - if isinstance(result, int): - result = cls.result_int_to_str(result) - full_name = cls.combine_names(result, identifier_string) - file_path = DIR.STATUS / full_name - with open(file_path, "w") as _: - pass # create empty file - def _dependencies_passed(self): """returns true if all setups of each dependency have passed""" @@ -206,7 +198,7 @@ class Runner: return all(results) @staticmethod - def result_int_to_str(result_int: int) -> str: + def exit_code_to_str(result_int: int) -> str: """converts the pytest exit code (int) into a meaningful string""" match result_int: case 0: