diff --git a/.gitignore b/.gitignore index 47caf4d..f541c4b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,10 @@ __pycache__/ +test-output/ +TestResults/ +.vscode/ +*.pyc *.json *.zip -TestResults/ +*.egg-info +credentials* +!credentials-example.json \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..2cb2d96 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "envfiles"] + path = envfiles + url = ssh://git@git.local-it.org:2222/local-it-infrastructure/dev.local-it.cloud.git diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..609f0a0 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,18 @@ +FROM python:3.12-slim + +RUN pip install --no-cache-dir pytest-playwright + +RUN playwright install + +RUN playwright install-deps + +COPY . /code + +WORKDIR /code + +RUN pip install --no-cache-dir -e . + +RUN rm -rf /code + +RUN ln -s /code/recipes /recipes +RUN ln -s /code/envfiles /envfiles diff --git a/README.md b/README.md index 4d956f1..2a1b8f0 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,69 @@ +# pytest-abra -# Installation +Pytest-Abra is an installable python package baed on pytest, designed to test instances created with [abra](https://docs.coopcloud.tech/abra/). After installation, you will have two things: -pip install pytest-playwright +- `abratest` CLI command. *Used to initialize the testing.* + +- `pytest-abra` Pytest plugin. *Automatically loads custom fixtures in any pytest run (see `pytest_abra/custom_fixtures.py`)* + +# Usage + +Pytest-abra can easily be installed on any system but also offers a Docker image. 
To use pytest-abra, follow these steps: + +## Usage [without Docker] + +### Installation [without Docker] + +To clone with submodules, use these git commands: + +```bash +git clone --recurse-submodules +// optional: +git submodule update --init // add submodule after normal cloning +git submodule update --remote // update submodules +``` + +Create a python environment and install all dependencies via + +```bash +pip install -e . playwright install +``` -# Run Tests: +### Run [without Docker] -pytest -k nextcloud +Run the helper script or directly use the cli command (see docs) -playwright show-trace trace.zip +```bash +python main.py # run pytest-abra +abratest [options] +``` + +## Usage [with docker] + +### Installation [with docker] + +To clone with submodules, use these git commands: + +```bash +git clone --recurse-submodules +// optional: +git submodule update --init // add submodule after normal cloning +git submodule update --remote // update submodules +``` + +Build the image + +```bash +docker compose build # build the image +docker compose build --no-cache # Force rebuild without cache +``` + +### Run [with docker] + +Run the script + +```bash +docker compose run --rm app python main.py # run pytest-abra +docker compose run --rm -it app /bin/bash # use the container interactively +``` diff --git a/credentials-example.json b/credentials-example.json new file mode 100644 index 0000000..fa0f58b --- /dev/null +++ b/credentials-example.json @@ -0,0 +1,9 @@ +{ + "ADMIN_USER": "admin", + "ADMIN_PASS": "password", + "IMAP_EMAIL": "test@domain.com", + "IMAP_HOST": "mail.domain.com", + "IMAP_PORT": "993", + "IMAP_USER": "imap_user", + "IMAP_PASS": "password" +} \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..5da046e --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,6 @@ +services: + app: + build: . 
+ container_name: python-env + volumes: + - .:/code diff --git a/docs/documentation.md b/docs/documentation.md new file mode 100644 index 0000000..421dcb2 --- /dev/null +++ b/docs/documentation.md @@ -0,0 +1,330 @@ +# pytest-abra + +Pytest-Abra is an installable python package baed on pytest, designed to test instances created with [abra](https://docs.coopcloud.tech/abra/). After installation, you will have two things: + +- `abratest` CLI command. *Used to initialize the testing.* + +- `pytest-abra` Pytest plugin. *Automatically loads custom fixtures in any pytest run (see `pytest_abra/custom_fixtures.py`)* + +# Getting Started + +Pytest-abra can easily be installed on any system but also offers a Docker image. To use pytest-abra, follow these steps: + +## Usage [without Docker] + +### Installation [without Docker] + +To clone with submodules, use these git commands: + +```bash +git clone --recurse-submodules +// optional: +git submodule update --init // add submodule after normal cloning +git submodule update --remote // update submodules +``` + +Create a python environment and install all dependencies via + +```bash +pip install -e . 
+playwright install +``` + +### Run [without Docker] + +Run the helper script or directly use the cli command (see docs) + +```bash +python main.py # run pytest-abra +abratest [options] +``` + +## Usage [with docker] + +### Installation [with docker] + +To clone with submodules, use these git commands: + +```bash +git clone --recurse-submodules +// optional: +git submodule update --init // add submodule after normal cloning +git submodule update --remote // update submodules +``` + +Build the image + +```bash +docker compose build # build the image +docker compose build --no-cache # Force rebuild without cache +``` + +### Run [with docker] + +Run the script + +```bash +docker compose run --rm app python main.py # run pytest-abra +docker compose run --rm -it app /bin/bash # use the container interactively +``` + +# Documentation + +After Installation, `abratest` can be called via terminal: + +```bash +abratest [arguments] +``` + +To run successfully, very specific arguments are required. The easiest way to use `abratest` is with the helper script `main.py`. Of yourse you can implement a similar helper script in the language of your liking. + +## CLI Interface + +The cli command `abratest` has 3 **required arguments**: + +- `--env_paths ENV_PATHS`: list of the .env files used in the test +- `--recipes_dir RECIPES_DIR`: directory of all available abra recipes +- `--output_dir OUTPUT_DIR`: target directory for all test results + +Furtheremore, there are these optional arguments: + +- `--resume`: `abratest` will take the directory in `output_dir` with the most recent creation date and resume the tests there. +- `--session_id SESSION_ID`: Instead of generating a new session_id, the given session_id is used to run or resume the test. Overwrites --resume to False. 
+- `--debug`: enables playwright debug mode, see docs [here](https://playwright.dev/python/docs/running-tests#debugging-tests) +- `--timeout`: will overwrite the default playwright timeouts in [ms], see docs [here](https://playwright.dev/python/docs/api/class-browsercontext#browser-context-set-default-timeout) and [here](https://playwright.dev/python/docs/test-assertions#global-timeout). In our current setup, some tests can fail at 10s but will pass with 20s. + +### env_paths [required | string] + +The .env files provied through the `--env_paths` argument are the most important input to abratest, as they serve as configuration for the tests. One or more paths pointing at .env files can be provided, multiple paths are separated with ";". These .env files are actually the same files that are used to configure the `abra` recipes for instance creation. + +To run `abratest` with these `.env` configuration files + +- `/path/config_1.env` [of TYPE authentik] +- `/path/config_2.env` [of TYPE wordpress] +- `/path/config_3.env` [of TYPE wordpress] + +we simply call + +``` +abratest --env_paths /path/config_1.env;/path/config_2.env;/path/config_3.env [...other args] +``` + +Under the hood, each `.env` file in `--env_paths` will create one instance of a `Runner` subclass. Let's say we have `config_2.env` containing `TYPE=wordpress`. This will create an instance of `RunnerWordpress`. This class has to be imported from `recipes_dir`. + +### recipes_dir [required | string] + +The required argument `--recipes_dir` has to point to the directory, where all the abra recipes are stored. 
We can call `abratest` with + +``` +abratest --recipes_dir /path/to/abra/recipes +``` + +The expected dir structure inside of `recipes_dir` is as follows: + +``` +DIR recipes_dir [contains abra recipes] +│ +├── DIR authentik [authentik recipe] +│ ├── [files from authentik recipe] +│ └── DIR tests_authentik [pytest tests for authentik] +│ ├── FILE runner_authentik.py # containing RunnerAuthentik class +│ └── [pytest_files] +│ +└── DIR wordpress [wordpress recipe] + ├── [files from wordpress recipe] + └── DIR tests_wordpress [pytest tests for wordpress] + ├── FILE runner_wordpress.py # containing RunnerWordpress class + └── [pytest_files] +``` + +The class `RunnerWordpress` will be automatically imported using `importlib` library, which is equivalent to the code below. Note that `recipes_dir` will be added to sys.path automatically for the import to work and that every `Runner` class matching `recipes_dir.rglob("*/runner*.py")` will be imported. + +```python +from wordpress.tests_wordpress.runner_wordpress import RunnerWordpress +``` + +### output_dir [required | string] + +Path to the directory where all test outputs are stored (test report, tracebacks, playwright traces etc.) + +``` +abratest --output_dir /path/to/output +``` + +# Functionality + +Abratest has 3 required inputs, but most importantly the test configuration is done through the .env files given with the --env_paths argument. 
So let's say we want to run abratest with these 3 .env files: + +- `config1.env` [of TYPE authentik] + +- config2.env [of TYPE wordpress] + +- config3.env [of TYPE wordpress] + +Now we run + +```bash +abratest --env_paths path/config1.env;path/config2.env;path/config3.env [...other args] +``` + + +``` +abratest -> create Coordinator() instance +└── Coordinator() -> create Runner() subclass instances + ├── RunnerAuthentik() [based on config1.env, loaded + │ │ from abra/recipes/authentik] + │ │ # RunnerAuthentik with 3 test files: + │ ├── RUN pytest path/setup_authentik.py + │ ├── RUN pytest path/test_authentik_1.py + │ └── RUN pytest path/test_authentik_2.py + ├── RunnerWordpress() [based on config2.env, loaded + │ │ from abra/recipes/wordpress] + │ │ # RunnerWordpress with 1 test file + │ ├── RUN pytest path/setup_authentik.py + │ ├── RUN pytest path/test_authentik_1.py + │ └── RUN pytest path/test_authentik_2.py + └── RunnerWordpress() [based on config3.env, loaded + │ from abra/recipes/wordpress] + │ # RunnerWordpress with 1 test file + ├── RUN pytest path/setup_authentik.py + ├── RUN pytest path/test_authentik_1.py + └── RUN pytest path/test_authentik_2.py + + +``` + +Coordinator will take care of the correct order of the tests. In general, tests are placed in one of 3 categories: `setups`, `tests` and `cleanups`. To associate a test with one of these categories, place the Test in the corresponding list of the Runner class, i.e. Runner.setups = [test] or Runner.tests = [test]. The execution order will be. + +> [setups] ➔ [tests] ➔ [cleanups] + + +Furthermore, some `Runner` classes can depend on others. For example, `RunnerWordpress` depends on `RunnerAuthentik`. Therefore, `Coordinator` will make sure that `RunnerAuthentik` runs before `RunnerWordpress`. We will end up with with this order: + +| # | Runner | Type | +| --- | -------------- | -------- | +| 1. | Authentik | setups | +| 2. | Wordpress-1 | setups | +| 3. | Wordpress-2 | setups | +| 4. 
| Authentik | tests | +| 5. | Wordpress-1 | tests | +| 6. | Wordpress-2 | tests | +| 7. | Authentik | cleanups | +| 8. | Wordpress-1 | cleanups | +| 9. | Wordpress-2 | cleanups | + + +# Create a test suite for a recipe + +todo + +To understand how a test suite is built, let's have a look at the files + +runner_authentik.py -> required, defines the Runner subclass (see below) +conftest.py -> not required. special file for pytest. is automatically discovered and loaded. convenient place to define fixtures and functions to be used in more than one test routine +setup_authentik.py -> not required. can hold setup routine for authentik, has to be registered in runner_authentik.py +fixtures_authentik.py -> not required. holds fixtures that are meant to be imported by other test modules that depend on authentik. + +# Create a custom Runner + +To comprehend the process of creating a new subclass of `Runner`, let's examine a simplified rendition of the `RunnerWordpress` class. Within it, there exist two setup scripts and two test scripts, one of which operates conditionally. + + +```python +from pytest_abra import Runner, Test + +class RunnerWordpress(Runner): + env_type = "wordpress" + dependencies = ["authentik"] + setups = [ + Test(test_file="setup_wordpress_1.py"), + Test(test_file="setup_wordpress_2.py"), + ] + tests = [ + Test(test_file="test_wordpress.py"), + Test(condition=condition_function, test_file="test_wordpress_conditional.py"), + ] + cleanups = [] +``` + +The signature of condition functions can be seen below. The function takes one `NamedTuple` and returns of type `bool`. You can learn about the contents of the input by looking up the class `ConditionArgs`. Generally speaking, it provides access to all of the .env files, especially the one related to the current Runner. + +```python +def condition_function(args: ConditionArgs) -> bool: + ... 
+``` + +## Discovery of `Runners` and `Tests` + +- Runners will be discovered, if they are defined in a moduled of name `runner_*.py` including a class of name `Runner*`. + +- Tests will be discovered by filename as long as they are placed in the parent dir of `runner_*.py` or in any subdirectory. + +``` +DIR parent_dir +├── FILE runner_*.py +├── FILE test1.py +└── DIR subdir + ├── DIR subsubdir + │ └── test2.py + └── test3.py +``` + +# Create custom Tests + +The test files are written in the same way as any other pytest test file. The only difference is that pytest-abra provides custom fixtures that make it easy to get the configuration by the provided .env files and to deal with URLS etc. + + +### Step 1) Add new Test + +Create a new testfile `new_test.py` in the same directory or a subdirectory of `runner_wordpress.py`. +Register `new_test.py` as a test in the `RunnerWordpress` class. +Set prevent_skip=True, so that you can run your new test over and over again for debugging, without it being skipped + +```python +# runner_wordpress.py +from pytest_abra import Runner, Test + +class RunnerWordpress(Runner): + env_type = "wordpress" + tests = [ + Test(test_file="working_test.py"), + Test(test_file="new_test.py", prevent_skip=True), + ] +``` + +```python +# new_test.py + +def test_new(): + ... +``` + +### Step 2) Call abratest + +Call abratest with `--debug` to enable playwright debug mode and either `--session_id` or `--resume`. + +```bash +abratest [required-options] --debug --session_id debug_session +``` + +This could be done by modifying `main.py`. The first time you run abratest, all tests will be executed as usual. The second time, all tests will be skipped as they have passed already. Only your new test will be run again and again, as the prevent_skip option is enabled. So you can run all tests once and then skip all tests besides your new test you want to debug. 
+ +# todo: add example + +# Playwright Debug & Codegen + +Use playwright debug mode or codegen to create testing code easily by recording browser actions https://playwright.dev/python/docs/codegen + +```bash +abratest --debug # launch your tests in debug mode +playwright codegen demo.playwright.dev/todomvc # visit given url in codegen mode +``` + +## Development + +```bash +pytest # test pytest-abra +pytest -m "not slow" # test pytest-abra without slow tests +pytest --collect-only # debug test pytest-abra +docker compose run --rm app pytest # run pytest-abra +``` diff --git a/envfiles b/envfiles new file mode 160000 index 0000000..a8375f6 --- /dev/null +++ b/envfiles @@ -0,0 +1 @@ +Subproject commit a8375f6fc7a285a1000b5553be47eaf19b0be0a6 diff --git a/main.py b/main.py new file mode 100644 index 0000000..9d3ac37 --- /dev/null +++ b/main.py @@ -0,0 +1,46 @@ +import subprocess +from pathlib import Path + +from pytest_abra.utils import load_json_to_environ + +# --------------------- load credentials to env variables -------------------- # + +cred_file = Path("credentials.json") +load_json_to_environ(cred_file) + +# --------------------------------- env files -------------------------------- # + +# This list of env files is the input to testing framework. each env file +# triggers the execution of one test Runner and provides configuration to the +# tests inside the runner. 
+ +ENV_FILES_ROOT = Path("./envfiles").resolve() +ENV_FILES = [ + ENV_FILES_ROOT / "login.test.dev.local-it.cloud.env", # authentik + ENV_FILES_ROOT / "blog.test.dev.local-it.cloud.env", # wordpress + ENV_FILES_ROOT / "files.test.dev.local-it.cloud.env", # nextcloud +] +ENV_PATHS = ";".join([x.as_posix() for x in ENV_FILES]) + +# ----------------------------------- dirs ----------------------------------- # + +RECIPES_DIR = Path("./recipes").resolve() +OUTPUT_DIR = Path("./test-output").resolve() + +# ------------------------------------ run ----------------------------------- # + +subprocess.run( + [ + "abratest", + "--env_paths", + ENV_PATHS, + "--recipes_dir", + RECIPES_DIR, + "--output_dir", + OUTPUT_DIR, + "--resume", + # "--debug", + # "--session_id", + # "abc", + ] +) diff --git a/previous-work/README.md b/previous-work/README.md new file mode 100644 index 0000000..4d956f1 --- /dev/null +++ b/previous-work/README.md @@ -0,0 +1,11 @@ + +# Installation + +pip install pytest-playwright +playwright install + +# Run Tests: + +pytest -k nextcloud + +playwright show-trace trace.zip diff --git a/authentik_test.py b/previous-work/authentik_test.py similarity index 100% rename from authentik_test.py rename to previous-work/authentik_test.py diff --git a/config.yaml b/previous-work/config.yaml similarity index 100% rename from config.yaml rename to previous-work/config.yaml diff --git a/conftest.py b/previous-work/conftest.py similarity index 100% rename from conftest.py rename to previous-work/conftest.py diff --git a/nextcloud_test.py b/previous-work/nextcloud_test.py similarity index 100% rename from nextcloud_test.py rename to previous-work/nextcloud_test.py diff --git a/pytest.ini b/previous-work/pytest.ini similarity index 100% rename from pytest.ini rename to previous-work/pytest.ini diff --git a/vikunja_test.py b/previous-work/vikunja_test.py similarity index 100% rename from vikunja_test.py rename to previous-work/vikunja_test.py diff --git a/wekan_test.py 
b/previous-work/wekan_test.py similarity index 100% rename from wekan_test.py rename to previous-work/wekan_test.py diff --git a/previous-work/wordpress_test.py b/previous-work/wordpress_test.py new file mode 100644 index 0000000..960fa72 --- /dev/null +++ b/previous-work/wordpress_test.py @@ -0,0 +1,14 @@ +from playwright.sync_api import BrowserContext, expect + +from pytest_abra.dir_manager import DirManager + + +def test_wordpress(admin_session: BrowserContext, env_config: dict[str, str], DIR: DirManager): + page_authentik = admin_session.new_page() + with page_authentik.expect_popup() as event_context: + page_authentik.get_by_role("link", name="Wordpress").click() + page_wordpress = event_context.value + + expect(page_wordpress.locator("#wpcontent")).to_be_visible() + if "locale" in env_config and "de" in env_config["locale"]: + expect(page_wordpress.get_by_role("heading")).to_have_text("Willkommen bei WordPress!") diff --git a/prototyping/dependency_injection.py b/prototyping/dependency_injection.py new file mode 100644 index 0000000..dbd3760 --- /dev/null +++ b/prototyping/dependency_injection.py @@ -0,0 +1,19 @@ +import inspect + +a = 2 +b = 3 +c = 4 + + +def func(a: int, c: int) -> int: + return a + c + + +arg_names = inspect.getfullargspec(func).args +print(arg_names) # ['a', 'c'] + +arguments = {arg: globals()[arg] for arg in arg_names if arg in globals()} +print(arguments) # {'a': 2, 'c': 4} + +result = func(**arguments) +print(result) # 6 diff --git a/prototyping/email_imbox.py b/prototyping/email_imbox.py new file mode 100644 index 0000000..15cf562 --- /dev/null +++ b/prototyping/email_imbox.py @@ -0,0 +1,53 @@ +# %% +import datetime +import json +import os +from pathlib import Path + +from imbox import Imbox + +cred_file = Path("../credentials.json") +with open(cred_file, "r") as f: + CREDENTIALS = json.load(f) + +for key, value in CREDENTIALS.items(): + os.environ[key] = value + +IMAP_HOST = os.environ["IMAP_HOST"] +IMAP_PORT = 
os.environ["IMAP_PORT"] +IMAP_USER = os.environ["IMAP_USER"] +IMAP_PASS = os.environ["IMAP_PASS"] + + +with Imbox( + hostname=os.environ["IMAP_HOST"], + port=os.environ["IMAP_PORT"], + username=os.environ["IMAP_USER"], + password=os.environ["IMAP_PASS"], + ssl=True, + ssl_context=None, + starttls=False, +) as imbox: + # Get all folders + status, folders_with_additional_info = imbox.folders() + + # Gets all messages from the inbox + all_inbox_messages = imbox.messages() + + # Messages received after specific date + inbox_messages_received_after = imbox.messages(date__gt=datetime.date(2018, 7, 30)) + + # Messages whose subjects contain a string + inbox_messages_subject_christmas = imbox.messages(subject="Christmas") + + for uid, message in all_inbox_messages: + print(uid, message.subject, message.date) + # # Every message is an object with the following keys + + # message.sent_from + # message.sent_to + # message.subject + # message.headers + # message.message_id + # message.date + # message.body.plain diff --git a/prototyping/env_var_subprocess.py b/prototyping/env_var_subprocess.py new file mode 100644 index 0000000..aa0a027 --- /dev/null +++ b/prototyping/env_var_subprocess.py @@ -0,0 +1,17 @@ +import os +import subprocess + +# Set an environment variable in the parent process +os.environ["PARENT_VARIABLE"] = "12345s" + +# Spawn a subprocess and modify the environment variable +subprocess.run( + [ + "python", + "-c", + "import os; print('b', os.environ['PARENT_VARIABLE']); os.environ['PARENT_VARIABLE'] = 'modified_value'; print('c', os.environ['PARENT_VARIABLE'])", + ] +) + +# Check if the modification in the subprocess affected the parent process +print("a", os.environ["PARENT_VARIABLE"]) # This will print 'parent_value', not 'modified_value' diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..0a2ed00 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,66 @@ +[project] +name = "pytest-abra" +description = "A pytest plugin to test instances 
of abra recipes" +dynamic = ["version"] +authors = [{name = "Local-IT e.V."}] +readme = "README.md" +requires-python = ">=3.10" +classifiers = [ +"Programming Language :: Python :: 3", +"Programming Language :: Python :: 3.10", +"Programming Language :: Python :: 3.11", +"Programming Language :: Python :: 3.12", +"Framework :: Pytest", +] +dependencies = [ + "pytest == 7.4.3", + "playwright == 1.40", + "pytest-html == 4.1.1", + "pytest-playwright == 0.4.3", + "python-dotenv == 1.0.0", + "loguru == 0.7.2", + "beautifulsoup4 == 4.12.2", + "imbox == 0.9.8", + "tabulate == 0.9.0", + "hatchling == 1.18.0", + "icecream == 2.1.3", +] + +[project.optional-dependencies] +dev = [ + "mypy", + "ruff", +] + +[project.entry-points.pytest11] +pytest_abra = "pytest_abra.custom_fixtures" + +[project.scripts] +abratest = "pytest_abra.cli:run" + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.version] +path = "pytest_abra/__init__.py" + +[tool.hatch.build] +include = [ + "pytest_abra/*.py", +] +exclude = [ + "*.json", +] + + +[tool.ruff] +line-length = 120 +target-version = "py311" + +[tool.pytest.ini_options] +norecursedirs = ".* previous-work recipes" +testpaths = "tests" +markers = [ + "slow: marks tests as slow", +] \ No newline at end of file diff --git a/pytest_abra/__init__.py b/pytest_abra/__init__.py new file mode 100644 index 0000000..76af3df --- /dev/null +++ b/pytest_abra/__init__.py @@ -0,0 +1,18 @@ +from pytest_abra.coordinator import Coordinator +from pytest_abra.dir_manager import DirManager +from pytest_abra.env_manager import EnvFile, EnvManager +from pytest_abra.runner import ConditionArgs, Runner, Test +from pytest_abra.utils import BaseUrl + +__all__ = [ + "Coordinator", + "ConditionArgs", + "Runner", + "Test", + "DirManager", + "BaseUrl", + "EnvFile", + "EnvManager", +] + +__version__ = "0.3.0" diff --git a/pytest_abra/cli.py b/pytest_abra/cli.py new file mode 100644 index 0000000..ab4d791 --- /dev/null +++ 
b/pytest_abra/cli.py @@ -0,0 +1,62 @@ +import argparse +import os +import sys +from pathlib import Path + +from loguru import logger + +from pytest_abra import Coordinator, __version__ +from pytest_abra.dir_manager import DirManager +from pytest_abra.utils import get_session_id + + +def get_version(): + return __version__ + + +def run(): + parser = argparse.ArgumentParser() + parser.add_argument("--version", "-V", action="version", version=get_version(), help="output the version number") + parser.add_argument("--env_paths", type=str, help="List of loaded env files separated with ;", required=True) + parser.add_argument("--recipes_dir", type=Path, help="Dir of abra recipes and respective runners", required=True) + parser.add_argument("--output_dir", type=Path, help="Dir of test outputs", required=True) + parser.add_argument("--timeout", type=int, help="Set Playwright timeout in ms", default=30_000) + parser.add_argument("--debug", action="store_true", help="Enable Playwright debug mode") + parser.add_argument("--resume", action="store_true", help="Re-run the most recent test, skipping passed tests") + parser.add_argument("--session_id", help="Session dir name (inside output_dir). Overwrites --resume") + parser.add_argument("--cleanup", help="Force test cleanup. 
Should not be necessary") + + args = parser.parse_args() + env_paths = [Path(s) for s in args.env_paths.split(";")] + + # -------------------------- enable playwright debug ------------------------- # + + if args.debug: + os.environ["PWDEBUG"] = "1" + + # ----------------------------- define session_id ---------------------------- # + + session_id = get_session_id(args.output_dir, args.resume, args.session_id) + + # ------------------------------- setup logging ------------------------------ # + + # todo: move to Coordinator + DIR = DirManager(output_dir=args.output_dir, session_id=session_id) + log_file = DIR.RESULTS / "coordinator.log" + logger.remove() + logger.add(log_file, format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}") + logger.add(sys.stdout, colorize=True, format="{time:YYYY-MM-DD HH:mm:ss} {message}") + + # ---------------------------- initialize and run ---------------------------- # + + coordinator = Coordinator( + env_paths=env_paths, + output_dir=args.output_dir, + session_id=session_id, + recipes_dir=args.recipes_dir, + timeout=args.timeout, + ) + coordinator.prepare_tests() + coordinator.run_tests() + coordinator.combine_html() + coordinator.collect_traces() diff --git a/pytest_abra/coordinator.py b/pytest_abra/coordinator.py new file mode 100644 index 0000000..6704360 --- /dev/null +++ b/pytest_abra/coordinator.py @@ -0,0 +1,145 @@ +import importlib +import json +import re +import sys +from pathlib import Path + +from loguru import logger +from tabulate import tabulate # type: ignore + +from pytest_abra.dir_manager import DirManager +from pytest_abra.env_manager import EnvFile, EnvManager +from pytest_abra.html_helper import merge_html_reports +from pytest_abra.runner import Runner +from pytest_abra.shared_types import TestResult +from pytest_abra.utils import generate_random_string, load_json_to_environ, rmtree + + +class Coordinator: + def __init__( + self, + env_paths: list[Path], + output_dir: Path, + session_id: str, + 
recipes_dir: Path, + timeout: int, + ) -> None: + # logging + out_string = "".join([e.name + "\n" for e in env_paths]) + out_string += f"output_dir = {output_dir}\n" + out_string += f"session_id = {session_id}" + logger.info(f"initialize Coordinator instance with\nenv_paths_list =\n{out_string}") + + self.RUNNER_DICT = self.create_runner_dict(recipes_dir) + self.DIR = DirManager(output_dir=output_dir, session_id=session_id, recipes_dir=recipes_dir) + self.ENV = EnvManager(env_paths=env_paths, RUNNER_DICT=self.RUNNER_DICT) + self.TIMEOUT = timeout + + def prepare_tests(self) -> None: + logger.info("calling prepare_tests()") + self.DIR.create_all_dirs() + self.ENV.copy_env_files(self.ENV.env_files, self.DIR) + self.load_test_credentials(self.DIR) + + def run_tests(self) -> None: + logger.info("calling run_tests()") + self.runners: list[Runner] = self._load_runners(self.ENV.env_files) + status_list: list[TestResult] = [] + for runner in self.runners: + status_list.extend(runner.run_setups()) + for runner in self.runners: + status_list.extend(runner.run_tests()) + for runner in self.runners: + status_list.extend(runner.run_cleanups()) + status_table = tabulate([[t.test_name, t.status] for t in status_list], headers=["name", "status"]) + logger.info(f"run_tests() finished\n{status_table}") + + def _load_runners(self, env_files: list[EnvFile]) -> list[Runner]: + """Creates an instance of the correct Runner class for each given env file""" + runners: list[Runner] = [] + for index, env_file in enumerate(env_files): + RunnerClass = self.RUNNER_DICT[env_file.env_config["TYPE"]] + runners.append(RunnerClass(coordinator=self, runner_index=index)) + return runners + + def combine_html(self) -> None: + """combines all generated pytest html reports into one""" + in_dir_path = str(self.DIR.RESULTS / "html") + out_file_path = str(self.DIR.RESULTS / "full-report.html") + title = "combined.html" + merge_html_reports(in_dir_path, out_file_path, title) + + def collect_traces(self): + 
"""moves all traces into SESSION/RESULTS dir + + if tests are rerun and generate another trace, the new trace will get a unique name such as + tracename-0 + tracename-1 + ... + """ + + def get_new_path(root_dir: Path, base_name: str, index=0) -> Path: + new_name_alt = base_name + f"-{index}" + if not (root_dir / new_name_alt).is_dir(): + return root_dir / new_name_alt + else: + index += 1 + return get_new_path(root_dir, base_name, index=index) + + trace_root_dir = self.DIR.RESULTS / "traces" + for f in trace_root_dir.rglob("*/trace.zip"): + new_path = get_new_path(self.DIR.RESULTS, f.parent.name) + f.parent.rename(new_path) + rmtree(trace_root_dir) + + @staticmethod + def load_test_credentials(DIR: DirManager): + """Load test user credentials. If not available, create them randomly. + + Test users are created during testing but should be deleted after the test. In case test + users are not deleted after tests by accident, the user credentials are not known to an + attacker.""" + + test_credentials_path = DIR.STATES / "credentials_test.json" + if not test_credentials_path.is_file(): + test_credentials = { + "TEST_USER": "test-" + generate_random_string(6), + "TEST_PASS": generate_random_string(12, punctuation=True), + } + + with open(test_credentials_path, "w") as json_file: + json.dump(test_credentials, json_file) + + load_json_to_environ(test_credentials_path) + + @staticmethod + def create_runner_dict(recipes_dir: Path) -> dict[str, type[Runner]]: + """Creates a dictionary holding all the RunnerClasses that can be discovered in recipes_dir + + example: + RUNNER_DICT: dict[str, type["Runner"]] = { + "authentik": RunnerAuthentik, + "wordpress": RunnerWordpress, + "nextcloud": RunnerNextcloud, + } + + The Runner classes are automatically imported with importlib. The imports are successful + because recipes_dir is added to sys.path. 
+ """ + + RUNNER_DICT: dict[str, type[Runner]] = dict() + runner_discovery_pattern = re.compile("Runner.+") + + # make it possible to import modules from recipes_dir + sys.path.append(recipes_dir.as_posix()) + + for module_path in recipes_dir.rglob("*/runner_*.py"): + rel_path = module_path.relative_to(recipes_dir).as_posix().replace("/", ".").replace(".py", "") + module = importlib.import_module(rel_path) + runner_class_names = [name for name in dir(module) if runner_discovery_pattern.match(name)] + assert len(runner_class_names) == 1 + runner_class_name = runner_class_names[0] + RunnerClass: type[Runner] = getattr(module, runner_class_name) + RunnerClass._tests_path = module_path.parent + RUNNER_DICT[RunnerClass.env_type] = RunnerClass + return RUNNER_DICT diff --git a/pytest_abra/custom_fixtures.py b/pytest_abra/custom_fixtures.py new file mode 100644 index 0000000..4c07127 --- /dev/null +++ b/pytest_abra/custom_fixtures.py @@ -0,0 +1,152 @@ +# This file is registered as a pytest plugin, meaning it will automatically loaded. +# All fixtures in this file will be available without manual loading. 
import os
import re
from pathlib import Path
from typing import Generator, Protocol, TypedDict

import pytest
from dotenv import dotenv_values
from imbox import Imbox  # type: ignore
from playwright.sync_api import BrowserContext, expect
from pytest import Parser

from pytest_abra import BaseUrl, DirManager, EnvFile


def pytest_addoption(parser: Parser):
    """Register pytest-abra's custom command line options."""
    parser.addoption("--runner_index", action="store", type=int)
    parser.addoption("--output_dir", action="store", type=Path)
    parser.addoption("--session_id", action="store", type=str)
    parser.addoption("--timeout", action="store", type=int, default=30_000)


@pytest.fixture(autouse=True)
def set_expect_timeout(request):
    """Apply the --timeout option to playwright's expect() assertions."""
    TIMEOUT = request.config.getoption("--timeout")
    expect.set_options(timeout=TIMEOUT)


@pytest.fixture
def context(context: BrowserContext, request) -> BrowserContext:
    """Override playwright's built-in context fixture with timeout and locale.

    Because this fixture takes the existing context fixture as an argument, the
    original fixture is created first and then customized here.
    """
    TIMEOUT = request.config.getoption("--timeout")
    LOCALE = {"Accept-Language": "de_DE"}

    context.set_default_timeout(TIMEOUT)
    context.set_extra_http_headers(LOCALE)
    return context


@pytest.fixture(scope="session")
def DIR(request) -> DirManager:
    """Fixture holding test directories

    DIR.OUTPUT
    DIR.SESSION
    DIR.STATES
    DIR.RESULTS
    DIR.STATUS"""

    output_dir = request.config.getoption("--output_dir")
    assert output_dir, "pytest argument --output_dir not set"
    session_id = request.config.getoption("--session_id")
    assert session_id, "pytest argument --session_id not set"
    dirmanager = DirManager(output_dir=output_dir, session_id=session_id)
    dirmanager.create_all_dirs()
    return dirmanager


@pytest.fixture(scope="session")
def env_files(DIR: DirManager) -> list[EnvFile]:
    """List of EnvFile objects, ordered by the numeric prefix of their filename."""
    env_files_dict: dict[int, EnvFile] = dict()
    for env_path in DIR.ENV_FILES.glob("*.env"):
        config: dict[str, str] = dotenv_values(env_path)  # type: ignore
        env_type = config["TYPE"]
        # copied env files are named "<index>-<type>-<original name>"; the
        # leading number encodes the runner index
        result = re.search(r"(\d+)-*", env_path.name)
        assert result, f"env file name {env_path.name} has no numeric prefix"
        env_files_dict[int(result[1])] = EnvFile(env_path=env_path, env_config=config, env_type=env_type)
    return [env_files_dict[key] for key in sorted(env_files_dict)]


@pytest.fixture(scope="session")
def env_config(request, env_files: list[EnvFile]) -> dict[str, str]:
    """Current env_config, selected via --runner_index."""
    runner_index = request.config.getoption("--runner_index")
    # fix: without this check, a missing option would fail later with an
    # opaque "list indices must be integers" TypeError
    assert runner_index is not None, "pytest argument --runner_index not set"
    return env_files[runner_index].env_config


@pytest.fixture(scope="session")
def URL(env_config: dict[str, str]) -> BaseUrl:
    """BaseUrl object based on the current DOMAIN."""
    return BaseUrl(netloc=env_config["DOMAIN"])


@pytest.fixture(scope="session")
def imap_client() -> Generator[Imbox, None, None]:
    """IMAP email client using credentials from environment variables."""
    for var in ("IMAP_HOST", "IMAP_PORT", "IMAP_USER", "IMAP_PASS"):
        # fix: os.environ[var] would raise KeyError before the assert message
        # is ever shown; .get() reports *which* variable is missing
        assert os.environ.get(var), f"required environment variable {var} is undefined"

    imbox = Imbox(
        hostname=os.environ["IMAP_HOST"],
        port=os.environ["IMAP_PORT"],
        username=os.environ["IMAP_USER"],
        password=os.environ["IMAP_PASS"],
        ssl=True,
        ssl_context=None,
        starttls=False,
    )

    yield imbox

    imbox.logout()


class Body(TypedDict):
    plain: list
    html: list


class Message(Protocol):
    """Structural type of the message objects yielded by Imbox."""

    sent_from: list
    sent_to: list
    subject: str
    headers: list
    date: str
    body: Body


@pytest.fixture
def imap_recent_messages(imap_client: Imbox) -> list[Message]:
    """Fetch all messages from the inbox.

    # iterate with
    for message in messages:
        print(message.subject, message.date)

    TODO: restrict to messages of the last N minutes via
    imap_client.messages(date__gt=datetime.now() - timedelta(minutes=N))
    once needed (the original had this sketched out but disabled).
    """
    # fix: removed leftover icecream debug call and the unused `uids` list
    return [message for _uid, message in imap_client.messages()]


# --- new file: pytest_abra/dir_manager.py ---


class DirManager:
    """Manages directories for the tests and should be used to create, find
    and use the correct directories.

    The structure is as follows:
    tests dir/
        session_id-1/
            results
            states
            status
        session_id-2/
            results
        ...
    """

    def __init__(self, output_dir: Path | str, session_id: str, recipes_dir: Path | str = ""):
        if isinstance(output_dir, str):
            output_dir = Path(output_dir)
        self.output_dir = output_dir.resolve()
        self.session_id = session_id
        if isinstance(recipes_dir, str):
            recipes_dir = Path(recipes_dir)
        self.recipes_dir = recipes_dir

    def create_all_dirs(self) -> None:
        """Create the whole session directory tree (idempotent)."""
        dirs: list[Path] = [
            self.OUTPUT_DIR,
            self.SESSION,
            self.STATES,
            self.ENV_FILES,
            self.RESULTS,
            self.HTML,
            self.STATUS,
        ]
        for d in dirs:
            # fix: parents=True also creates missing parents of a nested
            # output_dir instead of raising FileNotFoundError
            d.mkdir(parents=True, exist_ok=True)

    @property
    def OUTPUT_DIR(self):
        return self.output_dir

    @property
    def SESSION(self):
        return self.OUTPUT_DIR / self.session_id

    @property
    def STATES(self):
        return self.SESSION / "states"

    @property
    def ENV_FILES(self):
        return self.STATES / "env_files"

    @property
    def RESULTS(self):
        return self.SESSION / "results"

    @property
    def HTML(self):
        return self.RESULTS / "html"

    @property
    def STATUS(self):
        return self.SESSION / "status"

    @property
    def RECIPES(self):
        return self.recipes_dir

    def get_config(self, search_string: str) -> dict[str, str]:
        """Parse the first copied env file whose name contains search_string."""
        # fix: next() without default raised a bare StopIteration when no
        # file matched; fail with a readable message instead
        env_file = next(self.ENV_FILES.glob(f"*{search_string}*"), None)
        assert env_file is not None, f"no env file matching *{search_string}* found in {self.ENV_FILES}"
        config: dict[str, str] = dotenv_values(env_file)  # type: ignore
        return config

    @staticmethod
    def get_latest_session_id(output_dir: Path) -> Optional[str]:
        """Return the name of the newest dir inside output_dir.

        If output_dir does not exist or is empty, None is returned.
        NOTE(review): "newest" is judged by st_ctime, which is metadata-change
        time on Linux -- confirm this matches the intended "latest session".
        """
        if not output_dir.is_dir():
            return None

        all_dirs = [d for d in output_dir.iterdir() if d.is_dir()]
        if not all_dirs:
            return None
        newest_dir: Path = max(all_dirs, key=lambda x: x.stat().st_ctime)
        return newest_dir.name
# --- pytest_abra/env_manager.py ---
import shutil
from pathlib import Path
from typing import TYPE_CHECKING, NamedTuple

if TYPE_CHECKING:
    from pytest_abra import DirManager, Runner


class EnvFile(NamedTuple):
    """One abra env file plus its parsed key/value config."""

    env_path: Path
    env_config: dict[str, str]
    env_type: str

    def __repr__(self) -> str:
        return f"EnvFile(type={self.env_type})"


class DependencyRule(NamedTuple):
    """Ordering constraint: *child* must run after *dependency*."""

    child: str
    dependency: str


class EnvManager:
    """Parses the given env files and orders them so dependencies run first."""

    def __init__(self, env_paths: list[Path], RUNNER_DICT: dict[str, type["Runner"]]):
        self.env_files: list[EnvFile] = self._get_env_files(env_paths)
        self.dependency_rules: list[DependencyRule] = self._get_dependency_rules(self.env_files, RUNNER_DICT)
        self.env_files = self.sort_env_files_by_rule(self.env_files, self.dependency_rules)

    @staticmethod
    def _get_env_files(env_paths: list[Path]) -> list[EnvFile]:
        """Returns a list of EnvFile objects created from the given env files"""
        # local import: keeps this module importable (and its pure sorting
        # logic usable) in environments without python-dotenv
        from dotenv import dotenv_values

        env_files: list[EnvFile] = []
        for env_path in env_paths:
            assert env_path.is_file(), f"the env file {env_path} does not exist"
            config: dict[str, str] = dotenv_values(env_path)  # type: ignore
            assert "TYPE" in config, f"the env file {env_path} does not specify the required TYPE key."
            env_files.append(EnvFile(env_path=env_path, env_config=config, env_type=config["TYPE"]))
        return env_files

    @staticmethod
    def _get_dependency_rules(env_files: list[EnvFile], RUNNER_DICT: dict[str, type["Runner"]]) -> list[DependencyRule]:
        """Collect one DependencyRule per (env file, declared dependency) pair."""
        dependency_rules: list[DependencyRule] = []
        for env_file in env_files:
            assert env_file.env_type in RUNNER_DICT, f"no runner for env_type={env_file.env_type} found in RUNNER_DICT"
            child_runner_class = RUNNER_DICT[env_file.env_type]
            for dependency in child_runner_class.dependencies:
                dependency_rules.append(DependencyRule(child=child_runner_class.env_type, dependency=dependency))
        return dependency_rules

    @staticmethod
    def _get_indices_by_string(in_list: list[EnvFile], string: str) -> list[int]:
        """returns all indices of items in in_list, where item.env_type matches string"""
        return [index for index, element in enumerate(in_list) if element.env_type == string]

    @staticmethod
    def _swap_item_with_previous(in_list: list[EnvFile], index: int):
        """swaps item at index N with item at index N-1 (in place)"""
        assert index > 0, "cannot swap with negative index"
        in_list[index], in_list[index - 1] = in_list[index - 1], in_list[index]

    @classmethod
    def is_rule_satisfied(cls, env_list: list[EnvFile], rule: DependencyRule, swap=False) -> bool:
        """Return whether the ordering in env_list is compliant with the given rule.

        If swap=True, the offending dependency is moved one position towards
        the front as a single repair step (the caller iterates until stable).
        """
        child_indices = cls._get_indices_by_string(env_list, rule.child)
        parent_indices = cls._get_indices_by_string(env_list, rule.dependency)
        for child_index in child_indices:
            for parent_index in parent_indices:
                if not parent_index < child_index:
                    if swap:
                        cls._swap_item_with_previous(env_list, parent_index)
                    return False
        return True

    @classmethod
    def sort_env_files_by_rule(cls, env_list: list[EnvFile], rules: list[DependencyRule]) -> list[EnvFile]:
        """Return a copy of env_list reordered so every rule is satisfied.

        Raises ValueError when no compliant order can be found (e.g. a
        circular dependency).
        """
        out_list = env_list.copy()

        # bounded bubble-style repair loop; 10_000 iterations is far more than
        # any realistic number of env files needs, so hitting the bound means
        # the rules are unsatisfiable
        for _ in range(10_000):
            rule_satisfied = [cls.is_rule_satisfied(out_list, rule, swap=True) for rule in rules]
            if all(rule_satisfied):
                return out_list
        raise ValueError(
            "Could not resolve test order. This is possibly due to a circular dependency (a on b, b on c, c on a)"
        )

    @staticmethod
    def copy_env_files(env_files: list[EnvFile], DIR: "DirManager") -> None:
        """Copies all env files to STATES/env_files.

        Files will be renamed to <index>-<type>-<name>. Example:
        00-authentik-login.test.dev.local-it.cloud.env

        Does nothing when called twice with the same env_files. Throws an
        AssertionError if either contents or filenames of env_files have
        changed (probably a test rerun with different input).
        """
        # local import: pytest_abra/__init__ re-exports names from this module,
        # so importing pytest_abra.utils at module level risks an import cycle
        from pytest_abra.utils import files_are_same

        dir_was_not_empty = len(list(DIR.ENV_FILES.iterdir())) > 0

        for index, env_file in enumerate(env_files):
            file_name = "-".join([str(index).zfill(2), env_file.env_type, env_file.env_path.name])
            if dir_was_not_empty:
                # check that the previously copied env files have not changed
                present_files = [f.name for f in DIR.ENV_FILES.iterdir()]
                assert (
                    file_name in present_files and files_are_same(env_file.env_path, DIR.ENV_FILES / file_name)
                ), "It appears that you are resuming a test while the input env files have changed. Start a new test instead"

            shutil.copy(env_file.env_path, DIR.ENV_FILES / file_name)


# --- vendored file: pytest_abra/html_helper.py (continues in the next section) ---
# --- vendored file: pytest_abra/html_helper.py ---
# based on https://github.com/akavbathen/pytest_html_merger/tree/main

import json
import os
import pathlib
import re
import shutil

from bs4 import BeautifulSoup  # type: ignore
from packaging import version

# matches e.g. "3 passed" -> num="3", txt1="passed"
CHECKBOX_REGEX = r"^(?P<num>0|[1-9]\d*) (?P<txt1>.*)"


def _read_soup(path) -> BeautifulSoup:
    """Parse an html file into BeautifulSoup without leaking the file handle.

    fix: the original used "".join(open(path)) in several places, which never
    closes the file object.
    """
    with open(path) as fh:
        return BeautifulSoup(fh.read(), features="html.parser")


def custom_copy_assets(assets_dir_path: "str | None", out_file_path: str):
    """custom function added for pytest_abra

    Copies every asset into the output's assets folder. Excludes style.css as
    this is already handled (inlined) by the merge below."""
    if assets_dir_path is None:
        # fix: pathlib.Path(None) would raise TypeError; the caller handles
        # the missing assets dir with a warning of its own
        return
    assets_source_dir = pathlib.Path(assets_dir_path)
    assets_source_files = [p for p in assets_source_dir.glob("*") if p.is_file() and p.name != "style.css"]
    assets_target_dir = pathlib.Path(out_file_path).parent / "assets"
    assets_target_dir.mkdir(exist_ok=True)
    for asset in assets_source_files:
        shutil.copy(asset, assets_target_dir / asset.name)


def merge_html_reports(in_dir_path: str, out_file_path: str, report_title: str):
    """Merge all pytest-html reports below in_dir_path into one html file."""
    paths = get_html_files(in_dir_path, out_file_path)
    if not paths:
        raise RuntimeError(f"Unable to find html files in {in_dir_path}")

    assets_dir_path = get_assets_path(in_dir_path)

    custom_copy_assets(assets_dir_path, out_file_path)

    first_file = _read_soup(paths[0])
    paths.pop(0)

    try:
        first_file.find("link").decompose()
    except AttributeError:
        # fix: narrowed from a bare except -- find() returns None when no
        # <link> tag is present
        pass

    if assets_dir_path is None:
        print(
            f"Will assume css is embedded in the reports. If this is not the case, "
            f"Please make sure that you have 'assets' directory inside {in_dir_path} "
            f"which contains css files generated by pytest-html."
        )
    else:
        # inline style.css into a <style> tag of the merged report
        with open(os.path.join(assets_dir_path, "style.css"), "r") as f:
            content = f.read()

        head = first_file.head
        head.append(first_file.new_tag("style", type="text/css"))
        head.style.append(content)

    h = first_file.find("h1")
    h.string = report_title or os.path.basename(out_file_path)

    ps = first_file.find_all("p")
    # the first <p> of a pytest-html report ends with the generator version
    pytest_version = ps[0].text.split(" ")[-1]
    ps.pop(0)

    # per result kind: [accumulated count, label text]
    cb_types = {
        "passed": [0, ""],
        "skipped": [0, ""],
        "failed": [0, ""],
        "error": [0, ""],
        "xfailed": [0, ""],
        "xpassed": [0, ""],
    }

    html_ver = version.parse(pytest_version)
    if html_ver >= version.parse("4.0.0rc"):
        cb_types["rerun"] = [0, ""]

    for cb_type in cb_types:
        cb_val = get_checkbox_value(first_file, cb_type)
        cb_types[cb_type][0] = cb_val[0]
        cb_types[cb_type][1] = cb_val[1]

    dur, test_count, fp = get_test_count_and_duration(ps, html_ver)

    if html_ver < version.parse("4.0.0rc"):
        # pytest-html < 4: results live in an html table
        t = first_file.find("table", {"id": "results-table"})
    else:
        # pytest-html >= 4: results live in a JSON blob on a data container
        f_json_blob = first_file.find("div", {"id": "data-container"}).get("data-jsonblob")
        f_data_dict = json.loads(f_json_blob)

    for path in paths:
        cur_file = _read_soup(path)

        if html_ver < version.parse("4.0.0rc"):
            tbody_res = cur_file.find_all("tbody", {"class": "results-table-row"})
            for elm in tbody_res:
                t.append(elm)
        else:
            f_json_blob = cur_file.find("div", {"id": "data-container"}).get("data-jsonblob")
            c_data_dict = json.loads(f_json_blob)
            f_data_dict["tests"].update(c_data_dict["tests"])

        p_res = cur_file.find_all("p")
        _dur, _test_count, _ = get_test_count_and_duration(p_res, html_ver)
        dur += _dur
        test_count += _test_count

        for cb_type in cb_types:
            tmp = get_checkbox_value(cur_file, cb_type)
            cb_types[cb_type][0] += tmp[0]

    fp.string = f"{test_count} tests ran in {dur} seconds"

    if html_ver >= version.parse("4.0.0rc"):
        first_file.find("div", {"id": "data-container"})["data-jsonblob"] = json.dumps(f_data_dict)

    for cb_type in cb_types:
        set_checkbox_value(first_file, cb_type, cb_types[cb_type])

    with open(out_file_path, "w") as f:
        f.write(str(first_file))


def get_test_count_and_duration(ps, html_ver):
    """Extract (duration_seconds, test_count, summary_<p>_tag) from report <p> tags."""
    test_count = 0
    dur = 0
    fp = None

    for p in ps:
        if html_ver >= version.parse("4.0.0"):
            # e.g. "12 tests took 00:01:05." or "3 tests took 250 ms."
            match = re.search(r"test.* took ", p.text)
            if match:
                tmp = p.text.split(" ")
                test_count = int(tmp[0])

                if "ms." in tmp:
                    dur = int(tmp[3]) / 1000
                else:
                    hours, minutes, seconds = map(int, tmp[3][:-1].split(":"))
                    dur = hours * 3600 + minutes * 60 + seconds

                fp = p
                break

        if html_ver < version.parse("4.0.0"):
            # e.g. "12 tests ran in 65.21 seconds."
            if " tests ran" in p.text:
                tmp = p.text.split(" ")
                test_count = int(tmp[0])
                dur = float(tmp[4])
                fp = p
                break

    return dur, test_count, fp


def set_checkbox_value(root_soap, cb_type, val):
    """Write the merged count back into the result-filter checkbox label."""
    elem = root_soap.find("span", {"class": cb_type})
    match = re.search(CHECKBOX_REGEX, elem.text)
    if match is None:
        raise RuntimeError(f"{cb_type} not found")

    elem.string = f"{val[0]} {val[1]}"

    elem = root_soap.find("input", {"data-test-result": cb_type})
    if val[0] != 0:
        # re-enable the filter checkbox when there is at least one such result
        del elem["disabled"]
        del elem["hidden"]


def get_checkbox_value(root_soap, cb_type):
    """Return (count, label_text) of the result-filter checkbox for cb_type."""
    elem = root_soap.find("span", {"class": cb_type})
    match = re.search(CHECKBOX_REGEX, elem.text)
    if match is None:
        raise RuntimeError(f"{cb_type} not found")

    gdict = match.groupdict()
    return int(gdict["num"]), gdict["txt1"]


def get_html_files(path, output_file_path):
    """Return all pytest-html report files below path, excluding the output file."""
    onlyfiles = []
    output_file_path = os.path.abspath(output_file_path)

    for p in pathlib.Path(path).rglob("*.html"):
        res = str(p.absolute())
        if output_file_path in res:
            continue

        tmp = _read_soup(res)
        p = tmp.find("p")
        # only html files actually generated by pytest-html
        if p and "Report generated on " in p.text:
            onlyfiles.append(res)

    return sorted(onlyfiles, reverse=True)


def get_assets_path(path):
    """Return the first 'assets' directory found below path, or None."""
    for p in pathlib.Path(path).rglob("assets"):
        return str(p.absolute())
    return None


# --- pytest_abra/runner.py ---
import os
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Callable, NamedTuple

import pytest
from loguru import logger

from pytest_abra.shared_types import STATUS, TestResult

if TYPE_CHECKING:
    from pytest_abra import Coordinator, DirManager, EnvFile


class ConditionArgs(NamedTuple):
    """Arguments handed to a Test.condition callback."""

    env_config: dict[str, str]
    runner_index: int
    env_files: list["EnvFile"]


@dataclass
class Test:
    """One pytest file plus optional run condition."""

    test_file: str
    condition: Callable[[ConditionArgs], bool] | None = None
    # run even when a previous (resumed) session already recorded it as passed
    prevent_skip: bool = False


class Runner:
    """Base class that executes setup/test/cleanup files for one recipe.

    Subclasses override the class attributes below (see e.g. RunnerAuthentik).
    NOTE(review): the list defaults are class-level and therefore shared --
    subclasses must *replace* them, never mutate them in place.
    """

    env_type: str = ""
    setups: list[Test] = []
    tests: list[Test] = []
    cleanups: list[Test] = []
    dependencies: list[str] = []
    _tests_path: Path = Path()

    def __init__(self, coordinator: "Coordinator", runner_index: int):
        self.coordinator = coordinator
        self.runner_index = runner_index

        self.DIR = coordinator.DIR
        self.ENV = coordinator.ENV
        self.RUNNER_DICT = coordinator.RUNNER_DICT

        logger.info(f"creating instance of {self.__class__.__name__}")

    def run_setups(self) -> list[TestResult]:
        """runs the setup scripts if available"""
        return self._execute_tests_list(self.setups)

    def run_tests(self) -> list[TestResult]:
        """runs the test scripts if available"""
        return self._execute_tests_list(self.tests)

    def run_cleanups(self) -> list[TestResult]:
        """runs the cleanup scripts if available"""
        return self._execute_tests_list(self.cleanups)
If condition is defined, it is also checked.""" + # check if required dependencies have passed + if not self._dependencies_passed(): + logger.warning(f"skipping run_tests() of {self.env_type} (one or more dependencies have not passed)") + return [TestResult("skipped_dep", test.test_file) for test in test_list] + + return [self._run_test_with_checks(test) for test in test_list] + + def _run_test_with_checks(self, test: Test) -> TestResult: + identifier_string = self.combine_names(self.env_type, test.test_file) + + test_files = list(self._tests_path.rglob(test.test_file)) + assert len(test_files) == 1, f"{test.test_file} should exist exactly once, but found {len(test_files)} times" + full_test_path = test_files[0] + + # check if test aleady passed + if self._is_test_passed(self.DIR, identifier_string): + if test.prevent_skip: + logger.info(f"continuing {identifier_string} (passed before but prevent_skip=True)") + else: + logger.info(f"skipping {identifier_string} (test has passed)") + return TestResult("skipped_pas", test.test_file) + + if test.condition: + condition_result = self._call_condition_function(test.condition) + if not condition_result: + # test condition is defined but not met + logger.info(f"skipping {identifier_string} (test condition is not met)") + self._create_status_file(self.DIR, status="skipped_con", identifier_string=identifier_string) + return TestResult("skipped_con", test.test_file) + + # test condition is undefined or not met + logger.info(f"running {identifier_string}") + exit_code = self._call_pytest(full_test_path) + status = self.exit_code_to_str(exit_code) + self._create_status_file(self.DIR, status=status, identifier_string=identifier_string) + return TestResult(status, test.test_file) + + def _call_condition_function(self, condition_function: Callable[[ConditionArgs], bool]): + """run the test condition function with multiple arguments""" + # more arguments can be added later without changing the function signature + conditon_args = 
ConditionArgs( + env_files=self.ENV.env_files, + runner_index=self.runner_index, + env_config=self.ENV.env_files[self.runner_index].env_config, + ) + return condition_function(conditon_args) + + @classmethod + def _create_status_file( + cls, + DIR: "DirManager", + status: STATUS, + identifier_string: str, + ): + """create result file to indicated passed/failed/skipped test""" + + # remove matching files + for status_file in cls._get_status_files(DIR, identifier_string): + status_file.unlink() + + full_name = cls.combine_names(status, identifier_string) + file_path = DIR.STATUS / full_name + with open(file_path, "w") as _: + pass # create empty file + + @staticmethod + def _get_status_files(DIR: "DirManager", identifier_string: str) -> list[Path]: + return [f for f in DIR.STATUS.glob("*") if identifier_string in f.name] + + @classmethod + def _is_test_passed(cls, DIR: "DirManager", identifier_string: str) -> bool: + """returns True if the selected test matching identifier_string already passed""" + + matching_files = cls._get_status_files(DIR, identifier_string) + if len(matching_files) == 1: + status_file = matching_files[0] + if "passed" in status_file.name: + return True + elif len(matching_files) > 1: + logger.warning("more than one matching status file found") + return False + + def _call_pytest(self, full_test_path: Path) -> int: + """runs pytest programmatically with a specific file + + all tests in the file [full_test_path] will be run along with command line arguments""" + + command_arguments = [] + + # command_arguments.append("--traceconfig") + + command_arguments.append("-v") + + command_arguments.append(str(full_test_path)) + + command_arguments.append("--runner_index") + command_arguments.append(str(self.runner_index)) + + # set root dir for tests output (used in DirManager). 
this is our custom argument + command_arguments.append("--output_dir") + command_arguments.append(str(self.DIR.OUTPUT_DIR)) + + command_arguments.append("--session_id") + command_arguments.append(self.DIR.session_id) + + command_arguments.append("--timeout") + command_arguments.append(str(self.coordinator.TIMEOUT)) + + # artifacts dir from pytest + # warning: https://github.com/microsoft/playwright-pytest/issues/111 + # --output only works with the given context and page fixture + # folder needs to be unique! traces will not appear, if every pytest run has same output dir + command_arguments.append("--output") + command_arguments.append(str(self.DIR.RESULTS / "traces" / full_test_path.stem)) + + # tracing + command_arguments.append("--tracing") # "on", "off", "retain-on-failure" + command_arguments.append("retain-on-failure") + + # Disable capturing. With -s set, prints will go to console as if pytest is not there. + if os.environ.get("PWDEBUG") == "1": + command_arguments.append("-s") + command_arguments.append("-s") + + # headed + # command_arguments.append("--headed") + + # html report. Will be combined into one file later. + command_arguments.append(f"--html={self.DIR.RESULTS / 'html' / full_test_path.with_suffix('.html').name}") + + return pytest.main(command_arguments) + + def _dependencies_passed(self): + """returns true if all setups of each dependency have passed""" + + # todo: what about conditional setups? 
+ + passed_tests = [r.name for r in self.DIR.STATUS.glob("*") if "passed" in r.name] + results = [] + for dependency in self.dependencies: + dependency_runner = self.coordinator.RUNNER_DICT[dependency] + for setup_name in dependency_runner.setups: + dependencie_identifier = self.combine_names(dependency_runner.env_type, setup_name.test_file) + results.append(any(dependencie_identifier in f for f in passed_tests)) + return all(results) + + @staticmethod + def exit_code_to_str(result_int: int) -> STATUS: + """converts the pytest exit code (int) into a meaningful string""" + match result_int: + case 0: + return "passed" + case _: + return "failed" + + @staticmethod + def combine_names(*names: str) -> str: + return "-".join(names) diff --git a/pytest_abra/shared_types.py b/pytest_abra/shared_types.py new file mode 100644 index 0000000..ff0e147 --- /dev/null +++ b/pytest_abra/shared_types.py @@ -0,0 +1,16 @@ +from typing import Literal, NamedTuple + +""" +passed: test passed +failed: test failed +skipped_con: test skipped because condition was not met +skipped_dep: test skipped because dependencies did not finish +skipped_pas: test skipped because it passed before +""" + +STATUS = Literal["passed", "failed", "skipped_con", "skipped_dep", "skipped_pas"] + + +class TestResult(NamedTuple): + status: STATUS + test_name: str diff --git a/pytest_abra/utils.py b/pytest_abra/utils.py new file mode 100644 index 0000000..0dfdb5d --- /dev/null +++ b/pytest_abra/utils.py @@ -0,0 +1,86 @@ +import json +import os +import random +import string +from dataclasses import dataclass +from datetime import datetime +from pathlib import Path +from typing import Optional +from urllib.parse import urlunparse + +from loguru import logger + +from pytest_abra.dir_manager import DirManager + + +@dataclass +class BaseUrl: + """utility class to create a url string with urllib""" + + netloc: str + scheme: str = "https" + path: str = "" + params: str = "" + query: str = "" + fragment: str = "" + + def 
get(self, path: str = ""): + return urlunparse((self.scheme, self.netloc, path, self.params, self.query, self.fragment)) + + +def get_datetime_string() -> str: + current_datetime = datetime.now() + return current_datetime.strftime("%Y-%m-%d-%H-%M-%S") + + +def rmtree(root_dir: Path) -> None: + """removes a folder with content recursively""" + if not root_dir.is_dir(): + return + for child in root_dir.iterdir(): + if child.is_dir(): + rmtree(child) + else: + child.unlink() + + root_dir.rmdir() + + +def generate_random_string(length: int, punctuation=False) -> str: + """returns a random string of the given length""" + characters = string.ascii_letters + string.digits + if punctuation: + characters += string.punctuation + random_string = "".join(random.choice(characters) for _ in range(length)) + return random_string + + +def load_json_to_environ(cred_file: Path) -> None: + """Load the contents of a json file directly into os.environ. Variable names are inherited""" + + if not cred_file.is_file(): + logger.warning(f"{cred_file} could not be found, no credentials loaded") + return + + with open(cred_file, "r") as f: + CREDENTIALS = json.load(f) + + for key, value in CREDENTIALS.items(): + os.environ[key] = value + + +def get_session_id(args_output_dir: Path, args_resume: bool, args_session_id: Optional[str]) -> str: + """converts the cli arguments to the correct session_id""" + session_id = args_session_id + if not session_id: + session_id = "test-" + get_datetime_string() + if args_resume: + latest_session_id = DirManager.get_latest_session_id(args_output_dir) + if latest_session_id: + session_id = latest_session_id + return session_id + + +def files_are_same(file1: Path, file2: Path) -> bool: + with open(file1, "r") as f1, open(file2, "r") as f2: + return f1.read() == f2.read() diff --git a/recipes/authentik/tests_authentik/__init__.py b/recipes/authentik/tests_authentik/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
# --- recipes/authentik/tests_authentik/cleanup_authentik.py ---
import json
import os
import re

from playwright.sync_api import BrowserContext

from pytest_abra import BaseUrl, DirManager

# NOTE(review): these raise KeyError at import (collection) time when unset --
# the surrounding tooling is expected to have loaded them beforehand
# (load_json_to_environ). Confirm that is always the case.
ADMIN_USER = os.environ["ADMIN_USER"]
ADMIN_PASS = os.environ["ADMIN_PASS"]
TEST_USER = os.environ["TEST_USER"]
TEST_PASS = os.environ["TEST_PASS"]


def remove_user(admin_context: BrowserContext, URL: BaseUrl):
    """removes TEST_USER account from authentik"""
    page = admin_context.new_page()
    page.goto(URL.get())
    page.get_by_role("link", name="Admin Interface").click()
    # sidebar labels depend on the UI language (German/English)
    nav = page.locator("ak-sidebar-item", has_text=re.compile(r"Directory|Verzeichnis"))
    nav.click()
    nav.get_by_role("link", name=re.compile(r"Users|Benutzer")).click()

    # fix: escape the user name in case it ever contains regex metacharacters
    name_pattern = re.compile(re.escape(TEST_USER))
    page.get_by_role("row", name=name_pattern).get_by_label("").check()
    page.get_by_role("button", name=re.compile(r"Löschen|Delete")).click()
    page.get_by_role("dialog").get_by_role("button", name=re.compile(r"Löschen|Delete")).click()


def test_cleanup_delete_user(
    context: BrowserContext, env_config: dict[str, str], DIR: DirManager, URL: BaseUrl, check_if_user_exists
):
    """Delete the test user via the admin UI and verify it is gone."""
    # load admin cookies (saved during setup) into the fresh context
    state_file = DIR.STATES / "authentik_admin_state.json"
    storage_state = json.loads(state_file.read_bytes())
    context.add_cookies(storage_state["cookies"])

    if check_if_user_exists(context, env_config, URL):
        remove_user(context, URL)
    assert not check_if_user_exists(context, env_config, URL)


# --- recipes/authentik/tests_authentik/conftest.py ---
import os
import re
from typing import Callable, Generator

import pytest
from playwright.sync_api import APIRequestContext, BrowserContext, Playwright, TimeoutError

from pytest_abra import BaseUrl, DirManager


@pytest.fixture(scope="session")
def api_request_context(
    playwright: Playwright,
    DIR: DirManager,
) -> Generator[APIRequestContext, None, None]:
    """API request context authenticated with the saved admin storage state."""
    state_file = DIR.STATES / "authentik_admin_state.json"
    request_context = playwright.request.new_context(storage_state=state_file)
    yield request_context
    request_context.dispose()


@pytest.fixture
def check_if_user_exists() -> Callable[[BrowserContext, dict[str, str], BaseUrl], bool]:
    """This is actually a normal function supplied by a fixture. We do this because
    imports from tests_authentik are difficult, as it is not part of the python
    environment. We expect
        from X import function
    to fail here. However, pytest handles the loading of fixtures from conftest.py
    automatically, hence we use that mechanism to load functions too."""

    def inner_check_if_user_exists(admin_context: BrowserContext, env_config: dict[str, str], URL: BaseUrl) -> bool:
        # go to admin page
        page = admin_context.new_page()
        page.goto(URL.get())
        page.get_by_role("link", name="Admin Interface").click()
        nav = page.locator("ak-sidebar-item", has_text=re.compile(r"Directory|Verzeichnis"))
        nav.click()
        nav.get_by_role("link", name=re.compile(r"Users|Benutzer")).click()

        # short explicit timeout: "not there after 5s" counts as absent
        user = page.get_by_text(os.environ["TEST_USER"])
        try:
            user.wait_for(state="visible", timeout=5_000)
            return True
        except TimeoutError:
            return False

    return inner_check_if_user_exists
+def authentik_admin_context(context: BrowserContext, DIR: DirManager) -> BrowserContext: + state_file = DIR.STATES / "authentik_admin_state.json" + assert state_file.is_file(), "authentik setup did not finish successfully" + storage_state = json.loads(state_file.read_bytes()) + context.add_cookies(storage_state["cookies"]) + return context + + +@pytest.fixture +def authentik_admin_page(authentik_admin_context: BrowserContext, DIR: DirManager) -> Page: + page = authentik_admin_context.new_page() + config = DIR.get_config("authentik") + base_url = BaseUrl(config["DOMAIN"]) + page.goto(base_url.get()) + return page + + +@pytest.fixture +def authentik_user_context(context: BrowserContext, DIR: DirManager) -> BrowserContext: + state_file = DIR.STATES / "authentik_user_state.json" + assert state_file.is_file(), "authentik setup did not finish successfully" + storage_state = json.loads(state_file.read_bytes()) + context.add_cookies(storage_state["cookies"]) + return context + + +@pytest.fixture +def authentik_user_page(authentik_user_context: BrowserContext, DIR: DirManager) -> Page: + page = authentik_user_context.new_page() + config = DIR.get_config("authentik") + base_url = BaseUrl(config["DOMAIN"]) + page.goto(base_url.get()) + return page diff --git a/recipes/authentik/tests_authentik/runner_authentik.py b/recipes/authentik/tests_authentik/runner_authentik.py new file mode 100644 index 0000000..4e9b81a --- /dev/null +++ b/recipes/authentik/tests_authentik/runner_authentik.py @@ -0,0 +1,8 @@ +from pytest_abra import Runner, Test + + +class RunnerAuthentik(Runner): + env_type = "authentik" + setups = [Test(test_file="setup_authentik.py")] + tests = [Test(test_file="test_authentik_blueprint_api.py")] + cleanups = [Test(test_file="cleanup_authentik.py")] diff --git a/recipes/authentik/tests_authentik/setup_authentik.py b/recipes/authentik/tests_authentik/setup_authentik.py new file mode 100644 index 0000000..9dd6aee --- /dev/null +++ 
b/recipes/authentik/tests_authentik/setup_authentik.py @@ -0,0 +1,105 @@ +import json +import os +import re + +from playwright.sync_api import BrowserContext, expect + +from pytest_abra import BaseUrl, DirManager + +ADMIN_USER = os.environ["ADMIN_USER"] +ADMIN_PASS = os.environ["ADMIN_PASS"] +TEST_USER = os.environ["TEST_USER"] +TEST_PASS = os.environ["TEST_PASS"] + + +def test_setup_admin_state(context: BrowserContext, env_config: dict[str, str], DIR: DirManager, URL: BaseUrl): + # go to page + page = context.new_page() + page.goto(URL.get()) + + # check welcome message + welcome_message = env_config.get("welcome_message") + if welcome_message: + expect(page.get_by_text(welcome_message)).to_be_visible() + + # login + page.locator("input[name='uidField']").fill(ADMIN_USER) + page.locator("ak-stage-identification input[name='password']").fill(ADMIN_PASS) + page.get_by_role("button", name="Log In").click() + expect(page.locator("ak-library")).to_be_visible() + + # save state + context.storage_state(path=DIR.STATES / "authentik_admin_state.json") + + +def create_invite_link(admin_context: BrowserContext, env_config: dict[str, str], URL: BaseUrl): + # go to admin page + page = admin_context.new_page() + page.goto(URL.get()) + page.get_by_role("link", name="Admin Interface").click() + + nav = page.locator("ak-sidebar-item", has_text=re.compile(r"Directory|Verzeichnis")) + nav.click() + nav.get_by_role("link", name=re.compile(r"Invitations|Einladungen")).click() + + # todo: only works if no links have been created yet (empty list) + page.get_by_role("cell", name=re.compile(r"Keine Objekte|objects")).get_by_role( + "button" + ).click() # todo: confirm "objects" for en lang + + page.locator('input[name="name"]').click() + linkname = "test_link_123" + page.locator('input[name="name"]').fill(linkname) + placeholder_pattern = re.compile(r"Wählen Sie ein|Select an") + page.get_by_placeholder(placeholder_pattern).click() + page.get_by_role("option", 
name=re.compile(r"invitation-enrollment-flow")).click() + + # force, because else we get "intercepts pointer events" + page.locator("footer").locator("ak-spinner-button").first.click(force=True) + + linklocator = page.get_by_role("rowgroup").filter(has=page.get_by_text(linkname)) + linklocator.locator(".fa-angle-down").click() + # page.get_by_text(linkname).click() + invitelink = linklocator.get_by_role("textbox").get_attribute(name="value") + return invitelink + + +def create_user(user_context: BrowserContext, invitelink): + # warning: only works on german site + page = user_context.new_page() + page.goto(invitelink) + page.get_by_placeholder("Benutzername").click() + page.get_by_placeholder("Benutzername").fill(TEST_USER) + page.locator('input[name="name"]').click() + page.locator('input[name="name"]').fill("name") + page.locator('input[name="email"]').click() + email = os.environ["IMAP_EMAIL"] if "IMAP_EMAIL" in os.environ else "test@domain.com" + page.locator('input[name="email"]').fill(email) + page.get_by_placeholder("Passwort", exact=True).click() + page.get_by_placeholder("Passwort", exact=True).fill(TEST_PASS) + page.get_by_placeholder("Passwort (wiederholen)").click() + page.get_by_placeholder("Passwort (wiederholen)").fill(TEST_PASS) + page.get_by_role("button", name="Weiter").click() + expect(page.locator("ak-library")).to_be_visible() + + +def test_setup_user_state( + context: BrowserContext, env_config: dict[str, str], DIR: DirManager, URL: BaseUrl, check_if_user_exists +): + # load admin cookies to context + state_file = DIR.STATES / "authentik_admin_state.json" + storage_state = json.loads(state_file.read_bytes()) + context.add_cookies(storage_state["cookies"]) + + if check_if_user_exists(context, env_config, URL): + # just login with user + pass + context.clear_cookies() + else: + # get invite_link + invite_link = create_invite_link(context, env_config, URL) + # create user + context.clear_cookies() + create_user(context, invite_link) + + 
context.storage_state(path=DIR.STATES / "authentik_user_state.json") diff --git a/recipes/authentik/tests_authentik/test_authentik_blueprint_api.py b/recipes/authentik/tests_authentik/test_authentik_blueprint_api.py new file mode 100644 index 0000000..cf5914b --- /dev/null +++ b/recipes/authentik/tests_authentik/test_authentik_blueprint_api.py @@ -0,0 +1,39 @@ +# api testing +# https://playwright.dev/python/docs/api-testing + +import pytest_html # type: ignore +from icecream import ic # type: ignore +from playwright.sync_api import APIRequestContext +from tabulate import tabulate # type: ignore + +from pytest_abra import BaseUrl + + +def test_authentik_blueprint_status( + api_request_context: APIRequestContext, + URL: BaseUrl, + extras, +) -> None: + blueprints = api_request_context.get(URL.get("api/v3/managed/blueprints")) + assert blueprints.ok + blueprints_data = blueprints.json() + # ic(blueprints_data) + + # fake failed blueprint + # blueprints_data["results"][10]["status"] = "failed" + + table_data_all = [] + table_data_failed = [] + for item in blueprints_data["results"]: + row = [item["name"], item["enabled"], item["status"]] + table_data_all.append(row) + if item["status"] != "successful": + table_data_failed.append(row) + + table = tabulate(table_data_all, headers=["name", "enabled", "status"]) + extras.append(pytest_html.extras.text(table, name="Authentik Blueprint Status")) + + # with pytest -v (verbose) the failed blueprints will be visible in the traceback + assert ( + table_data_failed == [] + ), "One or more blueprints were not successful. 
See Authentik Blueprint Status in html report" diff --git a/recipes/nextcloud/tests_nextcloud/__init__.py b/recipes/nextcloud/tests_nextcloud/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/recipes/nextcloud/tests_nextcloud/cleanup_nextcloud.py b/recipes/nextcloud/tests_nextcloud/cleanup_nextcloud.py new file mode 100644 index 0000000..22e4f4f --- /dev/null +++ b/recipes/nextcloud/tests_nextcloud/cleanup_nextcloud.py @@ -0,0 +1,17 @@ +import os + +from playwright.sync_api import Page + + +def delete_nextcloud_user(authentik_admin_page: Page): + """Delete Nextcloud User""" + with authentik_admin_page.expect_popup() as nextcloud_info: + authentik_admin_page.get_by_role("link", name="Nextcloud").click() + nextcloud = nextcloud_info.value + nextcloud.get_by_role("link", name="Open settings menu").click() + nextcloud.get_by_role("link", name="Users").click() + nextcloud.locator("#app-content div").filter(has_text=os.environ["NEXTCLOUD_USER"]).get_by_role( + "button", name="Toggle user actions menu" + ).click() + nextcloud.get_by_role("button", name="Delete user").click() + nextcloud.get_by_role("button", name=f"Delete authentik-{os.environ['NEXTCLOUD_USER']}'s account").click() diff --git a/recipes/nextcloud/tests_nextcloud/conftest.py b/recipes/nextcloud/tests_nextcloud/conftest.py new file mode 100644 index 0000000..4151c7a --- /dev/null +++ b/recipes/nextcloud/tests_nextcloud/conftest.py @@ -0,0 +1,32 @@ +import json +import os + +import pytest +from playwright.sync_api import BrowserContext, Page + +from pytest_abra import BaseUrl, DirManager + +pytest_plugins = "authentik.tests_authentik.fixtures_authentik" + +NEXTCLOUD_DEMO_USER = { + "NEXTCLOUD_USER": "next_demo_user", + "NEXTCLOUD_PASS": "P@ss.123", +} + +for key, value in NEXTCLOUD_DEMO_USER.items(): + os.environ[key] = value + + +@pytest.fixture +def nextcloud_admin_context(context: BrowserContext, DIR: DirManager) -> BrowserContext: + state_file = DIR.STATES / "nextcloud_admin_state.json"
+ storage_state = json.loads(state_file.read_bytes()) + context.add_cookies(storage_state["cookies"]) + return context + + +@pytest.fixture +def nextcloud_admin_page(nextcloud_admin_context: BrowserContext, DIR: DirManager, URL: BaseUrl) -> Page: + page = nextcloud_admin_context.new_page() + page.goto(URL.get("/apps/files")) + return page diff --git a/recipes/nextcloud/tests_nextcloud/runner_nextcloud.py b/recipes/nextcloud/tests_nextcloud/runner_nextcloud.py new file mode 100644 index 0000000..fe56587 --- /dev/null +++ b/recipes/nextcloud/tests_nextcloud/runner_nextcloud.py @@ -0,0 +1,12 @@ +from pytest_abra import Runner, Test + + +class RunnerNextcloud(Runner): + env_type = "nextcloud" + dependencies = ["authentik"] + setups = [Test(test_file="setup_nextcloud.py", prevent_skip=False)] + tests = [ + Test(test_file="tests_nextcloud.py"), + # Test(condition=condition_always_false, test_file="tests_nextcloud_onlyoffice.py"), + ] + # cleanups = [Test(test_file="cleanup_nextcloud.py")] diff --git a/recipes/nextcloud/tests_nextcloud/setup_nextcloud.py b/recipes/nextcloud/tests_nextcloud/setup_nextcloud.py new file mode 100644 index 0000000..378c1db --- /dev/null +++ b/recipes/nextcloud/tests_nextcloud/setup_nextcloud.py @@ -0,0 +1,25 @@ +import re + +from playwright.sync_api import Page, expect + +from pytest_abra import BaseUrl, DirManager + +# url dashboard +# https://files.test.dev.local-it.cloud/apps/dashboard/ +# url files +# https://files.test.dev.local-it.cloud/apps/files/ + + +def test_setup_nextcloud_admin_session(authentik_admin_page: Page, DIR: DirManager, URL: BaseUrl): + """visit nextcloud from authentik with admin_session to create wordpress_admin_session""" + with authentik_admin_page.expect_popup() as event_context: + authentik_admin_page.get_by_role("link", name="Nextcloud").click() + page_nextcloud = event_context.value + context = page_nextcloud.context + + # expect quota stats on files page to confirm successful login + 
page_nextcloud.goto(URL.get("/apps/files")) + quota_pattern = re.compile(r"\d*,\d .* (\d*,\d).") + expect(page_nextcloud.get_by_text(quota_pattern)).to_be_visible() + + context.storage_state(path=DIR.STATES / "nextcloud_admin_state.json") diff --git a/recipes/nextcloud/tests_nextcloud/tests_nextcloud.py b/recipes/nextcloud/tests_nextcloud/tests_nextcloud.py new file mode 100644 index 0000000..496b3ae --- /dev/null +++ b/recipes/nextcloud/tests_nextcloud/tests_nextcloud.py @@ -0,0 +1,32 @@ +import re + +import pytest +from playwright.sync_api import Page, expect + + +def test_nextcloud_quota(nextcloud_admin_page: Page, env_config: dict[str, str]): + """Tests if the quota set in .env file matches the actual quota shown on the page within 10%""" + if env_config.get("DEFAULT_QUOTA"): + # get quota from website + quota_string = nextcloud_admin_page.get_by_text( + re.compile(r"\d*,\d .* \d*,\d") + ).inner_text() # "37,7 MB von 104,9 MB verwendet" + out = re.search(r"\d*,\d .* (\d*,\d).", quota_string) + out_number = out[1] # 104,9 + out_number = out_number.replace(",", ".") + quota_website = float(out_number) + + # get quota from env + quota_config_string = env_config["DEFAULT_QUOTA"] # "100 MB" + assert "MB" in quota_config_string + quota_config = float(quota_config_string.strip("MB")) + + assert quota_website == pytest.approx(quota_config, rel=0.1) # within 10% + else: + pytest.skip("DEFAULT_QUOTA not defined in env file") + + +@pytest.mark.skip +def test_nextcloud_apps(nextcloud_admin_page: Page, env_config: dict[str, str]): + for app in env_config["nc_apps"]: + expect(nextcloud_admin_page.get_by_role("link", name=app)).to_be_visible() diff --git a/recipes/nextcloud/tests_nextcloud/tests_nextcloud_onlyoffice.py b/recipes/nextcloud/tests_nextcloud/tests_nextcloud_onlyoffice.py new file mode 100644 index 0000000..0b78e5a --- /dev/null +++ b/recipes/nextcloud/tests_nextcloud/tests_nextcloud_onlyoffice.py @@ -0,0 +1,19 @@ +def test_onlyoffice(nc_session): + """Test 
Onlyoffice in Nextcloud""" + context, page = nc_session + # if page.query_selector('.close-icon'): + # page.get_by_role("button", name="Close modal").click() + page.get_by_role("link", name="New file/folder menu").click() + page.get_by_role("link", name="New document").click() + page.locator("#view9-input-file").fill("test.docx") + page.get_by_role("button", name="Submit").click() + outer_frame = page.frame_locator("#onlyofficeFrame") + check_for(outer_frame.locator("body")) + inner_frame = outer_frame.frame_locator("#app > iframe") + check_for(inner_frame.locator("body")) + onlyoffice = page.frame("frameEditor") + check_for(onlyoffice.locator('//*[@id="area_id"]')) + onlyoffice.locator("#btn-goback").click() + page.get_by_role("link", name="Not favorited test .docx Share Actions").get_by_role("link", name="Actions").click() + page.get_by_role("link", name="Delete file").click() + context.tracing.stop(path=f"{RECORDS}/onlyoffice.zip") diff --git a/recipes/wordpress/tests_wordpress/__init__.py b/recipes/wordpress/tests_wordpress/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/recipes/wordpress/tests_wordpress/conftest.py b/recipes/wordpress/tests_wordpress/conftest.py new file mode 100644 index 0000000..69fa4cf --- /dev/null +++ b/recipes/wordpress/tests_wordpress/conftest.py @@ -0,0 +1,23 @@ +import json + +import pytest +from playwright.sync_api import BrowserContext, Page + +from pytest_abra import BaseUrl, DirManager + +pytest_plugins = "authentik.tests_authentik.fixtures_authentik" + + +@pytest.fixture +def wordpress_admin_context(context: BrowserContext, DIR: DirManager) -> BrowserContext: + state_file = DIR.STATES / "wordpress_admin_state.json" + storage_state = json.loads(state_file.read_bytes()) + context.add_cookies(storage_state["cookies"]) + return context + + +@pytest.fixture +def wordpress_admin_page(wordpress_admin_context: BrowserContext, URL: BaseUrl) -> Page: + page = wordpress_admin_context.new_page() + page.goto(URL.get()) + 
return page diff --git a/recipes/wordpress/tests_wordpress/runner_wordpress.py b/recipes/wordpress/tests_wordpress/runner_wordpress.py new file mode 100644 index 0000000..aef1273 --- /dev/null +++ b/recipes/wordpress/tests_wordpress/runner_wordpress.py @@ -0,0 +1,22 @@ +from pytest_abra import ConditionArgs, Runner, Test + + +def env_config_has_locale(args: ConditionArgs) -> bool: + env_config = args.env_config + if "LOCALE" in env_config: + return True + else: + return False + + +class RunnerWordpress(Runner): + env_type = "wordpress" + dependencies = ["authentik"] + setups = [ + Test(test_file="setup_wordpress.py"), + Test(test_file="setup_wordpress_trigger_email.py"), + ] + tests = [ + # Test(test_file="test_wordpress_receive_email.py", prevent_skip=True), + Test(condition=env_config_has_locale, test_file="test_wordpress_localization.py"), + ] diff --git a/recipes/wordpress/tests_wordpress/setup_wordpress.py b/recipes/wordpress/tests_wordpress/setup_wordpress.py new file mode 100644 index 0000000..3438db7 --- /dev/null +++ b/recipes/wordpress/tests_wordpress/setup_wordpress.py @@ -0,0 +1,27 @@ +import pytest +from playwright.sync_api import BrowserContext, Page, expect + +from pytest_abra import BaseUrl, DirManager + + +def test_visit_from_domain(authentik_admin_context: BrowserContext, URL: BaseUrl): + """visit wordpress directly with admin_session, expect not to be logged in""" + page = authentik_admin_context.new_page() + page.goto(URL.get()) + with pytest.raises(AssertionError): + # look for admin bar + expect(page.locator("#wpadminbar")).to_be_visible(timeout=3_000) + + +def test_setup_wordpress_admin_session(authentik_admin_page: Page, DIR: DirManager): + """visit wordpress from authentik with admin_session to create wordpress_admin_session""" + with authentik_admin_page.expect_popup() as event_context: + authentik_admin_page.get_by_role("link", name="Wordpress").click() + page_wordpress = event_context.value + # look for content wrapper + 
expect(page_wordpress.locator("#wpcontent")).to_be_visible() + # look for admin bar + expect(page_wordpress.locator("#wpadminbar")).to_be_visible() + # save session + context = page_wordpress.context + context.storage_state(path=DIR.STATES / "wordpress_admin_state.json") diff --git a/recipes/wordpress/tests_wordpress/setup_wordpress_trigger_email.py b/recipes/wordpress/tests_wordpress/setup_wordpress_trigger_email.py new file mode 100644 index 0000000..9ae1e28 --- /dev/null +++ b/recipes/wordpress/tests_wordpress/setup_wordpress_trigger_email.py @@ -0,0 +1,16 @@ +import os + +from playwright.sync_api import Page, expect + +from pytest_abra import BaseUrl + + +def test_setup_trigger_email(wordpress_admin_page: Page, URL: BaseUrl): + """change profile email to EMAIL to trigger email""" + page = wordpress_admin_page + page.goto(URL.get("wp-admin/profile.php")) + EMAIL = os.environ["IMAP_EMAIL"] + page.locator("input[id='email']").fill(EMAIL) + page.locator("input[id='submit']").click() + + expect(page.locator("div.notice").get_by_text(EMAIL)).to_be_visible() diff --git a/recipes/wordpress/tests_wordpress/test_wordpress_localization.py b/recipes/wordpress/tests_wordpress/test_wordpress_localization.py new file mode 100644 index 0000000..2e01d79 --- /dev/null +++ b/recipes/wordpress/tests_wordpress/test_wordpress_localization.py @@ -0,0 +1,14 @@ +# WIP localization + +from playwright.sync_api import BrowserContext, expect + +from pytest_abra import BaseUrl + + +def test_de_welcome_message(context: BrowserContext, env_config: dict[str, str], URL: BaseUrl): + page = context.new_page() + page.goto(URL.get()) + + expect(page.locator(".wp-block-heading")).to_be_visible() + if "de" in env_config.get("LOCALE", ""): + expect(page.get_by_role("heading")).to_have_text("Willkommen bei WordPress!") diff --git a/recipes/wordpress/tests_wordpress/test_wordpress_receive_email.py
b/recipes/wordpress/tests_wordpress/test_wordpress_receive_email.py new file mode 100644 index 0000000..f806d13 --- /dev/null +++ b/recipes/wordpress/tests_wordpress/test_wordpress_receive_email.py @@ -0,0 +1,15 @@ +import pytest +from icecream import ic + +from pytest_abra.custom_fixtures import Message + + +@pytest.mark.skip +def test_demo(imap_recent_messages: list[Message]): + for message in imap_recent_messages: + print(dir(message)) + ic(message.subject) + ic(message.body["plain"]) + + exit() + assert False diff --git a/tests/assets/html_merge/assets/recipes_authentik_tests_authentik_test_authentik_blueprint_api.py__test_should_create_bug_report_0_0.txt b/tests/assets/html_merge/assets/recipes_authentik_tests_authentik_test_authentik_blueprint_api.py__test_should_create_bug_report_0_0.txt new file mode 100644 index 0000000..eba0ff3 --- /dev/null +++ b/tests/assets/html_merge/assets/recipes_authentik_tests_authentik_test_authentik_blueprint_api.py__test_should_create_bug_report_0_0.txt @@ -0,0 +1,33 @@ +name enabled status +-------------------------------------------------------- --------- ---------- +Custom Invalidation Flow True successful +System - SCIM Provider - Mappings True successful +System - OAuth2 Provider - Scopes True successful +System - SAML Provider - Mappings True successful +System - LDAP Source - Mappings True successful +Migration - Remove old prompt fields True successful +Default - Events Transport & Rules True successful +Default - Source pre-authentication flow True successful +Default - TOTP MFA setup flow True successful +Default - WebAuthn MFA setup flow True successful +Default - Provider authorization flow (explicit consent) True failed +Default - Source authentication flow True successful +Default - Provider authorization flow (implicit consent) True successful +Default - Static MFA setup flow True successful +matrix True successful +Custom System Tenant True successful +Nextcloud True successful +Wordpress True successful +Custom 
Authentication Flow True successful +wekan True successful +Default - Invalidation flow True successful +Default - Tenant True successful +Flow Translations True successful +Default - User settings flow False successful +Default - Source enrollment flow False successful +Invitation Enrollment Flow True successful +vikunja True successful +Default - Password change flow False successful +Default - Authentication flow False successful +Recovery with email verification True successful +System - Proxy Provider - Scopes True successful \ No newline at end of file diff --git a/tests/assets/html_merge/assets/style.css b/tests/assets/html_merge/assets/style.css new file mode 100644 index 0000000..561524c --- /dev/null +++ b/tests/assets/html_merge/assets/style.css @@ -0,0 +1,319 @@ +body { + font-family: Helvetica, Arial, sans-serif; + font-size: 12px; + /* do not increase min-width as some may use split screens */ + min-width: 800px; + color: #999; +} + +h1 { + font-size: 24px; + color: black; +} + +h2 { + font-size: 16px; + color: black; +} + +p { + color: black; +} + +a { + color: #999; +} + +table { + border-collapse: collapse; +} + +/****************************** + * SUMMARY INFORMATION + ******************************/ +#environment td { + padding: 5px; + border: 1px solid #e6e6e6; + vertical-align: top; +} +#environment tr:nth-child(odd) { + background-color: #f6f6f6; +} +#environment ul { + margin: 0; + padding: 0 20px; +} + +/****************************** + * TEST RESULT COLORS + ******************************/ +span.passed, +.passed .col-result { + color: green; +} + +span.skipped, +span.xfailed, +span.rerun, +.skipped .col-result, +.xfailed .col-result, +.rerun .col-result { + color: orange; +} + +span.error, +span.failed, +span.xpassed, +.error .col-result, +.failed .col-result, +.xpassed .col-result { + color: red; +} + +.col-links__extra { + margin-right: 3px; +} + +/****************************** + * RESULTS TABLE + * + * 1. Table Layout + * 2. 
Extra + * 3. Sorting items + * + ******************************/ +/*------------------ + * 1. Table Layout + *------------------*/ +#results-table { + border: 1px solid #e6e6e6; + color: #999; + font-size: 12px; + width: 100%; +} +#results-table th, +#results-table td { + padding: 5px; + border: 1px solid #e6e6e6; + text-align: left; +} +#results-table th { + font-weight: bold; +} + +/*------------------ + * 2. Extra + *------------------*/ +.logwrapper { + max-height: 230px; + overflow-y: scroll; + background-color: #e6e6e6; +} +.logwrapper.expanded { + max-height: none; +} +.logwrapper.expanded .logexpander:after { + content: "collapse [-]"; +} +.logwrapper .logexpander { + z-index: 1; + position: sticky; + top: 10px; + width: max-content; + border: 1px solid; + border-radius: 3px; + padding: 5px 7px; + margin: 10px 0 10px calc(100% - 80px); + cursor: pointer; + background-color: #e6e6e6; +} +.logwrapper .logexpander:after { + content: "expand [+]"; +} +.logwrapper .logexpander:hover { + color: #000; + border-color: #000; +} +.logwrapper .log { + min-height: 40px; + position: relative; + top: -50px; + height: calc(100% + 50px); + border: 1px solid #e6e6e6; + color: black; + display: block; + font-family: "Courier New", Courier, monospace; + padding: 5px; + padding-right: 80px; + white-space: pre-wrap; +} + +div.media { + border: 1px solid #e6e6e6; + float: right; + height: 240px; + margin: 0 5px; + overflow: hidden; + width: 320px; +} + +.media-container { + display: grid; + grid-template-columns: 25px auto 25px; + align-items: center; + flex: 1 1; + overflow: hidden; + height: 200px; +} + +.media-container--fullscreen { + grid-template-columns: 0px auto 0px; +} + +.media-container__nav--right, +.media-container__nav--left { + text-align: center; + cursor: pointer; +} + +.media-container__viewport { + cursor: pointer; + text-align: center; + height: inherit; +} +.media-container__viewport img, +.media-container__viewport video { + object-fit: cover; + width: 
100%; + max-height: 100%; +} + +.media__name, +.media__counter { + display: flex; + flex-direction: row; + justify-content: space-around; + flex: 0 0 25px; + align-items: center; +} + +.collapsible td:not(.col-links) { + cursor: pointer; +} +.collapsible td:not(.col-links):hover::after { + color: #bbb; + font-style: italic; + cursor: pointer; +} + +.col-result { + width: 130px; +} +.col-result:hover::after { + content: " (hide details)"; +} + +.col-result.collapsed:hover::after { + content: " (show details)"; +} + +#environment-header h2:hover::after { + content: " (hide details)"; + color: #bbb; + font-style: italic; + cursor: pointer; + font-size: 12px; +} + +#environment-header.collapsed h2:hover::after { + content: " (show details)"; + color: #bbb; + font-style: italic; + cursor: pointer; + font-size: 12px; +} + +/*------------------ + * 3. Sorting items + *------------------*/ +.sortable { + cursor: pointer; +} +.sortable.desc:after { + content: " "; + position: relative; + left: 5px; + bottom: -12.5px; + border: 10px solid #4caf50; + border-bottom: 0; + border-left-color: transparent; + border-right-color: transparent; +} +.sortable.asc:after { + content: " "; + position: relative; + left: 5px; + bottom: 12.5px; + border: 10px solid #4caf50; + border-top: 0; + border-left-color: transparent; + border-right-color: transparent; +} + +.hidden, .summary__reload__button.hidden { + display: none; +} + +.summary__data { + flex: 0 0 550px; +} +.summary__reload { + flex: 1 1; + display: flex; + justify-content: center; +} +.summary__reload__button { + flex: 0 0 300px; + display: flex; + color: white; + font-weight: bold; + background-color: #4caf50; + text-align: center; + justify-content: center; + align-items: center; + border-radius: 3px; + cursor: pointer; +} +.summary__reload__button:hover { + background-color: #46a049; +} +.summary__spacer { + flex: 0 0 550px; +} + +.controls { + display: flex; + justify-content: space-between; +} + +.filters, +.collapse { + 
display: flex; + align-items: center; +} +.filters button, +.collapse button { + color: #999; + border: none; + background: none; + cursor: pointer; + text-decoration: underline; +} +.filters button:hover, +.collapse button:hover { + color: #ccc; +} + +.filter__label { + margin-right: 10px; +} diff --git a/tests/assets/html_merge/setup_wordpress.html b/tests/assets/html_merge/setup_wordpress.html new file mode 100644 index 0000000..43180e8 --- /dev/null +++ b/tests/assets/html_merge/setup_wordpress.html @@ -0,0 +1,770 @@ + + + + + setup_wordpress.html + + + +

setup_wordpress.html

+

Report generated on 08-Dec-2023 at 14:55:57 by pytest-html + v4.1.1

+
+

Environment

+
+
+ + + + + +
+
+

Summary

+
+
+

2 tests took 00:00:11.

+

(Un)check the boxes to filter the results.

+
+ +
+
+
+
+ + 0 Failed, + + 2 Passed, + + 0 Skipped, + + 0 Expected failures, + + 0 Unexpected passes, + + 0 Errors, + + 0 Reruns +
+
+  /  +
+
+
+
+
+
+
+
+ + + + + + + + + +
ResultTestDurationLinks
+ +
+
+ +
+ \ No newline at end of file diff --git a/tests/assets/html_merge/test_authentik_blueprint_api.html b/tests/assets/html_merge/test_authentik_blueprint_api.html new file mode 100644 index 0000000..37897c3 --- /dev/null +++ b/tests/assets/html_merge/test_authentik_blueprint_api.html @@ -0,0 +1,770 @@ + + + + + test_authentik_blueprint_api.html + + + +

test_authentik_blueprint_api.html

+

Report generated on 09-Dec-2023 at 12:22:45 by pytest-html + v4.1.1

+
+

Environment

+
+
+ + + + + +
+
+

Summary

+
+
+

1 test took 00:00:01.

+

(Un)check the boxes to filter the results.

+
+ +
+
+
+
+ + 1 Failed, + + 0 Passed, + + 0 Skipped, + + 0 Expected failures, + + 0 Unexpected passes, + + 0 Errors, + + 0 Reruns +
+
+  /  +
+
+
+
+
+
+
+
+ + + + + + + + + +
ResultTestDurationLinks
+ +
+
+ +
+ \ No newline at end of file diff --git a/tests/assets/html_merge/test_wordpress_receive_email.html b/tests/assets/html_merge/test_wordpress_receive_email.html new file mode 100644 index 0000000..b29f75a --- /dev/null +++ b/tests/assets/html_merge/test_wordpress_receive_email.html @@ -0,0 +1,770 @@ + + + + + test_wordpress_receive_email.html + + + +

test_wordpress_receive_email.html

+

Report generated on 08-Dec-2023 at 16:00:41 by pytest-html + v4.1.1

+
+

Environment

+
+
+ + + + + +
+
+

Summary

+
+
+

1 test took 946 ms.

+

(Un)check the boxes to filter the results.

+
+ +
+
+
+
+ + 1 Failed, + + 0 Passed, + + 0 Skipped, + + 0 Expected failures, + + 0 Unexpected passes, + + 0 Errors, + + 0 Reruns +
+
+  /  +
+
+
+
+
+
+
+
+ + + + + + + + + +
ResultTestDurationLinks
+ + + \ No newline at end of file diff --git a/tests/test_cli.py b/tests/test_cli.py new file mode 100644 index 0000000..8a94ee6 --- /dev/null +++ b/tests/test_cli.py @@ -0,0 +1,54 @@ +import re +import time +from pathlib import Path + +import pytest + +from pytest_abra import DirManager +from pytest_abra.utils import get_session_id + + +def test_get_session_id_random(tmp_path: Path): + args_output_dir = tmp_path + args_resume = False + args_session_id = None + session_id = get_session_id(args_output_dir, args_resume, args_session_id) + assert re.search(r"\d+-\d+-\d+", session_id) + + +def test_get_session_id_explicit1(tmp_path: Path): + args_output_dir = tmp_path + args_resume = False + args_session_id = "abc" + session_id = get_session_id(args_output_dir, args_resume, args_session_id) + assert session_id == "abc" + + +def test_get_session_id_explicit2(tmp_path: Path): + args_output_dir = tmp_path + args_resume = True + args_session_id = "abc" + session_id = get_session_id(args_output_dir, args_resume, args_session_id) + assert session_id == "abc" + + +@pytest.mark.slow +def test_get_session_id_integration(tmp_path: Path): + assert len(list(tmp_path.iterdir())) == 0 + session_id_1 = get_session_id(args_output_dir=tmp_path, args_resume=False, args_session_id=None) + + DIR = DirManager(output_dir=tmp_path, session_id=session_id_1) + DIR.create_all_dirs() + assert len(list(tmp_path.iterdir())) == 1 + + time.sleep(1.1) # get_session_id won't be unique if called without time passed + session_id_2 = get_session_id(args_output_dir=tmp_path, args_resume=False, args_session_id=None) + DIR = DirManager(output_dir=tmp_path, session_id=session_id_2) + DIR.create_all_dirs() + assert len(list(tmp_path.iterdir())) == 2 + + session_id_3 = get_session_id(args_output_dir=tmp_path, args_resume=True, args_session_id=None) + assert session_id_2 == session_id_3 + + session_id_4 = get_session_id(args_output_dir=tmp_path, args_resume=True, args_session_id="abc") + assert session_id_4 == 
"abc" diff --git a/tests/test_cli_full_integration.py b/tests/test_cli_full_integration.py new file mode 100644 index 0000000..fcf39b9 --- /dev/null +++ b/tests/test_cli_full_integration.py @@ -0,0 +1,81 @@ +import shutil +import subprocess +from pathlib import Path + +import pytest + +from pytest_abra import DirManager +from pytest_abra.utils import load_json_to_environ + + +@pytest.fixture(scope="session") +def tmp_recipes(tmp_path_factory: pytest.TempPathFactory) -> Path: + tmp_recipes_target = tmp_path_factory.mktemp("recipes") + recipes_dir_source = Path("recipes") + shutil.copytree(recipes_dir_source, tmp_recipes_target, dirs_exist_ok=True) + return tmp_recipes_target + + +@pytest.fixture(scope="session") +def tmp_output(tmp_path_factory: pytest.TempPathFactory) -> Path: + return tmp_path_factory.mktemp("output") + + +@pytest.mark.slow +def test_abratest_cli_full_integration(tmp_output: Path, tmp_recipes: Path): + """Full integration test of abratest against the dev instance. Recipes dir not in path + + this test is hard to debug as the output dir is in tmp. 
If required, try + pytest -s + or find the tmp dir to look into test outputs""" + + # --------------------- load credentials to env variables -------------------- # + + cred_file = Path("credentials.json") + load_json_to_environ(cred_file) + + # --------------------------------- env files -------------------------------- # + + ENV_FILES_ROOT = Path("./envfiles").resolve() + ENV_FILES = [ + ENV_FILES_ROOT / "login.test.dev.local-it.cloud.env", # authentik + ENV_FILES_ROOT / "blog.test.dev.local-it.cloud.env", # wordpress + ENV_FILES_ROOT / "files.test.dev.local-it.cloud.env", # nextcloud + ] + ENV_PATHS = ";".join([x.as_posix() for x in ENV_FILES]) + + # ----------------------------------- dirs ----------------------------------- # + + RECIPES_DIR = tmp_recipes.resolve() + # RECIPES_DIR = Path("recipes") + OUTPUT_DIR = tmp_output.resolve() + + # ------------------------------------ run ----------------------------------- # + + result = subprocess.run( + [ + "abratest", + "--env_paths", + ENV_PATHS, + "--recipes_dir", + RECIPES_DIR, + "--output_dir", + OUTPUT_DIR, + "--session_id", + "abc", + ] + ) + + assert result.returncode == 0 + + +@pytest.mark.slow +def test_full_integration_results(tmp_output: Path): + OUTPUT_DIR = tmp_output.resolve() + + DIR = DirManager(output_dir=OUTPUT_DIR, session_id="abc") + all_files = [f.name for f in DIR.STATUS.rglob("*")] + passed_files = [f.name for f in DIR.STATUS.rglob("passed-*")] + failed_files = set(all_files) - set(passed_files) + assert len(all_files) > 0 + assert not failed_files, failed_files diff --git a/tests/test_coordinator.py b/tests/test_coordinator.py new file mode 100644 index 0000000..08b7975 --- /dev/null +++ b/tests/test_coordinator.py @@ -0,0 +1,53 @@ +import os +import shutil +import sys +from pathlib import Path + +import pytest + +from pytest_abra.coordinator import Coordinator +from pytest_abra.dir_manager import DirManager + + +def test_load_test_credentials(tmp_path: Path): + assert "TEST_USER" not in 
os.environ + + DIR = DirManager(output_dir=tmp_path, session_id="abc") + DIR.create_all_dirs() + + Coordinator.load_test_credentials(DIR) + assert (DIR.STATES / "credentials_test.json").is_file() + + assert "TEST_USER" in os.environ + test_user_before = os.environ["TEST_USER"] + + # os.environ.clear() # this breaks pytest! + del os.environ["TEST_USER"] + assert "TEST_USER" not in os.environ + + Coordinator.load_test_credentials(DIR) + assert test_user_before == os.environ["TEST_USER"] + + +@pytest.fixture(scope="session") +def tmp_recipes(tmp_path_factory: pytest.TempPathFactory) -> Path: + tmp_recipes_target = tmp_path_factory.mktemp("recipes") + recipes_dir_source = Path("recipes") + shutil.copytree(recipes_dir_source, tmp_recipes_target, dirs_exist_ok=True) + return tmp_recipes_target + + +@pytest.fixture +def clear_sys_path(): + """clear sys.path before test, restore after""" + syspath_copy = sys.path.copy() + sys.path.clear() + yield + sys.path.extend(syspath_copy) + + +def test_runner_runner_dict_import(tmp_recipes: Path, clear_sys_path): + """import from recipes dict should work, because create_runner_dict has sys.path.append""" + + RUNNER_DICT = Coordinator.create_runner_dict(tmp_recipes) + assert len(RUNNER_DICT.keys()) > 0 diff --git a/tests/test_dir_manager.py b/tests/test_dir_manager.py new file mode 100644 index 0000000..caee6fa --- /dev/null +++ b/tests/test_dir_manager.py @@ -0,0 +1,31 @@ +import time +from pathlib import Path + +import pytest + +from pytest_abra.dir_manager import DirManager + + +def test_get_latest_session_id_from_non_existing_dir(tmp_path: Path): + out = DirManager.get_latest_session_id(tmp_path / "not_exist") + assert out is None + + +def test_get_latest_session_id_from_empty_dir(tmp_path: Path): + out = DirManager.get_latest_session_id(tmp_path) + assert out is None + + +def test_get_latest_session_id_single(tmp_path: Path): + (tmp_path / "a").mkdir() + out = DirManager.get_latest_session_id(tmp_path) + assert out == "a" + + 
+@pytest.mark.slow +def test_get_latest_session_id(tmp_path: Path): + (tmp_path / "a").mkdir() + time.sleep(1.1) + (tmp_path / "b").mkdir() + out = DirManager.get_latest_session_id(tmp_path) + assert out == "b" diff --git a/tests/test_env_manager.py b/tests/test_env_manager.py new file mode 100644 index 0000000..05255f4 --- /dev/null +++ b/tests/test_env_manager.py @@ -0,0 +1,137 @@ +import shutil +from pathlib import Path + +import pytest + +from pytest_abra.dir_manager import DirManager +from pytest_abra.env_manager import EnvManager +from pytest_abra.utils import files_are_same + +ENV_PATHS = [ + Path("envfiles/blog.test.dev.local-it.cloud.env"), # wordpress + Path("envfiles/login.test.dev.local-it.cloud.env"), # authentik + Path("envfiles/login.test.dev.local-it.cloud.env"), # authentik +] + + +@pytest.fixture +def tmp_output(tmp_path_factory: pytest.TempPathFactory) -> Path: + return tmp_path_factory.mktemp("output") + + +@pytest.fixture +def tmp_recipes(tmp_path_factory: pytest.TempPathFactory) -> Path: + return tmp_path_factory.mktemp("recipes") + + +def test_copy_env_files(tmp_output: Path, tmp_recipes: Path): + # create dirs in output + DIR = DirManager(output_dir=tmp_output, session_id="abc", recipes_dir=tmp_recipes) + DIR.create_all_dirs() + + # confirm dir is empty + assert len(list(DIR.ENV_FILES.iterdir())) == 0 + + # copy env files + env_files = EnvManager._get_env_files(ENV_PATHS) + EnvManager.copy_env_files(env_files, DIR) + + # check that each env file is present in DIR.ENV_FILES with correct contents + assert len(list(DIR.ENV_FILES.iterdir())) == len(env_files) + for index, env_path in enumerate(ENV_PATHS): + matching_files = [f for f in DIR.ENV_FILES.iterdir() if index == int(f.name.split("-")[0])] + assert len(matching_files) == 1 + assert files_are_same(env_path, matching_files[0]) + + +def test_copy_env_files_twice(tmp_output: Path, tmp_recipes: Path): + """Copy the same env files twice""" + # create dirs in output + DIR = 
DirManager(output_dir=tmp_output, session_id="abc", recipes_dir=tmp_recipes) + DIR.create_all_dirs() + + # confirm dir is empty + assert len(list(DIR.ENV_FILES.iterdir())) == 0 + + # copy env files + env_files = EnvManager._get_env_files(ENV_PATHS) + EnvManager.copy_env_files(env_files, DIR) + + # check that each env file is present in DIR.ENV_FILES with correct contents + assert len(list(DIR.ENV_FILES.iterdir())) == len(env_files) + + # copy env files again + EnvManager.copy_env_files(env_files, DIR) + + for index, env_path in enumerate(ENV_PATHS): + matching_files = [f for f in DIR.ENV_FILES.iterdir() if index == int(f.name.split("-")[0])] + assert len(matching_files) == 1 + assert files_are_same(env_path, matching_files[0]) + + +def test_copy_env_files_twice_with_content_change(tmp_output: Path, tmp_recipes: Path, tmp_path: Path): + # copy env files to tmp_path + assert len(list(tmp_path.iterdir())) == 0 + for f in ENV_PATHS: + shutil.copy(f, tmp_path / f.name) + ENV_PATHS_NEW = list(tmp_path.iterdir()) + assert len(ENV_PATHS_NEW) > 0 + + # create dirs in output + DIR = DirManager(output_dir=tmp_output, session_id="abc", recipes_dir=tmp_recipes) + DIR.create_all_dirs() + + # confirm dir is empty + assert len(list(DIR.ENV_FILES.iterdir())) == 0 + + # copy env files from tmp_path to tmp_output + env_files = EnvManager._get_env_files(ENV_PATHS_NEW) + EnvManager.copy_env_files(env_files, DIR) + + # check that each env file is present in DIR.ENV_FILES with correct contents + assert len(list(DIR.ENV_FILES.iterdir())) == len(env_files) + + # change content of one env_file in tmp_path + file_path = next(tmp_path.iterdir()) + with open(file_path, "w") as file: + file.write("This is the new content") + + # copy env files again + with pytest.raises(AssertionError) as excinfo: + EnvManager.copy_env_files(env_files, DIR) + + assert "input env files have changed" in str(excinfo.value) + + +def test_copy_env_files_twice_with_name_change(tmp_output: Path, tmp_recipes: Path, 
tmp_path: Path): + # copy env files to tmp_path + assert len(list(tmp_path.iterdir())) == 0 + for f in ENV_PATHS: + shutil.copy(f, tmp_path / f.name) + ENV_PATHS_NEW = list(tmp_path.iterdir()) + assert len(ENV_PATHS_NEW) > 0 + + # create dirs in output + DIR = DirManager(output_dir=tmp_output, session_id="abc", recipes_dir=tmp_recipes) + DIR.create_all_dirs() + + # confirm dir is empty + assert len(list(DIR.ENV_FILES.iterdir())) == 0 + + # copy env files from tmp_path to tmp_output + env_files = EnvManager._get_env_files(ENV_PATHS_NEW) + EnvManager.copy_env_files(env_files, DIR) + + # check that each env file is present in DIR.ENV_FILES with correct contents + assert len(list(DIR.ENV_FILES.iterdir())) == len(env_files) + + # change name of one env_file in tmp_path + file_path = next(tmp_path.iterdir()) + file_path.rename(file_path.parent / (file_path.stem + "-other" + file_path.suffix)) + + # copy env files from tmp_path to tmp_output again + with pytest.raises(AssertionError) as excinfo: + env_files = EnvManager._get_env_files(list(tmp_path.iterdir())) + EnvManager.copy_env_files(env_files, DIR) + + assert "input env files have changed" in str(excinfo.value) diff --git a/tests/test_env_resolution.py b/tests/test_env_resolution.py new file mode 100644 index 0000000..bbdf8f0 --- /dev/null +++ b/tests/test_env_resolution.py @@ -0,0 +1,118 @@ +from pathlib import Path + +import pytest + +from pytest_abra.coordinator import Coordinator +from pytest_abra.env_manager import DependencyRule, EnvFile, EnvManager + +RECIPES_DIR = Path("./recipes").resolve() +RUNNER_DICT = Coordinator.create_runner_dict(RECIPES_DIR) + + +def test_complex_sorting() -> None: + demo_rules = [ # X depends on Y + DependencyRule("a", "e"), + DependencyRule("b", "e"), + DependencyRule("b", "f"), + DependencyRule("c", "e"), + DependencyRule("d", "e"), + DependencyRule("f", "e"), + ] + + demo_types = ["a", "b", "c", "d", "e", "f", "g"] + env_files = [EnvFile(env_type=t, env_path=Path(), 
env_config=dict()) for t in demo_types] + EnvManager.sort_env_files_by_rule + sorted_env_files = EnvManager.sort_env_files_by_rule(env_files, demo_rules) + + assert sorted_env_files[0].env_type == "e" + + +def test_circular_import() -> None: + """This test will raise ValueError because the example input cannot be correctly ordered""" + demo_rules = [ + DependencyRule("a", "b"), + DependencyRule("b", "c"), + DependencyRule("c", "a"), + ] + + demo_types = ["a", "b", "c"] + env_files = [EnvFile(env_type=t, env_path=Path(), env_config=dict()) for t in demo_types] + with pytest.raises(ValueError): + EnvManager.sort_env_files_by_rule(env_files, demo_rules) + + +def test_real_env_files() -> None: + """authentik should be first""" + + ENV_FILES = [ + Path("envfiles/blog.test.dev.local-it.cloud.env"), # wordpress + Path("envfiles/login.test.dev.local-it.cloud.env"), # authentik + ] + env_files: list[EnvFile] = EnvManager._get_env_files(ENV_FILES) + dependency_rules: list[DependencyRule] = EnvManager._get_dependency_rules(env_files, RUNNER_DICT) + sorted_env_files = EnvManager.sort_env_files_by_rule(env_files, dependency_rules) + assert sorted_env_files[0].env_type == "authentik" + + +def test_real_env_files_duplicate() -> None: + """authentik should be first""" + + ENV_FILES = [ + Path("envfiles/blog.test.dev.local-it.cloud.env"), # wordpress + Path("envfiles/login.test.dev.local-it.cloud.env"), # authentik + Path("envfiles/login.test.dev.local-it.cloud.env"), # authentik + ] + env_files: list[EnvFile] = EnvManager._get_env_files(ENV_FILES) + dependency_rules: list[DependencyRule] = EnvManager._get_dependency_rules(env_files, RUNNER_DICT) + sorted_env_files = EnvManager.sort_env_files_by_rule(env_files, dependency_rules) + assert sorted_env_files[0].env_type == "authentik" + assert sorted_env_files[1].env_type == "authentik" + assert sorted_env_files[2].env_type == "wordpress" + + +def test_real_env_files_duplicate_six() -> None: + """authentik should be first""" + + 
ENV_FILES = [ + Path("envfiles/blog.test.dev.local-it.cloud.env"), # wordpress + Path("envfiles/login.test.dev.local-it.cloud.env"), # authentik + Path("envfiles/blog.test.dev.local-it.cloud.env"), # wordpress + Path("envfiles/login.test.dev.local-it.cloud.env"), # authentik + Path("envfiles/login.test.dev.local-it.cloud.env"), # authentik + Path("envfiles/blog.test.dev.local-it.cloud.env"), # wordpress + ] + env_files: list[EnvFile] = EnvManager._get_env_files(ENV_FILES) + dependency_rules: list[DependencyRule] = EnvManager._get_dependency_rules(env_files, RUNNER_DICT) + sorted_env_files = EnvManager.sort_env_files_by_rule(env_files, dependency_rules) + assert sorted_env_files[0].env_type == "authentik" + assert sorted_env_files[1].env_type == "authentik" + assert sorted_env_files[2].env_type == "authentik" + assert sorted_env_files[3].env_type == "wordpress" + assert sorted_env_files[4].env_type == "wordpress" + assert sorted_env_files[5].env_type == "wordpress" + + +def test_env_manager() -> None: + env_paths_list = [ + Path("envfiles/blog.test.dev.local-it.cloud.env"), # wordpress + Path("envfiles/login.test.dev.local-it.cloud.env"), # authentik + Path("envfiles/login.test.dev.local-it.cloud.env"), # authentik + ] + ENV = EnvManager(env_paths_list, RUNNER_DICT) + assert ENV.env_files[0].env_type == "authentik" + assert ENV.env_files[1].env_type == "authentik" + assert ENV.env_files[2].env_type == "wordpress" + + +def test_RUNNER_DICT_missing_key() -> None: + """RUNNER_DICT missing wordpress key while .env file with TYPE=wordpress given""" + env_paths_list = [ + Path("envfiles/blog.test.dev.local-it.cloud.env"), # wordpress + Path("envfiles/login.test.dev.local-it.cloud.env"), # authentik + Path("envfiles/login.test.dev.local-it.cloud.env"), # authentik + ] + RUNNER_DICT_COPY = RUNNER_DICT.copy() + del RUNNER_DICT_COPY["wordpress"] + with pytest.raises(AssertionError) as excinfo: + EnvManager(env_paths_list, RUNNER_DICT_COPY) + assert "no runner for" in 
str(excinfo.value) diff --git a/tests/test_html_merge.py b/tests/test_html_merge.py new file mode 100644 index 0000000..a216dfa --- /dev/null +++ b/tests/test_html_merge.py @@ -0,0 +1,64 @@ +# tmp_path fixture: +# https://docs.pytest.org/en/6.2.x/tmpdir.html + +from pathlib import Path + +import pytest +from icecream import ic # type: ignore +from playwright.sync_api import BrowserContext, expect + +from pytest_abra import BaseUrl +from pytest_abra.html_helper import merge_html_reports + + +@pytest.fixture(scope="session") +def session_tmp_path(tmp_path_factory: pytest.TempPathFactory) -> Path: + return tmp_path_factory.mktemp("html_test") + + +@pytest.fixture(scope="session") +def html_file(session_tmp_path: Path) -> Path: + """combines all generated pytest html reports into one""" + + in_dir_path = Path(__file__).parent / "assets" / "html_merge" + in_dir_path = in_dir_path.resolve() + + html_file = session_tmp_path / "test.html" + + merge_html_reports(in_dir_path.as_posix(), html_file.as_posix(), "combined.html") + return html_file + + +def test_merge_html(html_file: Path): + assert html_file.is_file() + assert html_file.parent.is_dir() + assert next(html_file.parent.glob("*")) + + +@pytest.mark.slow +def test_check_result_with_playwright(html_file: Path, context: BrowserContext): + assert html_file.is_file() + + file_url = BaseUrl(netloc=html_file.as_posix(), scheme="file").get() + + page = context.new_page() + page.goto(file_url) + + # check if combined is correct + expect(page.get_by_text("2 Passed,")).to_be_visible() + expect(page.get_by_text("2 Failed,")).to_be_visible() + expect(page.get_by_text("tests ran in 12.946 seconds")).to_be_visible() + + # check if heading is correct + expect(page.get_by_role("heading", name="combined.html")).to_be_visible() + + # check if traceback is included + expect(page.get_by_text("E AssertionError: One or more")).to_be_visible() + + # check if asset works + with page.expect_popup() as page1_info: + page.get_by_role("link", 
name="Authentik Blueprint Status").click() + page1 = page1_info.value + + # see if content of txt file is correct + expect(page1.get_by_text("failed")).to_be_visible() diff --git a/tests/test_runner.py b/tests/test_runner.py new file mode 100644 index 0000000..24f9bf6 --- /dev/null +++ b/tests/test_runner.py @@ -0,0 +1,29 @@ +from pathlib import Path + +from pytest_abra import DirManager, Runner + + +def test_runner_create_status_file(tmp_path: Path): + """check if _create_status_file prevents duplicates""" + + DIR = DirManager(output_dir=tmp_path, session_id="temp") + DIR.create_all_dirs() + assert len(list(DIR.STATUS.iterdir())) == 0 + + # create first status file + Runner._create_status_file(DIR, "passed", "identifier-a") + assert len(list(DIR.STATUS.iterdir())) == 1 + + # create second status file + Runner._create_status_file(DIR, "passed", "identifier-b") + assert len(list(DIR.STATUS.iterdir())) == 2 + + # check if _get_status_files finds only the correct status file + result = Runner._get_status_files(DIR, "identifier-a") + assert len(result) == 1 + + # overwrite first status file + Runner._create_status_file(DIR, "failed", "identifier-a") + assert len(list(DIR.STATUS.iterdir())) == 2 + + assert Runner._is_test_passed(DIR, "identifier-a") is False diff --git a/tests/test_url.py b/tests/test_url.py new file mode 100644 index 0000000..82b9f44 --- /dev/null +++ b/tests/test_url.py @@ -0,0 +1,28 @@ +from pytest_abra.utils import BaseUrl + +url_input = { + "netloc": "blog.dev.local-it.cloud", + "scheme": "https", +} + +url_obj = BaseUrl(**url_input) + + +def test_urllib_domain_only(): + assert url_obj.get() == "https://blog.dev.local-it.cloud" + + +def test_urllib_path_single(): + assert url_obj.get(path="something") == "https://blog.dev.local-it.cloud/something" + + +def test_urllib_path_double(): + assert url_obj.get(path="something/else") == "https://blog.dev.local-it.cloud/something/else" + + +def test_urllib_path_signle_suc_slash(): + assert 
url_obj.get(path="something/else/") == "https://blog.dev.local-it.cloud/something/else/" + + +def test_urllib_path_signle_pre_slash(): + assert url_obj.get(path="/something/else") == "https://blog.dev.local-it.cloud/something/else" diff --git a/wordpress_test.py b/wordpress_test.py deleted file mode 100644 index c58e4af..0000000 --- a/wordpress_test.py +++ /dev/null @@ -1,14 +0,0 @@ -from conftest import CONFIG, check_for, RECORDS - -""" Test Wordpress """ -def test_wordpress(admin_session): - context, page = admin_session - with page.expect_popup() as info: - page.get_by_role("link", name="Wordpress").click() - - wordpress = info.value - check_for(wordpress.locator("#wpcontent")) - if CONFIG['locale'] == 'de': - check_for(wordpress.get_by_role("heading", name="Willkommen bei WordPress!")) - context.tracing.stop(path=f"{RECORDS}/wordpress.zip") -