Marco Ricci committed on 2025-08-09 14:44:43
Zeige 8 geänderte Dateien mit 1681 Einfügungen und 1589 Löschungen.
Split off the slow, `hypothesis`-based integration tests (the "heavy-duty" tests) from the `test_derivepassphrase_cli`, `test_derivepassphrase_ssh_agent` and `test_derivepassphrase_types` modules into separate submodules named `heavy_duty`. Additionally, mark the contents of these `heavy_duty` submodules with the `heavy_duty` `pytest` mark. We do this both because the integration tests are slow and because they are relatively large per test: you typically write a whole new class plus support code per test, and then re-export one of the class's attributes as a top-level, auto-discoverable test. Though specifically marked, the tests are still run by default.
| ... | ... |
@@ -110,6 +110,17 @@ def _hypothesis_settings_setup() -> None: |
| 110 | 110 |
_hypothesis_settings_setup() |
| 111 | 111 |
|
| 112 | 112 |
|
| 113 |
+def pytest_configure(config: pytest.Config) -> None: |
|
| 114 |
+ """Configure `pytest`: add the `heavy_duty` marker.""" |
|
| 115 |
+ config.addinivalue_line( |
|
| 116 |
+ 'markers', |
|
| 117 |
+ ( |
|
| 118 |
+ 'heavy_duty: ' |
|
| 119 |
+ 'mark test as a slow, heavy-duty test (e.g., an integration test)' |
|
| 120 |
+ ), |
|
| 121 |
+ ) |
|
| 122 |
+ |
|
| 123 |
+ |
|
| 113 | 124 |
# https://docs.pytest.org/en/stable/explanation/fixtures.html#a-note-about-fixture-cleanup |
| 114 | 125 |
# https://github.com/pytest-dev/pytest/issues/5243#issuecomment-491522595 |
| 115 | 126 |
@pytest.fixture(scope="session", autouse=False) |
| ... | ... |
@@ -140,6 +140,17 @@ def xfail_on_the_annoying_os( |
| 140 | 140 |
return mark if f is None else mark(f) |
| 141 | 141 |
|
| 142 | 142 |
|
| 143 |
+heavy_duty = pytest.mark.heavy_duty |
|
| 144 |
+""" |
|
| 145 |
+A cached `pytest` mark indicating that this test function/class/module |
|
| 146 |
+is a slow, heavy duty test. Users who are impatient (or otherwise |
|
| 147 |
+cannot afford to wait for these tests to complete) may wish to exclude |
|
| 148 |
+these tests; this mark helps in achieving that. |
|
| 149 |
+ |
|
| 150 |
+All current heavy duty tests are integration tests. |
|
| 151 |
+""" |
|
| 152 |
+ |
|
| 153 |
+ |
|
| 143 | 154 |
# Parameter sets |
| 144 | 155 |
# ============== |
| 145 | 156 |
|
| ... | ... |
@@ -16,7 +16,6 @@ import logging |
| 16 | 16 |
import operator |
| 17 | 17 |
import os |
| 18 | 18 |
import pathlib |
| 19 |
-import queue |
|
| 20 | 19 |
import re |
| 21 | 20 |
import shlex |
| 22 | 21 |
import shutil |
| ... | ... |
@@ -25,14 +24,14 @@ import tempfile |
| 25 | 24 |
import textwrap |
| 26 | 25 |
import types |
| 27 | 26 |
import warnings |
| 28 |
-from typing import TYPE_CHECKING, cast |
|
| 27 |
+from typing import TYPE_CHECKING |
|
| 29 | 28 |
|
| 30 | 29 |
import click.testing |
| 31 | 30 |
import exceptiongroup |
| 32 | 31 |
import hypothesis |
| 33 | 32 |
import pytest |
| 34 |
-from hypothesis import stateful, strategies |
|
| 35 |
-from typing_extensions import Any, NamedTuple, TypeAlias |
|
| 33 |
+from hypothesis import strategies |
|
| 34 |
+from typing_extensions import Any, NamedTuple |
|
| 36 | 35 |
|
| 37 | 36 |
import tests.data |
| 38 | 37 |
import tests.data.callables |
| ... | ... |
@@ -48,7 +47,6 @@ from derivepassphrase._internals import ( |
| 48 | 47 |
from derivepassphrase.ssh_agent import socketprovider |
| 49 | 48 |
|
| 50 | 49 |
if TYPE_CHECKING: |
| 51 |
- import multiprocessing |
|
| 52 | 50 |
from collections.abc import Callable, Iterable, Iterator, Sequence |
| 53 | 51 |
from collections.abc import Set as AbstractSet |
| 54 | 52 |
from typing import NoReturn |
| ... | ... |
@@ -5992,1290 +5990,6 @@ class TestCLITransition: |
| 5992 | 5990 |
) == [DUMMY_SERVICE] |
| 5993 | 5991 |
|
| 5994 | 5992 |
|
| 5995 |
-KNOWN_SERVICES = (DUMMY_SERVICE, "email", "bank", "work") |
|
| 5996 |
-"""Known service names. Used for the [`ConfigManagementStateMachine`][].""" |
|
| 5997 |
-VALID_PROPERTIES = ( |
|
| 5998 |
- "length", |
|
| 5999 |
- "repeat", |
|
| 6000 |
- "upper", |
|
| 6001 |
- "lower", |
|
| 6002 |
- "number", |
|
| 6003 |
- "space", |
|
| 6004 |
- "dash", |
|
| 6005 |
- "symbol", |
|
| 6006 |
-) |
|
| 6007 |
-"""Known vault properties. Used for the [`ConfigManagementStateMachine`][].""" |
|
| 6008 |
- |
|
| 6009 |
- |
|
| 6010 |
-def build_reduced_vault_config_settings( |
|
| 6011 |
- config: _types.VaultConfigServicesSettings, |
|
| 6012 |
- keys_to_prune: frozenset[str], |
|
| 6013 |
-) -> _types.VaultConfigServicesSettings: |
|
| 6014 |
- """Return a service settings object with certain keys pruned. |
|
| 6015 |
- |
|
| 6016 |
- Args: |
|
| 6017 |
- config: |
|
| 6018 |
- The original service settings object. |
|
| 6019 |
- keys_to_prune: |
|
| 6020 |
- The keys to prune from the settings object. |
|
| 6021 |
- |
|
| 6022 |
- """ |
|
| 6023 |
- config2 = copy.deepcopy(config) |
|
| 6024 |
- for key in keys_to_prune: |
|
| 6025 |
- config2.pop(key, None) # type: ignore[misc] |
|
| 6026 |
- return config2 |
|
| 6027 |
- |
|
| 6028 |
- |
|
| 6029 |
-SERVICES_STRATEGY = strategies.builds( |
|
| 6030 |
- build_reduced_vault_config_settings, |
|
| 6031 |
- tests.machinery.hypothesis.vault_full_service_config(), |
|
| 6032 |
- strategies.sets( |
|
| 6033 |
- strategies.sampled_from(VALID_PROPERTIES), |
|
| 6034 |
- max_size=7, |
|
| 6035 |
- ), |
|
| 6036 |
-) |
|
| 6037 |
-"""A hypothesis strategy to build incomplete service configurations.""" |
|
| 6038 |
- |
|
| 6039 |
- |
|
| 6040 |
-def services_strategy() -> strategies.SearchStrategy[ |
|
| 6041 |
- _types.VaultConfigServicesSettings |
|
| 6042 |
-]: |
|
| 6043 |
- """Return a strategy to build incomplete service configurations.""" |
|
| 6044 |
- return SERVICES_STRATEGY |
|
| 6045 |
- |
|
| 6046 |
- |
|
| 6047 |
-def assemble_config( |
|
| 6048 |
- global_data: _types.VaultConfigGlobalSettings, |
|
| 6049 |
- service_data: list[tuple[str, _types.VaultConfigServicesSettings]], |
|
| 6050 |
-) -> _types.VaultConfig: |
|
| 6051 |
- """Return a vault config using the global and service data.""" |
|
| 6052 |
- services_dict = dict(service_data) |
|
| 6053 |
- return ( |
|
| 6054 |
- {"global": global_data, "services": services_dict}
|
|
| 6055 |
- if global_data |
|
| 6056 |
- else {"services": services_dict}
|
|
| 6057 |
- ) |
|
| 6058 |
- |
|
| 6059 |
- |
|
| 6060 |
-@strategies.composite |
|
| 6061 |
-def draw_service_name_and_data( |
|
| 6062 |
- draw: hypothesis.strategies.DrawFn, |
|
| 6063 |
- num_entries: int, |
|
| 6064 |
-) -> tuple[tuple[str, _types.VaultConfigServicesSettings], ...]: |
|
| 6065 |
- """Draw a service name and settings, as a hypothesis strategy. |
|
| 6066 |
- |
|
| 6067 |
- Will draw service names from [`KNOWN_SERVICES`][] and service |
|
| 6068 |
- settings via [`services_strategy`][]. |
|
| 6069 |
- |
|
| 6070 |
- Args: |
|
| 6071 |
- draw: |
|
| 6072 |
- The `draw` function, as provided for by hypothesis. |
|
| 6073 |
- num_entries: |
|
| 6074 |
- The number of services to draw. |
|
| 6075 |
- |
|
| 6076 |
- Returns: |
|
| 6077 |
- A sequence of pairs of service names and service settings. |
|
| 6078 |
- |
|
| 6079 |
- """ |
|
| 6080 |
- possible_services = list(KNOWN_SERVICES) |
|
| 6081 |
- selected_services: list[str] = [] |
|
| 6082 |
- for _ in range(num_entries): |
|
| 6083 |
- selected_services.append( |
|
| 6084 |
- draw(strategies.sampled_from(possible_services)) |
|
| 6085 |
- ) |
|
| 6086 |
- possible_services.remove(selected_services[-1]) |
|
| 6087 |
- return tuple( |
|
| 6088 |
- (service, draw(services_strategy())) for service in selected_services |
|
| 6089 |
- ) |
|
| 6090 |
- |
|
| 6091 |
- |
|
| 6092 |
-VAULT_FULL_CONFIG = strategies.builds( |
|
| 6093 |
- assemble_config, |
|
| 6094 |
- services_strategy(), |
|
| 6095 |
- strategies.integers( |
|
| 6096 |
- min_value=2, |
|
| 6097 |
- max_value=4, |
|
| 6098 |
- ).flatmap(draw_service_name_and_data), |
|
| 6099 |
-) |
|
| 6100 |
-"""A hypothesis strategy to build full vault configurations.""" |
|
| 6101 |
- |
|
| 6102 |
- |
|
| 6103 |
-def vault_full_config() -> strategies.SearchStrategy[_types.VaultConfig]: |
|
| 6104 |
- """Return a strategy to build full vault configurations.""" |
|
| 6105 |
- return VAULT_FULL_CONFIG |
|
| 6106 |
- |
|
| 6107 |
- |
|
| 6108 |
-class ConfigManagementStateMachine(stateful.RuleBasedStateMachine): |
|
| 6109 |
- """A state machine recording changes in the vault configuration. |
|
| 6110 |
- |
|
| 6111 |
- Record possible configuration states in bundles, then in each rule, |
|
| 6112 |
- take a configuration and manipulate it somehow. |
|
| 6113 |
- |
|
| 6114 |
- Attributes: |
|
| 6115 |
- setting: |
|
| 6116 |
- A bundle for single-service settings. |
|
| 6117 |
- configuration: |
|
| 6118 |
- A bundle for full vault configurations. |
|
| 6119 |
- |
|
| 6120 |
- """ |
|
| 6121 |
- |
|
| 6122 |
- def __init__(self) -> None: |
|
| 6123 |
- """Initialize self, set up context managers and enter them.""" |
|
| 6124 |
- super().__init__() |
|
| 6125 |
- self.runner = tests.machinery.CliRunner(mix_stderr=False) |
|
| 6126 |
- self.exit_stack = contextlib.ExitStack().__enter__() |
|
| 6127 |
- self.monkeypatch = self.exit_stack.enter_context( |
|
| 6128 |
- pytest.MonkeyPatch().context() |
|
| 6129 |
- ) |
|
| 6130 |
- self.isolated_config = self.exit_stack.enter_context( |
|
| 6131 |
- tests.machinery.pytest.isolated_vault_config( |
|
| 6132 |
- monkeypatch=self.monkeypatch, |
|
| 6133 |
- runner=self.runner, |
|
| 6134 |
- vault_config={"services": {}},
|
|
| 6135 |
- ) |
|
| 6136 |
- ) |
|
| 6137 |
- |
|
| 6138 |
- setting: stateful.Bundle[_types.VaultConfigServicesSettings] = ( |
|
| 6139 |
- stateful.Bundle("setting")
|
|
| 6140 |
- ) |
|
| 6141 |
- """""" |
|
| 6142 |
- configuration: stateful.Bundle[_types.VaultConfig] = stateful.Bundle( |
|
| 6143 |
- "configuration" |
|
| 6144 |
- ) |
|
| 6145 |
- """""" |
|
| 6146 |
- |
|
| 6147 |
- @stateful.initialize( |
|
| 6148 |
- target=configuration, |
|
| 6149 |
- configs=strategies.lists( |
|
| 6150 |
- vault_full_config(), |
|
| 6151 |
- min_size=8, |
|
| 6152 |
- max_size=8, |
|
| 6153 |
- ), |
|
| 6154 |
- ) |
|
| 6155 |
- def declare_initial_configs( |
|
| 6156 |
- self, |
|
| 6157 |
- configs: Iterable[_types.VaultConfig], |
|
| 6158 |
- ) -> stateful.MultipleResults[_types.VaultConfig]: |
|
| 6159 |
- """Initialize the configuration bundle with eight configurations.""" |
|
| 6160 |
- return stateful.multiple(*configs) |
|
| 6161 |
- |
|
| 6162 |
- @stateful.initialize( |
|
| 6163 |
- target=setting, |
|
| 6164 |
- configs=strategies.lists( |
|
| 6165 |
- vault_full_config(), |
|
| 6166 |
- min_size=4, |
|
| 6167 |
- max_size=4, |
|
| 6168 |
- ), |
|
| 6169 |
- ) |
|
| 6170 |
- def extract_initial_settings( |
|
| 6171 |
- self, |
|
| 6172 |
- configs: list[_types.VaultConfig], |
|
| 6173 |
- ) -> stateful.MultipleResults[_types.VaultConfigServicesSettings]: |
|
| 6174 |
- """Initialize the settings bundle with four service settings.""" |
|
| 6175 |
- settings: list[_types.VaultConfigServicesSettings] = [] |
|
| 6176 |
- for c in configs: |
|
| 6177 |
- settings.extend(c["services"].values()) |
|
| 6178 |
- return stateful.multiple(*map(copy.deepcopy, settings)) |
|
| 6179 |
- |
|
| 6180 |
- @staticmethod |
|
| 6181 |
- def fold_configs( |
|
| 6182 |
- c1: _types.VaultConfig, c2: _types.VaultConfig |
|
| 6183 |
- ) -> _types.VaultConfig: |
|
| 6184 |
- """Fold `c1` into `c2`, overriding the latter.""" |
|
| 6185 |
- new_global_dict = c1.get("global", c2.get("global"))
|
|
| 6186 |
- if new_global_dict is not None: |
|
| 6187 |
- return {
|
|
| 6188 |
- "global": new_global_dict, |
|
| 6189 |
- "services": {**c2["services"], **c1["services"]},
|
|
| 6190 |
- } |
|
| 6191 |
- return {
|
|
| 6192 |
- "services": {**c2["services"], **c1["services"]},
|
|
| 6193 |
- } |
|
| 6194 |
- |
|
| 6195 |
- @stateful.rule( |
|
| 6196 |
- target=configuration, |
|
| 6197 |
- config=configuration, |
|
| 6198 |
- setting=setting.filter(bool), |
|
| 6199 |
- maybe_unset=strategies.sets( |
|
| 6200 |
- strategies.sampled_from(VALID_PROPERTIES), |
|
| 6201 |
- max_size=3, |
|
| 6202 |
- ), |
|
| 6203 |
- overwrite=strategies.booleans(), |
|
| 6204 |
- ) |
|
| 6205 |
- def set_globals( |
|
| 6206 |
- self, |
|
| 6207 |
- config: _types.VaultConfig, |
|
| 6208 |
- setting: _types.VaultConfigGlobalSettings, |
|
| 6209 |
- maybe_unset: set[str], |
|
| 6210 |
- overwrite: bool, |
|
| 6211 |
- ) -> _types.VaultConfig: |
|
| 6212 |
- """Set the global settings of a configuration. |
|
| 6213 |
- |
|
| 6214 |
- Args: |
|
| 6215 |
- config: |
|
| 6216 |
- The configuration to edit. |
|
| 6217 |
- setting: |
|
| 6218 |
- The new global settings. |
|
| 6219 |
- maybe_unset: |
|
| 6220 |
- Settings keys to additionally unset, if not already |
|
| 6221 |
- present in the new settings. Corresponds to the |
|
| 6222 |
- `--unset` command-line argument. |
|
| 6223 |
- overwrite: |
|
| 6224 |
- Overwrite the settings object if true, or merge if |
|
| 6225 |
- false. Corresponds to the `--overwrite-existing` and |
|
| 6226 |
- `--merge-existing` command-line arguments. |
|
| 6227 |
- |
|
| 6228 |
- Returns: |
|
| 6229 |
- The amended configuration. |
|
| 6230 |
- |
|
| 6231 |
- """ |
|
| 6232 |
- cli_helpers.save_config(config) |
|
| 6233 |
- config_global = config.get("global", {})
|
|
| 6234 |
- maybe_unset = set(maybe_unset) - setting.keys() |
|
| 6235 |
- if overwrite: |
|
| 6236 |
- config["global"] = config_global = {}
|
|
| 6237 |
- elif maybe_unset: |
|
| 6238 |
- for key in maybe_unset: |
|
| 6239 |
- config_global.pop(key, None) # type: ignore[misc] |
|
| 6240 |
- config.setdefault("global", {}).update(setting)
|
|
| 6241 |
- assert _types.is_vault_config(config) |
|
| 6242 |
- # NOTE: This relies on settings_obj containing only the keys |
|
| 6243 |
- # "length", "repeat", "upper", "lower", "number", "space", |
|
| 6244 |
- # "dash" and "symbol". |
|
| 6245 |
- result = self.runner.invoke( |
|
| 6246 |
- cli.derivepassphrase_vault, |
|
| 6247 |
- [ |
|
| 6248 |
- "--config", |
|
| 6249 |
- "--overwrite-existing" if overwrite else "--merge-existing", |
|
| 6250 |
- ] |
|
| 6251 |
- + [f"--unset={key}" for key in maybe_unset]
|
|
| 6252 |
- + [ |
|
| 6253 |
- f"--{key}={value}"
|
|
| 6254 |
- for key, value in setting.items() |
|
| 6255 |
- if key in VALID_PROPERTIES |
|
| 6256 |
- ], |
|
| 6257 |
- catch_exceptions=False, |
|
| 6258 |
- ) |
|
| 6259 |
- assert result.clean_exit(empty_stderr=False) |
|
| 6260 |
- assert cli_helpers.load_config() == config |
|
| 6261 |
- return config |
|
| 6262 |
- |
|
| 6263 |
- @stateful.rule( |
|
| 6264 |
- target=configuration, |
|
| 6265 |
- config=configuration, |
|
| 6266 |
- service=strategies.sampled_from(KNOWN_SERVICES), |
|
| 6267 |
- setting=setting.filter(bool), |
|
| 6268 |
- maybe_unset=strategies.sets( |
|
| 6269 |
- strategies.sampled_from(VALID_PROPERTIES), |
|
| 6270 |
- max_size=3, |
|
| 6271 |
- ), |
|
| 6272 |
- overwrite=strategies.booleans(), |
|
| 6273 |
- ) |
|
| 6274 |
- def set_service( |
|
| 6275 |
- self, |
|
| 6276 |
- config: _types.VaultConfig, |
|
| 6277 |
- service: str, |
|
| 6278 |
- setting: _types.VaultConfigServicesSettings, |
|
| 6279 |
- maybe_unset: set[str], |
|
| 6280 |
- overwrite: bool, |
|
| 6281 |
- ) -> _types.VaultConfig: |
|
| 6282 |
- """Set the named service settings for a configuration. |
|
| 6283 |
- |
|
| 6284 |
- Args: |
|
| 6285 |
- config: |
|
| 6286 |
- The configuration to edit. |
|
| 6287 |
- service: |
|
| 6288 |
- The name of the service to set. |
|
| 6289 |
- setting: |
|
| 6290 |
- The new service settings. |
|
| 6291 |
- maybe_unset: |
|
| 6292 |
- Settings keys to additionally unset, if not already |
|
| 6293 |
- present in the new settings. Corresponds to the |
|
| 6294 |
- `--unset` command-line argument. |
|
| 6295 |
- overwrite: |
|
| 6296 |
- Overwrite the settings object if true, or merge if |
|
| 6297 |
- false. Corresponds to the `--overwrite-existing` and |
|
| 6298 |
- `--merge-existing` command-line arguments. |
|
| 6299 |
- |
|
| 6300 |
- Returns: |
|
| 6301 |
- The amended configuration. |
|
| 6302 |
- |
|
| 6303 |
- """ |
|
| 6304 |
- cli_helpers.save_config(config) |
|
| 6305 |
- config_service = config["services"].get(service, {})
|
|
| 6306 |
- maybe_unset = set(maybe_unset) - setting.keys() |
|
| 6307 |
- if overwrite: |
|
| 6308 |
- config["services"][service] = config_service = {}
|
|
| 6309 |
- elif maybe_unset: |
|
| 6310 |
- for key in maybe_unset: |
|
| 6311 |
- config_service.pop(key, None) # type: ignore[misc] |
|
| 6312 |
- config["services"].setdefault(service, {}).update(setting)
|
|
| 6313 |
- assert _types.is_vault_config(config) |
|
| 6314 |
- # NOTE: This relies on settings_obj containing only the keys |
|
| 6315 |
- # "length", "repeat", "upper", "lower", "number", "space", |
|
| 6316 |
- # "dash" and "symbol". |
|
| 6317 |
- result = self.runner.invoke( |
|
| 6318 |
- cli.derivepassphrase_vault, |
|
| 6319 |
- [ |
|
| 6320 |
- "--config", |
|
| 6321 |
- "--overwrite-existing" if overwrite else "--merge-existing", |
|
| 6322 |
- ] |
|
| 6323 |
- + [f"--unset={key}" for key in maybe_unset]
|
|
| 6324 |
- + [ |
|
| 6325 |
- f"--{key}={value}"
|
|
| 6326 |
- for key, value in setting.items() |
|
| 6327 |
- if key in VALID_PROPERTIES |
|
| 6328 |
- ] |
|
| 6329 |
- + ["--", service], |
|
| 6330 |
- catch_exceptions=False, |
|
| 6331 |
- ) |
|
| 6332 |
- assert result.clean_exit(empty_stderr=False) |
|
| 6333 |
- assert cli_helpers.load_config() == config |
|
| 6334 |
- return config |
|
| 6335 |
- |
|
| 6336 |
- @stateful.rule( |
|
| 6337 |
- target=configuration, |
|
| 6338 |
- config=configuration, |
|
| 6339 |
- ) |
|
| 6340 |
- def purge_global( |
|
| 6341 |
- self, |
|
| 6342 |
- config: _types.VaultConfig, |
|
| 6343 |
- ) -> _types.VaultConfig: |
|
| 6344 |
- """Purge the globals of a configuration. |
|
| 6345 |
- |
|
| 6346 |
- Args: |
|
| 6347 |
- config: |
|
| 6348 |
- The configuration to edit. |
|
| 6349 |
- |
|
| 6350 |
- Returns: |
|
| 6351 |
- The pruned configuration. |
|
| 6352 |
- |
|
| 6353 |
- """ |
|
| 6354 |
- cli_helpers.save_config(config) |
|
| 6355 |
- config.pop("global", None)
|
|
| 6356 |
- result = self.runner.invoke( |
|
| 6357 |
- cli.derivepassphrase_vault, |
|
| 6358 |
- ["--delete-globals"], |
|
| 6359 |
- input="y", |
|
| 6360 |
- catch_exceptions=False, |
|
| 6361 |
- ) |
|
| 6362 |
- assert result.clean_exit(empty_stderr=False) |
|
| 6363 |
- assert cli_helpers.load_config() == config |
|
| 6364 |
- return config |
|
| 6365 |
- |
|
| 6366 |
- @stateful.rule( |
|
| 6367 |
- target=configuration, |
|
| 6368 |
- config_and_service=configuration.filter( |
|
| 6369 |
- lambda c: bool(c["services"]) |
|
| 6370 |
- ).flatmap( |
|
| 6371 |
- lambda c: strategies.tuples( |
|
| 6372 |
- strategies.just(c), |
|
| 6373 |
- strategies.sampled_from(tuple(c["services"].keys())), |
|
| 6374 |
- ) |
|
| 6375 |
- ), |
|
| 6376 |
- ) |
|
| 6377 |
- def purge_service( |
|
| 6378 |
- self, |
|
| 6379 |
- config_and_service: tuple[_types.VaultConfig, str], |
|
| 6380 |
- ) -> _types.VaultConfig: |
|
| 6381 |
- """Purge the settings of a named service in a configuration. |
|
| 6382 |
- |
|
| 6383 |
- Args: |
|
| 6384 |
- config_and_service: |
|
| 6385 |
- A 2-tuple containing the configuration to edit, and the |
|
| 6386 |
- service name to purge. |
|
| 6387 |
- |
|
| 6388 |
- Returns: |
|
| 6389 |
- The pruned configuration. |
|
| 6390 |
- |
|
| 6391 |
- """ |
|
| 6392 |
- config, service = config_and_service |
|
| 6393 |
- cli_helpers.save_config(config) |
|
| 6394 |
- config["services"].pop(service, None) |
|
| 6395 |
- result = self.runner.invoke( |
|
| 6396 |
- cli.derivepassphrase_vault, |
|
| 6397 |
- ["--delete", "--", service], |
|
| 6398 |
- input="y", |
|
| 6399 |
- catch_exceptions=False, |
|
| 6400 |
- ) |
|
| 6401 |
- assert result.clean_exit(empty_stderr=False) |
|
| 6402 |
- assert cli_helpers.load_config() == config |
|
| 6403 |
- return config |
|
| 6404 |
- |
|
| 6405 |
- @stateful.rule( |
|
| 6406 |
- target=configuration, |
|
| 6407 |
- config=configuration, |
|
| 6408 |
- ) |
|
| 6409 |
- def purge_all( |
|
| 6410 |
- self, |
|
| 6411 |
- config: _types.VaultConfig, |
|
| 6412 |
- ) -> _types.VaultConfig: |
|
| 6413 |
- """Purge the entire configuration. |
|
| 6414 |
- |
|
| 6415 |
- Args: |
|
| 6416 |
- config: |
|
| 6417 |
- The configuration to edit. |
|
| 6418 |
- |
|
| 6419 |
- Returns: |
|
| 6420 |
- The empty configuration. |
|
| 6421 |
- |
|
| 6422 |
- """ |
|
| 6423 |
- cli_helpers.save_config(config) |
|
| 6424 |
- config = {"services": {}}
|
|
| 6425 |
- result = self.runner.invoke( |
|
| 6426 |
- cli.derivepassphrase_vault, |
|
| 6427 |
- ["--clear"], |
|
| 6428 |
- input="y", |
|
| 6429 |
- catch_exceptions=False, |
|
| 6430 |
- ) |
|
| 6431 |
- assert result.clean_exit(empty_stderr=False) |
|
| 6432 |
- assert cli_helpers.load_config() == config |
|
| 6433 |
- return config |
|
| 6434 |
- |
|
| 6435 |
- @stateful.rule( |
|
| 6436 |
- target=configuration, |
|
| 6437 |
- base_config=configuration, |
|
| 6438 |
- config_to_import=configuration, |
|
| 6439 |
- overwrite=strategies.booleans(), |
|
| 6440 |
- ) |
|
| 6441 |
- def import_configuration( |
|
| 6442 |
- self, |
|
| 6443 |
- base_config: _types.VaultConfig, |
|
| 6444 |
- config_to_import: _types.VaultConfig, |
|
| 6445 |
- overwrite: bool, |
|
| 6446 |
- ) -> _types.VaultConfig: |
|
| 6447 |
- """Import the given configuration into a base configuration. |
|
| 6448 |
- |
|
| 6449 |
- Args: |
|
| 6450 |
- base_config: |
|
| 6451 |
- The configuration to import into. |
|
| 6452 |
- config_to_import: |
|
| 6453 |
- The configuration to import. |
|
| 6454 |
- overwrite: |
|
| 6455 |
- Overwrite the base configuration if true, or merge if |
|
| 6456 |
- false. Corresponds to the `--overwrite-existing` and |
|
| 6457 |
- `--merge-existing` command-line arguments. |
|
| 6458 |
- |
|
| 6459 |
- Returns: |
|
| 6460 |
- The imported or merged configuration. |
|
| 6461 |
- |
|
| 6462 |
- """ |
|
| 6463 |
- cli_helpers.save_config(base_config) |
|
| 6464 |
- config = ( |
|
| 6465 |
- self.fold_configs(config_to_import, base_config) |
|
| 6466 |
- if not overwrite |
|
| 6467 |
- else config_to_import |
|
| 6468 |
- ) |
|
| 6469 |
- assert _types.is_vault_config(config) |
|
| 6470 |
- result = self.runner.invoke( |
|
| 6471 |
- cli.derivepassphrase_vault, |
|
| 6472 |
- ["--import", "-"] |
|
| 6473 |
- + (["--overwrite-existing"] if overwrite else []), |
|
| 6474 |
- input=json.dumps(config_to_import), |
|
| 6475 |
- catch_exceptions=False, |
|
| 6476 |
- ) |
|
| 6477 |
- assert result.clean_exit(empty_stderr=False) |
|
| 6478 |
- assert cli_helpers.load_config() == config |
|
| 6479 |
- return config |
|
| 6480 |
- |
|
| 6481 |
- def teardown(self) -> None: |
|
| 6482 |
- """Upon teardown, exit all contexts entered in `__init__`.""" |
|
| 6483 |
- self.exit_stack.close() |
|
| 6484 |
- |
|
| 6485 |
- |
|
| 6486 |
-TestConfigManagement = ConfigManagementStateMachine.TestCase |
|
| 6487 |
-"""The [`unittest.TestCase`][] class that will actually be run.""" |
|
| 6488 |
- |
|
| 6489 |
- |
|
| 6490 |
-class FakeConfigurationMutexAction(NamedTuple): |
|
| 6491 |
- """An action/a step in the [`FakeConfigurationMutexStateMachine`][]. |
|
| 6492 |
- |
|
| 6493 |
- Attributes: |
|
| 6494 |
- command_line: |
|
| 6495 |
- The command-line for `derivepassphrase vault` to execute. |
|
| 6496 |
- input: |
|
| 6497 |
- The input to this command. |
|
| 6498 |
- |
|
| 6499 |
- """ |
|
| 6500 |
- |
|
| 6501 |
- command_line: list[str] |
|
| 6502 |
- """""" |
|
| 6503 |
- input: str | bytes | None = None |
|
| 6504 |
- """""" |
|
| 6505 |
- |
|
| 6506 |
- |
|
| 6507 |
-def run_actions_handler( |
|
| 6508 |
- id_num: int, |
|
| 6509 |
- action: FakeConfigurationMutexAction, |
|
| 6510 |
- *, |
|
| 6511 |
- input_queue: queue.Queue, |
|
| 6512 |
- output_queue: queue.Queue, |
|
| 6513 |
- timeout: int, |
|
| 6514 |
-) -> None: |
|
| 6515 |
- """Prepare the faked mutex, then run `action`. |
|
| 6516 |
- |
|
| 6517 |
- This is a top-level handler function -- to be used in a new |
|
| 6518 |
- [`multiprocessing.Process`][] -- to run a single action from the |
|
| 6519 |
- [`FakeConfigurationMutexStateMachine`][]. Output from this function |
|
| 6520 |
- must be sent down the output queue instead of relying on the call |
|
| 6521 |
- stack. Additionally, because this runs in a separate process, we |
|
| 6522 |
- need to restart coverage tracking if it is currently running. |
|
| 6523 |
- |
|
| 6524 |
- Args: |
|
| 6525 |
- id_num: |
|
| 6526 |
- The internal ID of this subprocess. |
|
| 6527 |
- action: |
|
| 6528 |
- The action to execute. |
|
| 6529 |
- input_queue: |
|
| 6530 |
- The queue for data passed from the manager/parent process to |
|
| 6531 |
- this subprocess. |
|
| 6532 |
- output_queue: |
|
| 6533 |
- The queue for data passed from this subprocess to the |
|
| 6534 |
- manager/parent process. |
|
| 6535 |
- timeout: |
|
| 6536 |
- The maximum amount of time to wait for a data transfer along |
|
| 6537 |
- the input or the output queue. If exceeded, we exit |
|
| 6538 |
- immediately. |
|
| 6539 |
- |
|
| 6540 |
- """ |
|
| 6541 |
- with pytest.MonkeyPatch.context() as monkeypatch: |
|
| 6542 |
- monkeypatch.setattr( |
|
| 6543 |
- cli_helpers, |
|
| 6544 |
- "configuration_mutex", |
|
| 6545 |
- lambda: FakeConfigurationMutexStateMachine.ConfigurationMutexStub( |
|
| 6546 |
- my_id=id_num, |
|
| 6547 |
- input_queue=input_queue, |
|
| 6548 |
- output_queue=output_queue, |
|
| 6549 |
- timeout=timeout, |
|
| 6550 |
- ), |
|
| 6551 |
- ) |
|
| 6552 |
- runner = tests.machinery.CliRunner(mix_stderr=False) |
|
| 6553 |
- try: |
|
| 6554 |
- result = runner.invoke( |
|
| 6555 |
- cli.derivepassphrase_vault, |
|
| 6556 |
- args=action.command_line, |
|
| 6557 |
- input=action.input, |
|
| 6558 |
- catch_exceptions=True, |
|
| 6559 |
- ) |
|
| 6560 |
- output_queue.put( |
|
| 6561 |
- FakeConfigurationMutexStateMachine.IPCMessage( |
|
| 6562 |
- id_num, |
|
| 6563 |
- "result", |
|
| 6564 |
- ( |
|
| 6565 |
- result.clean_exit(empty_stderr=False), |
|
| 6566 |
- copy.copy(result.stdout), |
|
| 6567 |
- copy.copy(result.stderr), |
|
| 6568 |
- ), |
|
| 6569 |
- ), |
|
| 6570 |
- block=True, |
|
| 6571 |
- timeout=timeout, |
|
| 6572 |
- ) |
|
| 6573 |
- except Exception as exc: # pragma: no cover # noqa: BLE001 |
|
| 6574 |
- output_queue.put( |
|
| 6575 |
- FakeConfigurationMutexStateMachine.IPCMessage( |
|
| 6576 |
- id_num, "exception", exc |
|
| 6577 |
- ), |
|
| 6578 |
- block=False, |
|
| 6579 |
- ) |
|
| 6580 |
- |
|
| 6581 |
- |
|
| 6582 |
-@hypothesis.settings( |
|
| 6583 |
- stateful_step_count=tests.machinery.hypothesis.get_concurrency_step_count(), |
|
| 6584 |
- deadline=None, |
|
| 6585 |
-) |
|
| 6586 |
-class FakeConfigurationMutexStateMachine(stateful.RuleBasedStateMachine): |
|
| 6587 |
- """A state machine simulating the (faked) configuration mutex. |
|
| 6588 |
- |
|
| 6589 |
- Generate an ordered set of concurrent writers to the |
|
| 6590 |
- derivepassphrase configuration, then test that the writers' accesses |
|
| 6591 |
- are serialized correctly, i.e., test that the writers correctly use |
|
| 6592 |
- the mutex to avoid concurrent accesses, under the assumption that |
|
| 6593 |
- the mutex itself is correctly implemented. |
|
| 6594 |
- |
|
| 6595 |
- We use a custom mutex implementation to both ensure that all writers |
|
| 6596 |
- attempt to lock the configuration at the same time and that the lock |
|
| 6597 |
- is granted in our desired order. This test is therefore independent |
|
| 6598 |
- of the actual (operating system-specific) mutex implementation in |
|
| 6599 |
- `derivepassphrase`. |
|
| 6600 |
- |
|
| 6601 |
- Attributes: |
|
| 6602 |
- setting: |
|
| 6603 |
- A bundle for single-service settings. |
|
| 6604 |
- configuration: |
|
| 6605 |
- A bundle for full vault configurations. |
|
| 6606 |
- |
|
| 6607 |
- """ |
|
| 6608 |
- |
|
| 6609 |
- class IPCMessage(NamedTuple): |
|
| 6610 |
- """A message for inter-process communication. |
|
| 6611 |
- |
|
| 6612 |
- Used by the configuration mutex stub class to affect/signal the |
|
| 6613 |
- control flow amongst the linked mutex clients. |
|
| 6614 |
- |
|
| 6615 |
- Attributes: |
|
| 6616 |
- child_id: |
|
| 6617 |
- The ID of the sending or receiving child process. |
|
| 6618 |
- message: |
|
| 6619 |
- One of "ready", "go", "config", "result" or "exception". |
|
| 6620 |
- payload: |
|
| 6621 |
- The (optional) message payload. |
|
| 6622 |
- |
|
| 6623 |
- """ |
|
| 6624 |
- |
|
| 6625 |
- child_id: int |
|
| 6626 |
- """""" |
|
| 6627 |
- message: Literal["ready", "go", "config", "result", "exception"] |
|
| 6628 |
- """""" |
|
| 6629 |
- payload: object | None |
|
| 6630 |
- """""" |
|
| 6631 |
- |
|
| 6632 |
- class ConfigurationMutexStub(cli_helpers.ConfigurationMutex): |
|
| 6633 |
- """Configuration mutex subclass that enforces a locking order. |
|
| 6634 |
- |
|
| 6635 |
- Each configuration mutex stub object ("mutex client") has an
|
|
| 6636 |
- associated ID, and one read-only and one write-only pipe |
|
| 6637 |
- (actually: [`multiprocessing.Queue`][] objects) to the "manager" |
|
| 6638 |
- instance coordinating these stub objects. First, the mutex |
|
| 6639 |
- client signals readiness, then the manager signals when the |
|
| 6640 |
- mutex shall be considered "acquired", then finally the mutex |
|
| 6641 |
- client sends the result back (simultaneously releasing the mutex |
|
| 6642 |
- again). The manager may optionally send an abort signal if the |
|
| 6643 |
- operations take too long. |
|
| 6644 |
- |
|
| 6645 |
- This subclass also copies the effective vault configuration |
|
| 6646 |
- to `intermediate_configs` upon releasing the lock. |
|
| 6647 |
- |
|
| 6648 |
- """ |
|
| 6649 |
- |
|
| 6650 |
- def __init__( |
|
| 6651 |
- self, |
|
| 6652 |
- *, |
|
| 6653 |
- my_id: int, |
|
| 6654 |
- timeout: int, |
|
| 6655 |
- input_queue: queue.Queue[ |
|
| 6656 |
- FakeConfigurationMutexStateMachine.IPCMessage |
|
| 6657 |
- ], |
|
| 6658 |
- output_queue: queue.Queue[ |
|
| 6659 |
- FakeConfigurationMutexStateMachine.IPCMessage |
|
| 6660 |
- ], |
|
| 6661 |
- ) -> None: |
|
| 6662 |
- """Initialize this mutex client. |
|
| 6663 |
- |
|
| 6664 |
- Args: |
|
| 6665 |
- my_id: |
|
| 6666 |
- The ID of this client. |
|
| 6667 |
- timeout: |
|
| 6668 |
- The timeout for each get and put operation on the |
|
| 6669 |
- queues. |
|
| 6670 |
- input_queue: |
|
| 6671 |
- The message queue for IPC messages from the manager |
|
| 6672 |
- instance to this mutex client. |
|
| 6673 |
- output_queue: |
|
| 6674 |
- The message queue for IPC messages from this mutex |
|
| 6675 |
- client to the manager instance. |
|
| 6676 |
- |
|
| 6677 |
- """ |
|
| 6678 |
- super().__init__() |
|
| 6679 |
- |
|
| 6680 |
        def lock() -> None:
            """Simulate locking of the mutex.

            Issue a "ready" message, wait for a "go", then return.
            If an exception occurs, issue an "exception" message,
            then raise the exception.

            """
            IPCMessage: TypeAlias = (
                FakeConfigurationMutexStateMachine.IPCMessage
            )
            try:
                output_queue.put(
                    IPCMessage(my_id, "ready", None),
                    block=True,
                    timeout=timeout,
                )
                ok = input_queue.get(block=True, timeout=timeout)
                if ok != IPCMessage(my_id, "go", None):  # pragma: no cover
                    # Unexpected reply: report it to the manager, then
                    # raise it locally (wrapping non-exception payloads
                    # in a RuntimeError).
                    output_queue.put(
                        IPCMessage(my_id, "exception", ok), block=False
                    )
                    raise (
                        ok[2]
                        if isinstance(ok[2], BaseException)
                        else RuntimeError(ok[2])
                    )
            except (queue.Empty, queue.Full) as exc:  # pragma: no cover
                # Queue timeouts are reported to the manager rather
                # than raised here; the manager decides how to abort.
                output_queue.put(
                    IPCMessage(my_id, "exception", exc), block=False
                )
                return
|
| 6712 |
- |
|
| 6713 |
        def unlock() -> None:
            """Simulate unlocking of the mutex.

            Issue a "config" message, then return.  If an exception
            occurs, issue an "exception" message, then raise the
            exception.

            """
            IPCMessage: TypeAlias = (
                FakeConfigurationMutexStateMachine.IPCMessage
            )
            try:
                # Publish a snapshot of the configuration as seen at
                # unlock time, so the manager can record it as this
                # client's intermediate configuration.
                output_queue.put(
                    IPCMessage(
                        my_id,
                        "config",
                        copy.copy(cli_helpers.load_config()),
                    ),
                    block=True,
                    timeout=timeout,
                )
            except (queue.Empty, queue.Full) as exc:  # pragma: no cover
                # Report the timeout to the manager, then also raise it
                # locally (unlike in `lock`, where it is swallowed).
                output_queue.put(
                    IPCMessage(my_id, "exception", exc), block=False
                )
                raise
|
| 6739 |
- |
|
| 6740 |
- self.lock = lock |
|
| 6741 |
- self.unlock = unlock |
|
| 6742 |
- |
|
| 6743 |
    setting: stateful.Bundle[_types.VaultConfigServicesSettings] = (
        stateful.Bundle("setting")
    )
    """A bundle of single-service settings objects drawn by the rules."""
    configuration: stateful.Bundle[_types.VaultConfig] = stateful.Bundle(
        "configuration"
    )
    """A bundle of full vault configurations drawn by the rules."""
|
| 6751 |
- |
|
| 6752 |
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Initialize the state machine."""
        super().__init__(*args, **kwargs)
        # The sequence of CLI invocations to run both serially and
        # concurrently; populated by the `add_*_action` rules.
        self.actions: list[FakeConfigurationMutexAction] = []
        # Determine the step count by poking around in the hypothesis
        # internals.  As this isn't guaranteed to be stable, turn off
        # coverage.
        try:  # pragma: no cover
            settings: hypothesis.settings | None
            settings = FakeConfigurationMutexStateMachine.TestCase.settings
        except AttributeError:  # pragma: no cover
            settings = None
        self.step_count = (
            tests.machinery.hypothesis.get_concurrency_step_count(settings)
        )
|
| 6767 |
- |
|
| 6768 |
- @stateful.initialize( |
|
| 6769 |
- target=configuration, |
|
| 6770 |
- configs=strategies.lists( |
|
| 6771 |
- vault_full_config(), |
|
| 6772 |
- min_size=8, |
|
| 6773 |
- max_size=8, |
|
| 6774 |
- ), |
|
| 6775 |
- ) |
|
| 6776 |
- def declare_initial_configs( |
|
| 6777 |
- self, |
|
| 6778 |
- configs: list[_types.VaultConfig], |
|
| 6779 |
- ) -> stateful.MultipleResults[_types.VaultConfig]: |
|
| 6780 |
- """Initialize the configuration bundle with eight configurations.""" |
|
| 6781 |
- return stateful.multiple(*configs) |
|
| 6782 |
- |
|
| 6783 |
    @stateful.initialize(
        target=setting,
        configs=strategies.lists(
            vault_full_config(),
            min_size=4,
            max_size=4,
        ),
    )
    def extract_initial_settings(
        self,
        configs: list[_types.VaultConfig],
    ) -> stateful.MultipleResults[_types.VaultConfigServicesSettings]:
        """Initialize the settings bundle with four service settings.

        Args:
            configs:
                Four full vault configurations, drawn by hypothesis,
                whose per-service settings are harvested.

        """
        settings: list[_types.VaultConfigServicesSettings] = []
        for c in configs:
            settings.extend(c["services"].values())
        # Deep-copy so that later mutation of a bundle entry cannot
        # affect the configurations it was extracted from.
        return stateful.multiple(*map(copy.deepcopy, settings))
|
| 6800 |
- |
|
| 6801 |
    @stateful.initialize(
        config=vault_full_config(),
    )
    def declare_initial_action(
        self,
        config: _types.VaultConfig,
    ) -> None:
        """Initialize the actions bundle from the configuration bundle.

        This is roughly comparable to the
        [`add_import_configuration_action`][] general rule, but adding
        it as a separate initialize rule avoids having to guard every
        other action-amending rule against empty action sequences, which
        would discard huge portions of the rule selection search space
        and thus trigger loads of hypothesis health check warnings.

        Args:
            config:
                The configuration to import as the initial action.

        """
        command_line = ["--import", "-", "--overwrite-existing"]
        input = json.dumps(config)  # noqa: A001
        hypothesis.note(f"# {command_line = }, {input = }")
        action = FakeConfigurationMutexAction(
            command_line=command_line, input=input
        )
        self.actions.append(action)
|
| 6825 |
- |
|
| 6826 |
    @stateful.rule(
        setting=setting.filter(bool),
        maybe_unset=strategies.sets(
            strategies.sampled_from(VALID_PROPERTIES),
            max_size=3,
        ),
        overwrite=strategies.booleans(),
    )
    def add_set_globals_action(
        self,
        setting: _types.VaultConfigGlobalSettings,
        maybe_unset: set[str],
        overwrite: bool,
    ) -> None:
        """Set the global settings of a configuration.

        Args:
            setting:
                The new global settings.
            maybe_unset:
                Settings keys to additionally unset, if not already
                present in the new settings.  Corresponds to the
                `--unset` command-line argument.
            overwrite:
                Overwrite the settings object if true, or merge if
                false.  Corresponds to the `--overwrite-existing` and
                `--merge-existing` command-line arguments.

        """
        # Don't attempt to unset a key we are also explicitly setting.
        maybe_unset = set(maybe_unset) - setting.keys()
        command_line = (
            [
                "--config",
                "--overwrite-existing" if overwrite else "--merge-existing",
            ]
            + [f"--unset={key}" for key in maybe_unset]
            + [
                f"--{key}={value}"
                for key, value in setting.items()
                if key in VALID_PROPERTIES
            ]
        )
        input = None  # noqa: A001
        hypothesis.note(f"# {command_line = }, {input = }")
        action = FakeConfigurationMutexAction(
            command_line=command_line, input=input
        )
        self.actions.append(action)
|
| 6874 |
- |
|
| 6875 |
    @stateful.rule(
        service=strategies.sampled_from(KNOWN_SERVICES),
        setting=setting.filter(bool),
        maybe_unset=strategies.sets(
            strategies.sampled_from(VALID_PROPERTIES),
            max_size=3,
        ),
        overwrite=strategies.booleans(),
    )
    def add_set_service_action(
        self,
        service: str,
        setting: _types.VaultConfigServicesSettings,
        maybe_unset: set[str],
        overwrite: bool,
    ) -> None:
        """Set the named service settings for a configuration.

        Args:
            service:
                The name of the service to set.
            setting:
                The new service settings.
            maybe_unset:
                Settings keys to additionally unset, if not already
                present in the new settings.  Corresponds to the
                `--unset` command-line argument.
            overwrite:
                Overwrite the settings object if true, or merge if
                false.  Corresponds to the `--overwrite-existing` and
                `--merge-existing` command-line arguments.

        """
        # Don't attempt to unset a key we are also explicitly setting.
        maybe_unset = set(maybe_unset) - setting.keys()
        command_line = (
            [
                "--config",
                "--overwrite-existing" if overwrite else "--merge-existing",
            ]
            + [f"--unset={key}" for key in maybe_unset]
            + [
                f"--{key}={value}"
                for key, value in setting.items()
                if key in VALID_PROPERTIES
            ]
            # "--" separates options from the service name argument.
            + ["--", service]
        )
        input = None  # noqa: A001
        hypothesis.note(f"# {command_line = }, {input = }")
        action = FakeConfigurationMutexAction(
            command_line=command_line, input=input
        )
        self.actions.append(action)
|
| 6928 |
- |
|
| 6929 |
    @stateful.rule()
    def add_purge_global_action(
        self,
    ) -> None:
        """Purge the globals of a configuration."""
        command_line = ["--delete-globals"]
        # NOTE(review): the commented-out 'y' suggests a confirmation
        # reply was once fed on stdin here — confirm before changing.
        input = None  # 'y'  # noqa: A001
        hypothesis.note(f"# {command_line = }, {input = }")
        action = FakeConfigurationMutexAction(
            command_line=command_line, input=input
        )
        self.actions.append(action)
|
| 6941 |
- |
|
| 6942 |
    @stateful.rule(
        service=strategies.sampled_from(KNOWN_SERVICES),
    )
    def add_purge_service_action(
        self,
        service: str,
    ) -> None:
        """Purge the settings of a named service in a configuration.

        Args:
            service:
                The service name to purge.

        """
        command_line = ["--delete", "--", service]
        input = None  # 'y'  # noqa: A001
        hypothesis.note(f"# {command_line = }, {input = }")
        action = FakeConfigurationMutexAction(
            command_line=command_line, input=input
        )
        self.actions.append(action)
|
| 6963 |
- |
|
| 6964 |
    @stateful.rule()
    def add_purge_all_action(
        self,
    ) -> None:
        """Purge the entire configuration."""
        command_line = ["--clear"]
        input = None  # 'y'  # noqa: A001
        hypothesis.note(f"# {command_line = }, {input = }")
        action = FakeConfigurationMutexAction(
            command_line=command_line, input=input
        )
        self.actions.append(action)
|
| 6976 |
- |
|
| 6977 |
- @stateful.rule( |
|
| 6978 |
- config_to_import=configuration, |
|
| 6979 |
- overwrite=strategies.booleans(), |
|
| 6980 |
- ) |
|
| 6981 |
- def add_import_configuration_action( |
|
| 6982 |
- self, |
|
| 6983 |
- config_to_import: _types.VaultConfig, |
|
| 6984 |
- overwrite: bool, |
|
| 6985 |
- ) -> None: |
|
| 6986 |
- """Import the given configuration. |
|
| 6987 |
- |
|
| 6988 |
- Args: |
|
| 6989 |
- config_to_import: |
|
| 6990 |
- The configuration to import. |
|
| 6991 |
- overwrite: |
|
| 6992 |
- Overwrite the base configuration if true, or merge if |
|
| 6993 |
- false. Corresponds to the `--overwrite-existing` and |
|
| 6994 |
- `--merge-existing` command-line arguments. |
|
| 6995 |
- |
|
| 6996 |
- """ |
|
| 6997 |
- command_line = ["--import", "-"] + ( |
|
| 6998 |
- ["--overwrite-existing"] if overwrite else [] |
|
| 6999 |
- ) |
|
| 7000 |
- input = json.dumps(config_to_import) # noqa: A001 |
|
| 7001 |
- hypothesis.note(f"# {command_line = }, {input = }")
|
|
| 7002 |
- action = FakeConfigurationMutexAction( |
|
| 7003 |
- command_line=command_line, input=input |
|
| 7004 |
- ) |
|
| 7005 |
- self.actions.append(action) |
|
| 7006 |
- |
|
| 7007 |
    @stateful.precondition(lambda self: len(self.actions) > 0)
    @stateful.invariant()
    def run_actions(  # noqa: C901
        self,
    ) -> None:
        """Run the actions, serially and concurrently.

        Run the actions once serially, then once more concurrently with
        the faked configuration mutex, and assert that both runs yield
        identical intermediate and final results.

        We must run the concurrent version in processes, not threads or
        Python async functions, because the `click` testing machinery
        manipulates global properties (e.g. the standard I/O streams,
        the current directory, and the environment), and we require this
        manipulation to happen in a time-overlapped manner.

        However, running multiple processes increases the risk of the
        operating system imposing process count or memory limits on us.
        We therefore skip the test as a whole if we fail to start a new
        process due to lack of necessary resources (memory, processes,
        or open file descriptors).

        """
        if not TYPE_CHECKING:  # pragma: no branch
            multiprocessing = pytest.importorskip("multiprocessing")
        IPCMessage: TypeAlias = FakeConfigurationMutexStateMachine.IPCMessage
        intermediate_configs: dict[int, _types.VaultConfig] = {}
        intermediate_results: dict[
            int, tuple[bool, str | None, str | None]
        ] = {}
        true_configs: dict[int, _types.VaultConfig] = {}
        true_results: dict[int, tuple[bool, str | None, str | None]] = {}
        timeout = 30  # Hopefully slow enough to accommodate The Annoying OS.
        actions = self.actions
        mp = multiprocessing.get_context()
        # Coverage tracking writes coverage data to the current working
        # directory, but because the subprocesses are spawned within the
        # `tests.machinery.pytest.isolated_vault_config` context manager, their starting
        # working directory is the isolated one, not the original one.
        orig_cwd = pathlib.Path.cwd()

        fatal_process_creation_errnos = {
            # Specified by POSIX for fork(3).
            errno.ENOMEM,
            # Specified by POSIX for fork(3).
            errno.EAGAIN,
            # Specified by Linux/glibc for fork(3).
            getattr(errno, "ENOSYS", errno.ENOMEM),
            # Specified by POSIX for posix_spawn(3).
            errno.EINVAL,
        }

        hypothesis.note(f"# {actions = }")

        # Phase 1: run all actions serially to establish the expected
        # ("true") per-action results and configurations.
        stack = contextlib.ExitStack()
        with stack:
            runner = tests.machinery.CliRunner(mix_stderr=False)
            monkeypatch = stack.enter_context(pytest.MonkeyPatch.context())
            stack.enter_context(
                tests.machinery.pytest.isolated_vault_config(
                    monkeypatch=monkeypatch,
                    runner=runner,
                    vault_config={"services": {}},
                )
            )
            for i, action in enumerate(actions):
                result = runner.invoke(
                    cli.derivepassphrase_vault,
                    args=action.command_line,
                    input=action.input,
                    catch_exceptions=True,
                )
                true_configs[i] = copy.copy(cli_helpers.load_config())
                true_results[i] = (
                    result.clean_exit(empty_stderr=False),
                    result.stdout,
                    result.stderr,
                )

        # Phase 2: run the same actions in subprocesses, serialized via
        # the faked mutex protocol, recording intermediate results.
        with stack:  # noqa: PLR1702
            runner = tests.machinery.CliRunner(mix_stderr=False)
            monkeypatch = stack.enter_context(pytest.MonkeyPatch.context())
            stack.enter_context(
                tests.machinery.pytest.isolated_vault_config(
                    monkeypatch=monkeypatch,
                    runner=runner,
                    vault_config={"services": {}},
                )
            )

            child_output_queue: multiprocessing.Queue[IPCMessage] = mp.Queue()
            child_input_queues: list[
                multiprocessing.Queue[IPCMessage] | None
            ] = []
            processes: list[multiprocessing.process.BaseProcess] = []
            processes_pending: set[multiprocessing.process.BaseProcess] = set()
            ready_wait: set[int] = set()

            try:
                for i, action in enumerate(actions):
                    q: multiprocessing.Queue[IPCMessage] | None = mp.Queue()
                    try:
                        p: multiprocessing.process.BaseProcess = mp.Process(
                            name=f"fake-mutex-action-{i:02d}",
                            target=run_actions_handler,
                            kwargs={
                                "id_num": i,
                                "timeout": timeout,
                                "action": action,
                                "input_queue": q,
                                "output_queue": child_output_queue,
                            },
                            daemon=False,
                        )
                        p.start()
                    except OSError as exc:  # pragma: no cover
                        if exc.errno in fatal_process_creation_errnos:
                            pytest.skip(
                                "cannot test mutex functionality due to "
                                "lack of system resources for "
                                "creating enough subprocesses"
                            )
                        raise
                    else:
                        processes.append(p)
                        processes_pending.add(p)
                        child_input_queues.append(q)
                        ready_wait.add(i)

                # Pump the IPC main loop until every subprocess has
                # delivered its result and been reaped.
                while processes_pending:
                    try:
                        self.mainloop(
                            timeout=timeout,
                            child_output_queue=child_output_queue,
                            child_input_queues=child_input_queues,
                            ready_wait=ready_wait,
                            intermediate_configs=intermediate_configs,
                            intermediate_results=intermediate_results,
                            processes=processes,
                            processes_pending=processes_pending,
                            block=True,
                        )
                    except Exception as exc:  # pragma: no cover
                        # Broadcast the failure so no subprocess stays
                        # blocked waiting on its input queue.
                        for i, q in enumerate(child_input_queues):
                            if q:
                                q.put(IPCMessage(i, "exception", exc))
                        for p in processes_pending:
                            p.join(timeout=timeout)
                        raise
            finally:
                try:
                    # Drain any remaining messages non-blockingly.
                    while True:
                        try:
                            self.mainloop(
                                timeout=timeout,
                                child_output_queue=child_output_queue,
                                child_input_queues=child_input_queues,
                                ready_wait=ready_wait,
                                intermediate_configs=intermediate_configs,
                                intermediate_results=intermediate_results,
                                processes=processes,
                                processes_pending=processes_pending,
                                block=False,
                            )
                        except queue.Empty:
                            break
                finally:
                    # The subprocesses have this
                    # `tests.machinery.pytest.isolated_vault_config` directory as their
                    # startup and working directory, so systems like
                    # coverage tracking write their data files to this
                    # directory.  We need to manually move them back to
                    # the starting working directory if they are to
                    # survive this test.
                    for coverage_file in pathlib.Path.cwd().glob(
                        ".coverage.*"
                    ):
                        shutil.move(coverage_file, orig_cwd)
        hypothesis.note(
            f"# {true_results = }, {intermediate_results = }, "
            f"identical = {true_results == intermediate_results}"
        )
        hypothesis.note(
            f"# {true_configs = }, {intermediate_configs = }, "
            f"identical = {true_configs == intermediate_configs}"
        )
        assert intermediate_results == true_results
        assert intermediate_configs == true_configs
|
| 7196 |
- |
|
| 7197 |
    @staticmethod
    def mainloop(
        *,
        timeout: int,
        child_output_queue: multiprocessing.Queue[
            FakeConfigurationMutexStateMachine.IPCMessage
        ],
        child_input_queues: list[
            multiprocessing.Queue[
                FakeConfigurationMutexStateMachine.IPCMessage
            ]
            | None
        ],
        ready_wait: set[int],
        intermediate_configs: dict[int, _types.VaultConfig],
        intermediate_results: dict[int, tuple[bool, str | None, str | None]],
        processes: list[multiprocessing.process.BaseProcess],
        processes_pending: set[multiprocessing.process.BaseProcess],
        block: bool = True,
    ) -> None:
        """Handle one IPC message from the mutex client subprocesses.

        Take the next message off `child_output_queue` and dispatch on
        its tag: "exception" messages are re-raised here; "ready"
        messages shrink `ready_wait` until every client has checked in,
        at which point client 0 is sent "go"; "config" messages record
        a client's configuration snapshot; "result" messages record a
        client's final result, reap its process, and send "go" to the
        next client in line.  Any other message is a protocol violation
        and raises an `AssertionError`.

        Args:
            timeout:
                Timeout for each queue get/put and process join.
            child_output_queue:
                The queue of messages from the clients to us.
            child_input_queues:
                The per-client queues of messages from us to each
                client; entries are set to `None` once a client has
                delivered its result.
            ready_wait:
                The set of client IDs that have not yet reported
                "ready".
            intermediate_configs:
                Per-client configuration snapshots, filled in here.
            intermediate_results:
                Per-client result tuples, filled in here.
            processes:
                All client subprocesses, by client ID.
            processes_pending:
                The subprocesses that have not yet delivered a result.
            block:
                Whether to block on the queue read; a non-blocking read
                may raise `queue.Empty`.

        """
        IPCMessage: TypeAlias = FakeConfigurationMutexStateMachine.IPCMessage
        msg = child_output_queue.get(block=block, timeout=timeout)
        # TODO(the-13th-letter): Rewrite using structural pattern
        # matching.
        # https://the13thletter.info/derivepassphrase/latest/pycompatibility/#after-eol-py3.9
        if (  # pragma: no cover
            isinstance(msg, IPCMessage)
            and msg[1] == "exception"
            and isinstance(msg[2], Exception)
        ):
            e = msg[2]
            raise e
        if isinstance(msg, IPCMessage) and msg[1] == "ready":
            n = msg[0]
            ready_wait.remove(n)
            if not ready_wait:
                # All clients have checked in; start the first one.
                assert child_input_queues
                assert child_input_queues[0]
                child_input_queues[0].put(
                    IPCMessage(0, "go", None),
                    block=True,
                    timeout=timeout,
                )
        elif isinstance(msg, IPCMessage) and msg[1] == "config":
            n = msg[0]
            config = msg[2]
            intermediate_configs[n] = cast("_types.VaultConfig", config)
        elif isinstance(msg, IPCMessage) and msg[1] == "result":
            n = msg[0]
            result_ = msg[2]
            result_tuple: tuple[bool, str | None, str | None] = cast(
                "tuple[bool, str | None, str | None]", result_
            )
            intermediate_results[n] = result_tuple
            # This client is done: retire its input queue and reap the
            # process before handing the baton to the next client.
            child_input_queues[n] = None
            p = processes[n]
            p.join(timeout=timeout)
            assert not p.is_alive()
            processes_pending.remove(p)
            assert result_tuple[0], (
                f"action #{n} exited with an error: {result_tuple!r}"
            )
            if n + 1 < len(processes):
                next_child_input_queue = child_input_queues[n + 1]
                assert next_child_input_queue
                next_child_input_queue.put(
                    IPCMessage(n + 1, "go", None),
                    block=True,
                    timeout=timeout,
                )
        else:
            raise AssertionError()
|
| 7269 |
- |
|
| 7270 |
- |
|
| 7271 |
# Reexport the state machine's TestCase attribute at module level so
# pytest auto-discovers it; skip it entirely where multiprocessing is
# unsupported.
TestFakedConfigurationMutex = (
    tests.machinery.pytest.skip_if_no_multiprocessing_support(
        FakeConfigurationMutexStateMachine.TestCase
    )
)
"""The [`unittest.TestCase`][] class that will actually be run."""
|
| 7277 |
- |
|
| 7278 |
- |
|
| 7279 | 5993 |
def completion_item( |
| 7280 | 5994 |
item: str | click.shell_completion.CompletionItem, |
| 7281 | 5995 |
) -> click.shell_completion.CompletionItem: |
| ... | ... |
@@ -0,0 +1,1319 @@ |
| 1 |
+# SPDX-FileCopyrightText: 2025 Marco Ricci <software@the13thletter.info> |
|
| 2 |
+# |
|
| 3 |
+# SPDX-License-Identifier: Zlib |
|
| 4 |
+ |
|
| 5 |
+from __future__ import annotations |
|
| 6 |
+ |
|
| 7 |
+import contextlib |
|
| 8 |
+import copy |
|
| 9 |
+import errno |
|
| 10 |
+import json |
|
| 11 |
+import pathlib |
|
| 12 |
+import queue |
|
| 13 |
+import shutil |
|
| 14 |
+from typing import TYPE_CHECKING, cast |
|
| 15 |
+ |
|
| 16 |
+import hypothesis |
|
| 17 |
+import pytest |
|
| 18 |
+from hypothesis import stateful, strategies |
|
| 19 |
+from typing_extensions import Any, NamedTuple, TypeAlias |
|
| 20 |
+ |
|
| 21 |
+import tests.data |
|
| 22 |
+import tests.data.callables |
|
| 23 |
+import tests.machinery |
|
| 24 |
+import tests.machinery.hypothesis |
|
| 25 |
+import tests.machinery.pytest |
|
| 26 |
+from derivepassphrase import _types, cli |
|
| 27 |
+from derivepassphrase._internals import cli_helpers |
|
| 28 |
+ |
|
| 29 |
+if TYPE_CHECKING: |
|
| 30 |
+ import multiprocessing |
|
| 31 |
+ from collections.abc import Iterable |
|
| 32 |
+ |
|
| 33 |
+ from typing_extensions import Literal |
|
| 34 |
+ |
|
| 35 |
+# All tests in this module are heavy-duty tests. |
|
| 36 |
+pytestmark = [tests.machinery.pytest.heavy_duty] |
|
| 37 |
+ |
|
| 38 |
KNOWN_SERVICES = (tests.data.DUMMY_SERVICE, "email", "bank", "work")
"""Known service names to sample from.  Used for the
[`ConfigManagementStateMachine`][] rules and strategies."""
VALID_PROPERTIES = (
    "length",
    "repeat",
    "upper",
    "lower",
    "number",
    "space",
    "dash",
    "symbol",
)
"""Known vault configuration property names to sample from.  Used for
the [`ConfigManagementStateMachine`][] rules and strategies."""
|
| 51 |
+ |
|
| 52 |
+ |
|
| 53 |
def build_reduced_vault_config_settings(
    config: _types.VaultConfigServicesSettings,
    keys_to_prune: frozenset[str],
) -> _types.VaultConfigServicesSettings:
    """Return a service settings object with certain keys pruned.

    The input object is left untouched; the returned object shares no
    mutable state with it.

    Args:
        config:
            The original service settings object.
        keys_to_prune:
            The keys to prune from the settings object.

    """
    # Rebuild the settings from scratch, deep-copying each surviving
    # value so the result is fully independent of the input.
    retained = {
        key: copy.deepcopy(value)
        for key, value in config.items()
        if key not in keys_to_prune
    }
    return cast("_types.VaultConfigServicesSettings", retained)
|
| 70 |
+ |
|
| 71 |
+ |
|
| 72 |
# Module-level singleton so every call to `services_strategy` reuses
# one strategy object instead of rebuilding it.
SERVICES_STRATEGY = strategies.builds(
    build_reduced_vault_config_settings,
    tests.machinery.hypothesis.vault_full_service_config(),
    # Prune up to seven of the eight known properties, yielding
    # "incomplete" service configurations.
    strategies.sets(
        strategies.sampled_from(VALID_PROPERTIES),
        max_size=7,
    ),
)
"""A hypothesis strategy to build incomplete service configurations."""
|
| 81 |
+ |
|
| 82 |
+ |
|
| 83 |
def services_strategy() -> strategies.SearchStrategy[
    _types.VaultConfigServicesSettings
]:
    """Return a strategy to build incomplete service configurations.

    Always returns the shared module-level [`SERVICES_STRATEGY`][]
    object; no new strategy is constructed per call.

    """
    return SERVICES_STRATEGY
|
| 88 |
+ |
|
| 89 |
+ |
|
| 90 |
def assemble_config(
    global_data: _types.VaultConfigGlobalSettings,
    service_data: list[tuple[str, _types.VaultConfigServicesSettings]],
) -> _types.VaultConfig:
    """Return a vault config using the global and service data.

    The "global" key is only included when `global_data` is non-empty.

    Args:
        global_data:
            The global settings for the configuration.
        service_data:
            Pairs of service names and their settings.

    """
    services_dict = dict(service_data)
    if not global_data:
        return {"services": services_dict}
    return {"global": global_data, "services": services_dict}
|
| 101 |
+ |
|
| 102 |
+ |
|
| 103 |
@strategies.composite
def draw_service_name_and_data(
    draw: hypothesis.strategies.DrawFn,
    num_entries: int,
) -> tuple[tuple[str, _types.VaultConfigServicesSettings], ...]:
    """Draw a service name and settings, as a hypothesis strategy.

    Will draw service names from [`KNOWN_SERVICES`][] and service
    settings via [`services_strategy`][].

    Args:
        draw:
            The `draw` function, as provided for by hypothesis.
        num_entries:
            The number of services to draw.

    Returns:
        A sequence of pairs of service names and service settings.

    """
    possible_services = list(KNOWN_SERVICES)
    selected_services: list[str] = []
    # Draw names one at a time without replacement, so the resulting
    # service names are pairwise distinct.
    for _ in range(num_entries):
        selected_services.append(
            draw(strategies.sampled_from(possible_services))
        )
        possible_services.remove(selected_services[-1])
    return tuple(
        (service, draw(services_strategy())) for service in selected_services
    )
|
| 133 |
+ |
|
| 134 |
+ |
|
| 135 |
VAULT_FULL_CONFIG = strategies.builds(
    assemble_config,
    # NOTE(review): the global settings are drawn from the *service*
    # settings strategy — presumably the two settings shapes coincide;
    # confirm against `_types`.
    services_strategy(),
    # Two to four distinct named services per configuration.
    strategies.integers(
        min_value=2,
        max_value=4,
    ).flatmap(draw_service_name_and_data),
)
"""A hypothesis strategy to build full vault configurations."""
|
| 144 |
+ |
|
| 145 |
+ |
|
| 146 |
def vault_full_config() -> strategies.SearchStrategy[_types.VaultConfig]:
    """Return a strategy to build full vault configurations.

    Always returns the shared module-level [`VAULT_FULL_CONFIG`][]
    object; no new strategy is constructed per call.

    """
    return VAULT_FULL_CONFIG
|
| 149 |
+ |
|
| 150 |
+ |
|
| 151 |
+class ConfigManagementStateMachine(stateful.RuleBasedStateMachine): |
|
| 152 |
+ """A state machine recording changes in the vault configuration. |
|
| 153 |
+ |
|
| 154 |
+ Record possible configuration states in bundles, then in each rule, |
|
| 155 |
+ take a configuration and manipulate it somehow. |
|
| 156 |
+ |
|
| 157 |
+ Attributes: |
|
| 158 |
+ setting: |
|
| 159 |
+ A bundle for single-service settings. |
|
| 160 |
+ configuration: |
|
| 161 |
+ A bundle for full vault configurations. |
|
| 162 |
+ |
|
| 163 |
+ """ |
|
| 164 |
+ |
|
| 165 |
    def __init__(self) -> None:
        """Initialize self, set up context managers and enter them."""
        super().__init__()
        self.runner = tests.machinery.CliRunner(mix_stderr=False)
        # The exit stack keeps the monkeypatching and the isolated
        # vault configuration alive for the lifetime of the state
        # machine.
        self.exit_stack = contextlib.ExitStack().__enter__()
        self.monkeypatch = self.exit_stack.enter_context(
            pytest.MonkeyPatch().context()
        )
        self.isolated_config = self.exit_stack.enter_context(
            tests.machinery.pytest.isolated_vault_config(
                monkeypatch=self.monkeypatch,
                runner=self.runner,
                vault_config={"services": {}},
            )
        )
|
| 180 |
+ |
|
| 181 |
    setting: stateful.Bundle[_types.VaultConfigServicesSettings] = (
        stateful.Bundle("setting")
    )
    """A bundle of single-service settings objects drawn by the rules."""
    configuration: stateful.Bundle[_types.VaultConfig] = stateful.Bundle(
        "configuration"
    )
    """A bundle of full vault configurations drawn by the rules."""
|
| 189 |
+ |
|
| 190 |
    @stateful.initialize(
        target=configuration,
        configs=strategies.lists(
            vault_full_config(),
            min_size=8,
            max_size=8,
        ),
    )
    def declare_initial_configs(
        self,
        configs: Iterable[_types.VaultConfig],
    ) -> stateful.MultipleResults[_types.VaultConfig]:
        """Initialize the configuration bundle with eight configurations.

        Args:
            configs:
                The eight full vault configurations, drawn by
                hypothesis.

        """
        return stateful.multiple(*configs)
|
| 204 |
+ |
|
| 205 |
    @stateful.initialize(
        target=setting,
        configs=strategies.lists(
            vault_full_config(),
            min_size=4,
            max_size=4,
        ),
    )
    def extract_initial_settings(
        self,
        configs: list[_types.VaultConfig],
    ) -> stateful.MultipleResults[_types.VaultConfigServicesSettings]:
        """Initialize the settings bundle with four service settings.

        Args:
            configs:
                Four full vault configurations, drawn by hypothesis,
                whose per-service settings are harvested.

        """
        settings: list[_types.VaultConfigServicesSettings] = []
        for c in configs:
            settings.extend(c["services"].values())
        # Deep-copy so that later mutation of a bundle entry cannot
        # affect the configurations it was extracted from.
        return stateful.multiple(*map(copy.deepcopy, settings))
|
| 222 |
+ |
|
| 223 |
+ @staticmethod |
|
| 224 |
+ def fold_configs( |
|
| 225 |
+ c1: _types.VaultConfig, c2: _types.VaultConfig |
|
| 226 |
+ ) -> _types.VaultConfig: |
|
| 227 |
+ """Fold `c1` into `c2`, overriding the latter.""" |
|
| 228 |
+ new_global_dict = c1.get("global", c2.get("global"))
|
|
| 229 |
+ if new_global_dict is not None: |
|
| 230 |
+ return {
|
|
| 231 |
+ "global": new_global_dict, |
|
| 232 |
+ "services": {**c2["services"], **c1["services"]},
|
|
| 233 |
+ } |
|
| 234 |
+ return {
|
|
| 235 |
+ "services": {**c2["services"], **c1["services"]},
|
|
| 236 |
+ } |
|
| 237 |
+ |
|
| 238 |
    @stateful.rule(
        target=configuration,
        config=configuration,
        setting=setting.filter(bool),
        maybe_unset=strategies.sets(
            strategies.sampled_from(VALID_PROPERTIES),
            max_size=3,
        ),
        overwrite=strategies.booleans(),
    )
    def set_globals(
        self,
        config: _types.VaultConfig,
        setting: _types.VaultConfigGlobalSettings,
        maybe_unset: set[str],
        overwrite: bool,
    ) -> _types.VaultConfig:
        """Set the global settings of a configuration.

        Args:
            config:
                The configuration to edit.
            setting:
                The new global settings.
            maybe_unset:
                Settings keys to additionally unset, if not already
                present in the new settings. Corresponds to the
                `--unset` command-line argument.
            overwrite:
                Overwrite the settings object if true, or merge if
                false. Corresponds to the `--overwrite-existing` and
                `--merge-existing` command-line arguments.

        Returns:
            The amended configuration.

        """
        cli_helpers.save_config(config)
        config_global = config.get("global", {})
        # Only unset keys which the new settings don't immediately
        # re-set again afterwards.
        maybe_unset = set(maybe_unset) - setting.keys()
        # Simulate the expected result in memory first ...
        if overwrite:
            config["global"] = config_global = {}
        elif maybe_unset:
            for key in maybe_unset:
                config_global.pop(key, None)  # type: ignore[misc]
        config.setdefault("global", {}).update(setting)
        assert _types.is_vault_config(config)
        # NOTE: This relies on settings_obj containing only the keys
        # "length", "repeat", "upper", "lower", "number", "space",
        # "dash" and "symbol".
        # ... then perform the same edit via the command-line interface.
        result = self.runner.invoke(
            cli.derivepassphrase_vault,
            [
                "--config",
                "--overwrite-existing" if overwrite else "--merge-existing",
            ]
            + [f"--unset={key}" for key in maybe_unset]
            + [
                f"--{key}={value}"
                for key, value in setting.items()
                if key in VALID_PROPERTIES
            ],
            catch_exceptions=False,
        )
        assert result.clean_exit(empty_stderr=False)
        # The CLI must have produced exactly the simulated result.
        assert cli_helpers.load_config() == config
        return config
| 306 |
    @stateful.rule(
        target=configuration,
        config=configuration,
        service=strategies.sampled_from(KNOWN_SERVICES),
        setting=setting.filter(bool),
        maybe_unset=strategies.sets(
            strategies.sampled_from(VALID_PROPERTIES),
            max_size=3,
        ),
        overwrite=strategies.booleans(),
    )
    def set_service(
        self,
        config: _types.VaultConfig,
        service: str,
        setting: _types.VaultConfigServicesSettings,
        maybe_unset: set[str],
        overwrite: bool,
    ) -> _types.VaultConfig:
        """Set the named service settings for a configuration.

        Args:
            config:
                The configuration to edit.
            service:
                The name of the service to set.
            setting:
                The new service settings.
            maybe_unset:
                Settings keys to additionally unset, if not already
                present in the new settings. Corresponds to the
                `--unset` command-line argument.
            overwrite:
                Overwrite the settings object if true, or merge if
                false. Corresponds to the `--overwrite-existing` and
                `--merge-existing` command-line arguments.

        Returns:
            The amended configuration.

        """
        cli_helpers.save_config(config)
        config_service = config["services"].get(service, {})
        # Only unset keys which the new settings don't immediately
        # re-set again afterwards.
        maybe_unset = set(maybe_unset) - setting.keys()
        # Simulate the expected result in memory first ...
        if overwrite:
            config["services"][service] = config_service = {}
        elif maybe_unset:
            for key in maybe_unset:
                config_service.pop(key, None)  # type: ignore[misc]
        config["services"].setdefault(service, {}).update(setting)
        assert _types.is_vault_config(config)
        # NOTE: This relies on settings_obj containing only the keys
        # "length", "repeat", "upper", "lower", "number", "space",
        # "dash" and "symbol".
        # ... then perform the same edit via the command-line interface.
        result = self.runner.invoke(
            cli.derivepassphrase_vault,
            [
                "--config",
                "--overwrite-existing" if overwrite else "--merge-existing",
            ]
            + [f"--unset={key}" for key in maybe_unset]
            + [
                f"--{key}={value}"
                for key, value in setting.items()
                if key in VALID_PROPERTIES
            ]
            + ["--", service],
            catch_exceptions=False,
        )
        assert result.clean_exit(empty_stderr=False)
        # The CLI must have produced exactly the simulated result.
        assert cli_helpers.load_config() == config
        return config
| 379 |
    @stateful.rule(
        target=configuration,
        config=configuration,
    )
    def purge_global(
        self,
        config: _types.VaultConfig,
    ) -> _types.VaultConfig:
        """Purge the globals of a configuration.

        Args:
            config:
                The configuration to edit.

        Returns:
            The pruned configuration.

        """
        cli_helpers.save_config(config)
        config.pop("global", None)
        result = self.runner.invoke(
            cli.derivepassphrase_vault,
            ["--delete-globals"],
            # Answer "y" to the deletion confirmation prompt.
            input="y",
            catch_exceptions=False,
        )
        assert result.clean_exit(empty_stderr=False)
        # The CLI must have produced exactly the simulated result.
        assert cli_helpers.load_config() == config
        return config
| 409 |
    @stateful.rule(
        target=configuration,
        # Draw only from configurations that actually have services,
        # then pair each with one of its own service names.
        config_and_service=configuration.filter(
            lambda c: bool(c["services"])
        ).flatmap(
            lambda c: strategies.tuples(
                strategies.just(c),
                strategies.sampled_from(tuple(c["services"].keys())),
            )
        ),
    )
    def purge_service(
        self,
        config_and_service: tuple[_types.VaultConfig, str],
    ) -> _types.VaultConfig:
        """Purge the settings of a named service in a configuration.

        Args:
            config_and_service:
                A 2-tuple containing the configuration to edit, and the
                service name to purge.

        Returns:
            The pruned configuration.

        """
        config, service = config_and_service
        cli_helpers.save_config(config)
        config["services"].pop(service, None)
        result = self.runner.invoke(
            cli.derivepassphrase_vault,
            ["--delete", "--", service],
            # Answer "y" to the deletion confirmation prompt.
            input="y",
            catch_exceptions=False,
        )
        assert result.clean_exit(empty_stderr=False)
        # The CLI must have produced exactly the simulated result.
        assert cli_helpers.load_config() == config
        return config
| 448 |
    @stateful.rule(
        target=configuration,
        config=configuration,
    )
    def purge_all(
        self,
        config: _types.VaultConfig,
    ) -> _types.VaultConfig:
        """Purge the entire configuration.

        Args:
            config:
                The configuration to edit.

        Returns:
            The empty configuration.

        """
        cli_helpers.save_config(config)
        # An empty vault configuration still contains a "services" key.
        config = {"services": {}}
        result = self.runner.invoke(
            cli.derivepassphrase_vault,
            ["--clear"],
            # Answer "y" to the deletion confirmation prompt.
            input="y",
            catch_exceptions=False,
        )
        assert result.clean_exit(empty_stderr=False)
        # The CLI must have produced exactly the simulated result.
        assert cli_helpers.load_config() == config
        return config
| 478 |
    @stateful.rule(
        target=configuration,
        base_config=configuration,
        config_to_import=configuration,
        overwrite=strategies.booleans(),
    )
    def import_configuration(
        self,
        base_config: _types.VaultConfig,
        config_to_import: _types.VaultConfig,
        overwrite: bool,
    ) -> _types.VaultConfig:
        """Import the given configuration into a base configuration.

        Args:
            base_config:
                The configuration to import into.
            config_to_import:
                The configuration to import.
            overwrite:
                Overwrite the base configuration if true, or merge if
                false. Corresponds to the `--overwrite-existing` and
                `--merge-existing` command-line arguments.

        Returns:
            The imported or merged configuration.

        """
        cli_helpers.save_config(base_config)
        # Simulate the expected result: merging folds the imported
        # configuration into the base one, overwriting replaces it.
        config = (
            self.fold_configs(config_to_import, base_config)
            if not overwrite
            else config_to_import
        )
        assert _types.is_vault_config(config)
        result = self.runner.invoke(
            cli.derivepassphrase_vault,
            ["--import", "-"]
            + (["--overwrite-existing"] if overwrite else []),
            # The configuration to import is passed on standard input.
            input=json.dumps(config_to_import),
            catch_exceptions=False,
        )
        assert result.clean_exit(empty_stderr=False)
        # The CLI must have produced exactly the simulated result.
        assert cli_helpers.load_config() == config
        return config
| 524 |
    def teardown(self) -> None:
        """Upon teardown, exit all contexts entered in `__init__`."""
        # Closes the monkeypatch context and the isolated vault
        # configuration, in reverse order of entry.
        self.exit_stack.close()
|
| 528 |
+ |
|
| 529 |
# Reexport the auto-generated test case at module level so that pytest
# discovers and runs it.
TestConfigManagement = ConfigManagementStateMachine.TestCase
"""The [`unittest.TestCase`][] class that will actually be run."""
|
| 532 |
+ |
|
| 533 |
+class FakeConfigurationMutexAction(NamedTuple): |
|
| 534 |
+ """An action/a step in the [`FakeConfigurationMutexStateMachine`][]. |
|
| 535 |
+ |
|
| 536 |
+ Attributes: |
|
| 537 |
+ command_line: |
|
| 538 |
+ The command-line for `derivepassphrase vault` to execute. |
|
| 539 |
+ input: |
|
| 540 |
+ The input to this command. |
|
| 541 |
+ |
|
| 542 |
+ """ |
|
| 543 |
+ |
|
| 544 |
+ command_line: list[str] |
|
| 545 |
+ """""" |
|
| 546 |
+ input: str | bytes | None = None |
|
| 547 |
+ """""" |
|
| 548 |
+ |
|
| 549 |
+ |
|
| 550 |
def run_actions_handler(
    id_num: int,
    action: FakeConfigurationMutexAction,
    *,
    input_queue: queue.Queue,
    output_queue: queue.Queue,
    timeout: int,
) -> None:
    """Prepare the faked mutex, then run `action`.

    This is a top-level handler function -- to be used in a new
    [`multiprocessing.Process`][] -- to run a single action from the
    [`FakeConfigurationMutexStateMachine`][]. Output from this function
    must be sent down the output queue instead of relying on the call
    stack. Additionally, because this runs in a separate process, we
    need to restart coverage tracking if it is currently running.

    Args:
        id_num:
            The internal ID of this subprocess.
        action:
            The action to execute.
        input_queue:
            The queue for data passed from the manager/parent process to
            this subprocess.
        output_queue:
            The queue for data passed from this subprocess to the
            manager/parent process.
        timeout:
            The maximum amount of time to wait for a data transfer along
            the input or the output queue. If exceeded, we exit
            immediately.

    """
    with pytest.MonkeyPatch.context() as monkeypatch:
        # Replace the real configuration mutex with the queue-driven
        # stub, so the manager process controls the locking order.
        monkeypatch.setattr(
            cli_helpers,
            "configuration_mutex",
            lambda: FakeConfigurationMutexStateMachine.ConfigurationMutexStub(
                my_id=id_num,
                input_queue=input_queue,
                output_queue=output_queue,
                timeout=timeout,
            ),
        )
        runner = tests.machinery.CliRunner(mix_stderr=False)
        try:
            result = runner.invoke(
                cli.derivepassphrase_vault,
                args=action.command_line,
                input=action.input,
                catch_exceptions=True,
            )
            # Report the invocation outcome back to the manager.
            output_queue.put(
                FakeConfigurationMutexStateMachine.IPCMessage(
                    id_num,
                    "result",
                    (
                        result.clean_exit(empty_stderr=False),
                        copy.copy(result.stdout),
                        copy.copy(result.stderr),
                    ),
                ),
                block=True,
                timeout=timeout,
            )
        except Exception as exc:  # pragma: no cover # noqa: BLE001
            # Exceptions cannot propagate across the process boundary;
            # report them to the manager via the output queue instead.
            output_queue.put(
                FakeConfigurationMutexStateMachine.IPCMessage(
                    id_num, "exception", exc
                ),
                block=False,
            )
|
| 624 |
+ |
|
| 625 |
+@hypothesis.settings( |
|
| 626 |
+ stateful_step_count=tests.machinery.hypothesis.get_concurrency_step_count(), |
|
| 627 |
+ deadline=None, |
|
| 628 |
+) |
|
| 629 |
+class FakeConfigurationMutexStateMachine(stateful.RuleBasedStateMachine): |
|
| 630 |
+ """A state machine simulating the (faked) configuration mutex. |
|
| 631 |
+ |
|
| 632 |
+ Generate an ordered set of concurrent writers to the |
|
| 633 |
+ derivepassphrase configuration, then test that the writers' accesses |
|
| 634 |
+ are serialized correctly, i.e., test that the writers correctly use |
|
| 635 |
+ the mutex to avoid concurrent accesses, under the assumption that |
|
| 636 |
+ the mutex itself is correctly implemented. |
|
| 637 |
+ |
|
| 638 |
+ We use a custom mutex implementation to both ensure that all writers |
|
| 639 |
+ attempt to lock the configuration at the same time and that the lock |
|
| 640 |
+ is granted in our desired order. This test is therefore independent |
|
| 641 |
+ of the actual (operating system-specific) mutex implementation in |
|
| 642 |
+ `derivepassphrase`. |
|
| 643 |
+ |
|
| 644 |
+ Attributes: |
|
| 645 |
+ setting: |
|
| 646 |
+ A bundle for single-service settings. |
|
| 647 |
+ configuration: |
|
| 648 |
+ A bundle for full vault configurations. |
|
| 649 |
+ |
|
| 650 |
+ """ |
|
| 651 |
+ |
|
| 652 |
    class IPCMessage(NamedTuple):
        """A message for inter-process communication.

        Used by the configuration mutex stub class to affect/signal the
        control flow amongst the linked mutex clients.

        Attributes:
            child_id:
                The ID of the sending or receiving child process.
            message:
                One of "ready", "go", "config", "result" or "exception".
            payload:
                The (optional) message payload.

        """

        # ID of the child process this message is sent to or from.
        child_id: int
        """"""
        # The protocol phase/type of this message.
        message: Literal["ready", "go", "config", "result", "exception"]
        """"""
        # Phase-dependent payload, e.g. a config, a result tuple, or
        # an exception object; may be None.
        payload: object | None
        """"""
|
| 675 |
    class ConfigurationMutexStub(cli_helpers.ConfigurationMutex):
        """Configuration mutex subclass that enforces a locking order.

        Each configuration mutex stub object ("mutex client") has an
        associated ID, and one read-only and one write-only pipe
        (actually: [`multiprocessing.Queue`][] objects) to the "manager"
        instance coordinating these stub objects. First, the mutex
        client signals readiness, then the manager signals when the
        mutex shall be considered "acquired", then finally the mutex
        client sends the result back (simultaneously releasing the mutex
        again). The manager may optionally send an abort signal if the
        operations take too long.

        This subclass also copies the effective vault configuration
        to `intermediate_configs` upon releasing the lock.

        """

        def __init__(
            self,
            *,
            my_id: int,
            timeout: int,
            input_queue: queue.Queue[
                FakeConfigurationMutexStateMachine.IPCMessage
            ],
            output_queue: queue.Queue[
                FakeConfigurationMutexStateMachine.IPCMessage
            ],
        ) -> None:
            """Initialize this mutex client.

            Args:
                my_id:
                    The ID of this client.
                timeout:
                    The timeout for each get and put operation on the
                    queues.
                input_queue:
                    The message queue for IPC messages from the manager
                    instance to this mutex client.
                output_queue:
                    The message queue for IPC messages from this mutex
                    client to the manager instance.

            """
            super().__init__()

            # The lock/unlock implementations are defined as closures
            # over the constructor arguments, then installed as
            # instance attributes below.
            def lock() -> None:
                """Simulate locking of the mutex.

                Issue a "ready" message, wait for a "go", then return.
                If an exception occurs, issue an "exception" message,
                then raise the exception.

                """
                IPCMessage: TypeAlias = (
                    FakeConfigurationMutexStateMachine.IPCMessage
                )
                try:
                    output_queue.put(
                        IPCMessage(my_id, "ready", None),
                        block=True,
                        timeout=timeout,
                    )
                    ok = input_queue.get(block=True, timeout=timeout)
                    if ok != IPCMessage(my_id, "go", None):  # pragma: no cover
                        # Unexpected reply: report it to the manager,
                        # then raise it (wrapped if not an exception).
                        output_queue.put(
                            IPCMessage(my_id, "exception", ok), block=False
                        )
                        raise (
                            ok[2]
                            if isinstance(ok[2], BaseException)
                            else RuntimeError(ok[2])
                        )
                except (queue.Empty, queue.Full) as exc:  # pragma: no cover
                    # Queue timeout: report and bail out without raising.
                    output_queue.put(
                        IPCMessage(my_id, "exception", exc), block=False
                    )
                    return

            def unlock() -> None:
                """Simulate unlocking of the mutex.

                Issue a "config" message, then return. If an exception
                occurs, issue an "exception" message, then raise the
                exception.

                """
                IPCMessage: TypeAlias = (
                    FakeConfigurationMutexStateMachine.IPCMessage
                )
                try:
                    # Releasing the lock also publishes the effective
                    # configuration at this point in time.
                    output_queue.put(
                        IPCMessage(
                            my_id,
                            "config",
                            copy.copy(cli_helpers.load_config()),
                        ),
                        block=True,
                        timeout=timeout,
                    )
                except (queue.Empty, queue.Full) as exc:  # pragma: no cover
                    output_queue.put(
                        IPCMessage(my_id, "exception", exc), block=False
                    )
                    raise

            self.lock = lock
            self.unlock = unlock
|
| 786 |
+ setting: stateful.Bundle[_types.VaultConfigServicesSettings] = ( |
|
| 787 |
+ stateful.Bundle("setting")
|
|
| 788 |
+ ) |
|
| 789 |
+ """""" |
|
| 790 |
+ configuration: stateful.Bundle[_types.VaultConfig] = stateful.Bundle( |
|
| 791 |
+ "configuration" |
|
| 792 |
+ ) |
|
| 793 |
+ """""" |
|
| 794 |
+ |
|
| 795 |
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Initialize the state machine."""
        super().__init__(*args, **kwargs)
        # The scripted actions accumulated by the rules; executed (both
        # serially and concurrently) by the run_actions invariant.
        self.actions: list[FakeConfigurationMutexAction] = []
        # Determine the step count by poking around in the hypothesis
        # internals. As this isn't guaranteed to be stable, turn off
        # coverage.
        try:  # pragma: no cover
            settings: hypothesis.settings | None
            settings = FakeConfigurationMutexStateMachine.TestCase.settings
        except AttributeError:  # pragma: no cover
            settings = None
        self.step_count = (
            tests.machinery.hypothesis.get_concurrency_step_count(settings)
        )
|
| 811 |
    @stateful.initialize(
        target=configuration,
        configs=strategies.lists(
            vault_full_config(),
            min_size=8,
            max_size=8,
        ),
    )
    def declare_initial_configs(
        self,
        configs: list[_types.VaultConfig],
    ) -> stateful.MultipleResults[_types.VaultConfig]:
        """Initialize the configuration bundle with eight configurations.

        Args:
            configs:
                Eight full vault configurations, drawn from the
                `vault_full_config` strategy.

        Returns:
            All eight configurations, deposited into the
            `configuration` bundle.

        """
        return stateful.multiple(*configs)
|
| 826 |
    @stateful.initialize(
        target=setting,
        configs=strategies.lists(
            vault_full_config(),
            min_size=4,
            max_size=4,
        ),
    )
    def extract_initial_settings(
        self,
        configs: list[_types.VaultConfig],
    ) -> stateful.MultipleResults[_types.VaultConfigServicesSettings]:
        """Initialize the settings bundle with four service settings.

        Args:
            configs:
                Four full vault configurations whose service settings
                are harvested.

        Returns:
            Deep copies of all service settings found in the drawn
            configurations, deposited into the `setting` bundle.

        """
        settings: list[_types.VaultConfigServicesSettings] = []
        for c in configs:
            settings.extend(c["services"].values())
        # Deep-copy so later mutations cannot leak into the bundle.
        return stateful.multiple(*map(copy.deepcopy, settings))
|
| 844 |
    @stateful.initialize(
        config=vault_full_config(),
    )
    def declare_initial_action(
        self,
        config: _types.VaultConfig,
    ) -> None:
        """Initialize the actions bundle from the configuration bundle.

        This is roughly comparable to the
        [`add_import_configuration_action`][] general rule, but adding
        it as a separate initialize rule avoids having to guard every
        other action-amending rule against empty action sequences, which
        would discard huge portions of the rule selection search space
        and thus trigger loads of hypothesis health check warnings.

        Args:
            config:
                The initial configuration to queue for import.

        """
        command_line = ["--import", "-", "--overwrite-existing"]
        input = json.dumps(config)  # noqa: A001
        # Record the queued invocation in the hypothesis failure report.
        hypothesis.note(f"# {command_line = }, {input = }")
        action = FakeConfigurationMutexAction(
            command_line=command_line, input=input
        )
        self.actions.append(action)
|
| 869 |
    @stateful.rule(
        setting=setting.filter(bool),
        maybe_unset=strategies.sets(
            strategies.sampled_from(VALID_PROPERTIES),
            max_size=3,
        ),
        overwrite=strategies.booleans(),
    )
    def add_set_globals_action(
        self,
        setting: _types.VaultConfigGlobalSettings,
        maybe_unset: set[str],
        overwrite: bool,
    ) -> None:
        """Set the global settings of a configuration.

        Queues the corresponding command-line invocation; it is only
        executed later, by the `run_actions` invariant.

        Args:
            setting:
                The new global settings.
            maybe_unset:
                Settings keys to additionally unset, if not already
                present in the new settings. Corresponds to the
                `--unset` command-line argument.
            overwrite:
                Overwrite the settings object if true, or merge if
                false. Corresponds to the `--overwrite-existing` and
                `--merge-existing` command-line arguments.

        """
        # Only unset keys which the new settings don't immediately
        # re-set again afterwards.
        maybe_unset = set(maybe_unset) - setting.keys()
        command_line = (
            [
                "--config",
                "--overwrite-existing" if overwrite else "--merge-existing",
            ]
            + [f"--unset={key}" for key in maybe_unset]
            + [
                f"--{key}={value}"
                for key, value in setting.items()
                if key in VALID_PROPERTIES
            ]
        )
        input = None  # noqa: A001
        # Record the queued invocation in the hypothesis failure report.
        hypothesis.note(f"# {command_line = }, {input = }")
        action = FakeConfigurationMutexAction(
            command_line=command_line, input=input
        )
        self.actions.append(action)
|
| 918 |
    @stateful.rule(
        service=strategies.sampled_from(KNOWN_SERVICES),
        setting=setting.filter(bool),
        maybe_unset=strategies.sets(
            strategies.sampled_from(VALID_PROPERTIES),
            max_size=3,
        ),
        overwrite=strategies.booleans(),
    )
    def add_set_service_action(
        self,
        service: str,
        setting: _types.VaultConfigServicesSettings,
        maybe_unset: set[str],
        overwrite: bool,
    ) -> None:
        """Set the named service settings for a configuration.

        Queues the corresponding command-line invocation; it is only
        executed later, by the `run_actions` invariant.

        Args:
            service:
                The name of the service to set.
            setting:
                The new service settings.
            maybe_unset:
                Settings keys to additionally unset, if not already
                present in the new settings. Corresponds to the
                `--unset` command-line argument.
            overwrite:
                Overwrite the settings object if true, or merge if
                false. Corresponds to the `--overwrite-existing` and
                `--merge-existing` command-line arguments.

        """
        # Only unset keys which the new settings don't immediately
        # re-set again afterwards.
        maybe_unset = set(maybe_unset) - setting.keys()
        command_line = (
            [
                "--config",
                "--overwrite-existing" if overwrite else "--merge-existing",
            ]
            + [f"--unset={key}" for key in maybe_unset]
            + [
                f"--{key}={value}"
                for key, value in setting.items()
                if key in VALID_PROPERTIES
            ]
            + ["--", service]
        )
        input = None  # noqa: A001
        # Record the queued invocation in the hypothesis failure report.
        hypothesis.note(f"# {command_line = }, {input = }")
        action = FakeConfigurationMutexAction(
            command_line=command_line, input=input
        )
        self.actions.append(action)
|
| 972 |
    @stateful.rule()
    def add_purge_global_action(
        self,
    ) -> None:
        """Purge the globals of a configuration.

        Queues the corresponding command-line invocation; it is only
        executed later, by the `run_actions` invariant.

        """
        command_line = ["--delete-globals"]
        input = None  # 'y' # noqa: A001
        # Record the queued invocation in the hypothesis failure report.
        hypothesis.note(f"# {command_line = }, {input = }")
        action = FakeConfigurationMutexAction(
            command_line=command_line, input=input
        )
        self.actions.append(action)
|
| 985 |
    @stateful.rule(
        service=strategies.sampled_from(KNOWN_SERVICES),
    )
    def add_purge_service_action(
        self,
        service: str,
    ) -> None:
        """Purge the settings of a named service in a configuration.

        Queues the corresponding command-line invocation; it is only
        executed later, by the `run_actions` invariant.

        Args:
            service:
                The service name to purge.

        """
        command_line = ["--delete", "--", service]
        input = None  # 'y' # noqa: A001
        # Record the queued invocation in the hypothesis failure report.
        hypothesis.note(f"# {command_line = }, {input = }")
        action = FakeConfigurationMutexAction(
            command_line=command_line, input=input
        )
        self.actions.append(action)
|
| 1007 |
    @stateful.rule()
    def add_purge_all_action(
        self,
    ) -> None:
        """Purge the entire configuration.

        Queues the corresponding command-line invocation; it is only
        executed later, by the `run_actions` invariant.

        """
        command_line = ["--clear"]
        input = None  # 'y' # noqa: A001
        # Record the queued invocation in the hypothesis failure report.
        hypothesis.note(f"# {command_line = }, {input = }")
        action = FakeConfigurationMutexAction(
            command_line=command_line, input=input
        )
        self.actions.append(action)
|
| 1020 |
    @stateful.rule(
        config_to_import=configuration,
        overwrite=strategies.booleans(),
    )
    def add_import_configuration_action(
        self,
        config_to_import: _types.VaultConfig,
        overwrite: bool,
    ) -> None:
        """Import the given configuration.

        Queues the corresponding command-line invocation; it is only
        executed later, by the `run_actions` invariant.

        Args:
            config_to_import:
                The configuration to import.
            overwrite:
                Overwrite the base configuration if true, or merge if
                false. Corresponds to the `--overwrite-existing` and
                `--merge-existing` command-line arguments.

        """
        command_line = ["--import", "-"] + (
            ["--overwrite-existing"] if overwrite else []
        )
        # The configuration to import is passed on standard input.
        input = json.dumps(config_to_import)  # noqa: A001
        # Record the queued invocation in the hypothesis failure report.
        hypothesis.note(f"# {command_line = }, {input = }")
        action = FakeConfigurationMutexAction(
            command_line=command_line, input=input
        )
        self.actions.append(action)
| 1050 |
+ @stateful.precondition(lambda self: len(self.actions) > 0) |
|
| 1051 |
+ @stateful.invariant() |
|
| 1052 |
+ def run_actions( # noqa: C901 |
|
| 1053 |
+ self, |
|
| 1054 |
+ ) -> None: |
|
| 1055 |
+ """Run the actions, serially and concurrently. |
|
| 1056 |
+ |
|
| 1057 |
+ Run the actions once serially, then once more concurrently with |
|
| 1058 |
+ the faked configuration mutex, and assert that both runs yield |
|
| 1059 |
+ identical intermediate and final results. |
|
| 1060 |
+ |
|
| 1061 |
+ We must run the concurrent version in processes, not threads or |
|
| 1062 |
+ Python async functions, because the `click` testing machinery |
|
| 1063 |
+ manipulates global properties (e.g. the standard I/O streams, |
|
| 1064 |
+ the current directory, and the environment), and we require this |
|
| 1065 |
+ manipulation to happen in a time-overlapped manner. |
|
| 1066 |
+ |
|
| 1067 |
+ However, running multiple processes increases the risk of the |
|
| 1068 |
+ operating system imposing process count or memory limits on us. |
|
| 1069 |
+ We therefore skip the test as a whole if we fail to start a new |
|
| 1070 |
+ process due to lack of necessary resources (memory, processes, |
|
| 1071 |
+ or open file descriptors). |
|
| 1072 |
+ |
|
| 1073 |
+ """ |
|
| 1074 |
+ if not TYPE_CHECKING: # pragma: no branch |
|
| 1075 |
+ multiprocessing = pytest.importorskip("multiprocessing")
|
|
| 1076 |
+ IPCMessage: TypeAlias = FakeConfigurationMutexStateMachine.IPCMessage |
|
| 1077 |
+ intermediate_configs: dict[int, _types.VaultConfig] = {}
|
|
| 1078 |
+ intermediate_results: dict[ |
|
| 1079 |
+ int, tuple[bool, str | None, str | None] |
|
| 1080 |
+ ] = {}
|
|
| 1081 |
+ true_configs: dict[int, _types.VaultConfig] = {}
|
|
| 1082 |
+ true_results: dict[int, tuple[bool, str | None, str | None]] = {}
|
|
| 1083 |
+ timeout = 30 # Hopefully slow enough to accomodate The Annoying OS. |
|
| 1084 |
+ actions = self.actions |
|
| 1085 |
+ mp = multiprocessing.get_context() |
|
| 1086 |
+ # Coverage tracking writes coverage data to the current working |
|
| 1087 |
+ # directory, but because the subprocesses are spawned within the |
|
| 1088 |
+ # `tests.machinery.pytest.isolated_vault_config` context manager, their starting |
|
| 1089 |
+ # working directory is the isolated one, not the original one. |
|
| 1090 |
+ orig_cwd = pathlib.Path.cwd() |
|
| 1091 |
+ |
|
| 1092 |
+ fatal_process_creation_errnos = {
|
|
| 1093 |
+ # Specified by POSIX for fork(3). |
|
| 1094 |
+ errno.ENOMEM, |
|
| 1095 |
+ # Specified by POSIX for fork(3). |
|
| 1096 |
+ errno.EAGAIN, |
|
| 1097 |
+ # Specified by Linux/glibc for fork(3) |
|
| 1098 |
+ getattr(errno, "ENOSYS", errno.ENOMEM), |
|
| 1099 |
+ # Specified by POSIX for posix_spawn(3). |
|
| 1100 |
+ errno.EINVAL, |
|
| 1101 |
+ } |
|
| 1102 |
+ |
|
| 1103 |
+ hypothesis.note(f"# {actions = }")
|
|
| 1104 |
+ |
|
| 1105 |
+ stack = contextlib.ExitStack() |
|
| 1106 |
+ with stack: |
|
| 1107 |
+ runner = tests.machinery.CliRunner(mix_stderr=False) |
|
| 1108 |
+ monkeypatch = stack.enter_context(pytest.MonkeyPatch.context()) |
|
| 1109 |
+ stack.enter_context( |
|
| 1110 |
+ tests.machinery.pytest.isolated_vault_config( |
|
| 1111 |
+ monkeypatch=monkeypatch, |
|
| 1112 |
+ runner=runner, |
|
| 1113 |
+ vault_config={"services": {}},
|
|
| 1114 |
+ ) |
|
| 1115 |
+ ) |
|
| 1116 |
+ for i, action in enumerate(actions): |
|
| 1117 |
+ result = runner.invoke( |
|
| 1118 |
+ cli.derivepassphrase_vault, |
|
| 1119 |
+ args=action.command_line, |
|
| 1120 |
+ input=action.input, |
|
| 1121 |
+ catch_exceptions=True, |
|
| 1122 |
+ ) |
|
| 1123 |
+ true_configs[i] = copy.copy(cli_helpers.load_config()) |
|
| 1124 |
+ true_results[i] = ( |
|
| 1125 |
+ result.clean_exit(empty_stderr=False), |
|
| 1126 |
+ result.stdout, |
|
| 1127 |
+ result.stderr, |
|
| 1128 |
+ ) |
|
| 1129 |
+ |
|
| 1130 |
+ with stack: # noqa: PLR1702 |
|
| 1131 |
+ runner = tests.machinery.CliRunner(mix_stderr=False) |
|
| 1132 |
+ monkeypatch = stack.enter_context(pytest.MonkeyPatch.context()) |
|
| 1133 |
+ stack.enter_context( |
|
| 1134 |
+ tests.machinery.pytest.isolated_vault_config( |
|
| 1135 |
+ monkeypatch=monkeypatch, |
|
| 1136 |
+ runner=runner, |
|
| 1137 |
+ vault_config={"services": {}},
|
|
| 1138 |
+ ) |
|
| 1139 |
+ ) |
|
| 1140 |
+ |
|
| 1141 |
+ child_output_queue: multiprocessing.Queue[IPCMessage] = mp.Queue() |
|
| 1142 |
+ child_input_queues: list[ |
|
| 1143 |
+ multiprocessing.Queue[IPCMessage] | None |
|
| 1144 |
+ ] = [] |
|
| 1145 |
+ processes: list[multiprocessing.process.BaseProcess] = [] |
|
| 1146 |
+ processes_pending: set[multiprocessing.process.BaseProcess] = set() |
|
| 1147 |
+ ready_wait: set[int] = set() |
|
| 1148 |
+ |
|
| 1149 |
+ try: |
|
| 1150 |
+ for i, action in enumerate(actions): |
|
| 1151 |
+ q: multiprocessing.Queue[IPCMessage] | None = mp.Queue() |
|
| 1152 |
+ try: |
|
| 1153 |
+ p: multiprocessing.process.BaseProcess = mp.Process( |
|
| 1154 |
+ name=f"fake-mutex-action-{i:02d}",
|
|
| 1155 |
+ target=run_actions_handler, |
|
| 1156 |
+ kwargs={
|
|
| 1157 |
+ "id_num": i, |
|
| 1158 |
+ "timeout": timeout, |
|
| 1159 |
+ "action": action, |
|
| 1160 |
+ "input_queue": q, |
|
| 1161 |
+ "output_queue": child_output_queue, |
|
| 1162 |
+ }, |
|
| 1163 |
+ daemon=False, |
|
| 1164 |
+ ) |
|
| 1165 |
+ p.start() |
|
| 1166 |
+ except OSError as exc: # pragma: no cover |
|
| 1167 |
+ if exc.errno in fatal_process_creation_errnos: |
|
| 1168 |
+ pytest.skip( |
|
| 1169 |
+ "cannot test mutex functionality due to " |
|
| 1170 |
+ "lack of system resources for " |
|
| 1171 |
+ "creating enough subprocesses" |
|
| 1172 |
+ ) |
|
| 1173 |
+ raise |
|
| 1174 |
+ else: |
|
| 1175 |
+ processes.append(p) |
|
| 1176 |
+ processes_pending.add(p) |
|
| 1177 |
+ child_input_queues.append(q) |
|
| 1178 |
+ ready_wait.add(i) |
|
| 1179 |
+ |
|
| 1180 |
+ while processes_pending: |
|
| 1181 |
+ try: |
|
| 1182 |
+ self.mainloop( |
|
| 1183 |
+ timeout=timeout, |
|
| 1184 |
+ child_output_queue=child_output_queue, |
|
| 1185 |
+ child_input_queues=child_input_queues, |
|
| 1186 |
+ ready_wait=ready_wait, |
|
| 1187 |
+ intermediate_configs=intermediate_configs, |
|
| 1188 |
+ intermediate_results=intermediate_results, |
|
| 1189 |
+ processes=processes, |
|
| 1190 |
+ processes_pending=processes_pending, |
|
| 1191 |
+ block=True, |
|
| 1192 |
+ ) |
|
| 1193 |
+ except Exception as exc: # pragma: no cover |
|
| 1194 |
+ for i, q in enumerate(child_input_queues): |
|
| 1195 |
+ if q: |
|
| 1196 |
+ q.put(IPCMessage(i, "exception", exc)) |
|
| 1197 |
+ for p in processes_pending: |
|
| 1198 |
+ p.join(timeout=timeout) |
|
| 1199 |
+ raise |
|
| 1200 |
+ finally: |
|
| 1201 |
+ try: |
|
| 1202 |
+ while True: |
|
| 1203 |
+ try: |
|
| 1204 |
+ self.mainloop( |
|
| 1205 |
+ timeout=timeout, |
|
| 1206 |
+ child_output_queue=child_output_queue, |
|
| 1207 |
+ child_input_queues=child_input_queues, |
|
| 1208 |
+ ready_wait=ready_wait, |
|
| 1209 |
+ intermediate_configs=intermediate_configs, |
|
| 1210 |
+ intermediate_results=intermediate_results, |
|
| 1211 |
+ processes=processes, |
|
| 1212 |
+ processes_pending=processes_pending, |
|
| 1213 |
+ block=False, |
|
| 1214 |
+ ) |
|
| 1215 |
+ except queue.Empty: |
|
| 1216 |
+ break |
|
| 1217 |
+ finally: |
|
| 1218 |
+ # The subprocesses have this |
|
| 1219 |
+ # `tests.machinery.pytest.isolated_vault_config` directory as their |
|
| 1220 |
+ # startup and working directory, so systems like |
|
| 1221 |
+ # coverage tracking write their data files to this |
|
| 1222 |
+ # directory. We need to manually move them back to |
|
| 1223 |
+ # the starting working directory if they are to |
|
| 1224 |
+ # survive this test. |
|
| 1225 |
+ for coverage_file in pathlib.Path.cwd().glob( |
|
| 1226 |
+ ".coverage.*" |
|
| 1227 |
+ ): |
|
| 1228 |
+ shutil.move(coverage_file, orig_cwd) |
|
| 1229 |
+ hypothesis.note( |
|
| 1230 |
+ f"# {true_results = }, {intermediate_results = }, "
|
|
| 1231 |
+ f"identical = {true_results == intermediate_results}"
|
|
| 1232 |
+ ) |
|
| 1233 |
+ hypothesis.note( |
|
| 1234 |
+ f"# {true_configs = }, {intermediate_configs = }, "
|
|
| 1235 |
+ f"identical = {true_configs == intermediate_configs}"
|
|
| 1236 |
+ ) |
|
| 1237 |
+ assert intermediate_results == true_results |
|
| 1238 |
+ assert intermediate_configs == true_configs |
|
| 1239 |
+ |
|
| 1240 |
+ @staticmethod |
|
| 1241 |
+ def mainloop( |
|
| 1242 |
+ *, |
|
| 1243 |
+ timeout: int, |
|
| 1244 |
+ child_output_queue: multiprocessing.Queue[ |
|
| 1245 |
+ FakeConfigurationMutexStateMachine.IPCMessage |
|
| 1246 |
+ ], |
|
| 1247 |
+ child_input_queues: list[ |
|
| 1248 |
+ multiprocessing.Queue[ |
|
| 1249 |
+ FakeConfigurationMutexStateMachine.IPCMessage |
|
| 1250 |
+ ] |
|
| 1251 |
+ | None |
|
| 1252 |
+ ], |
|
| 1253 |
+ ready_wait: set[int], |
|
| 1254 |
+ intermediate_configs: dict[int, _types.VaultConfig], |
|
| 1255 |
+ intermediate_results: dict[int, tuple[bool, str | None, str | None]], |
|
| 1256 |
+ processes: list[multiprocessing.process.BaseProcess], |
|
| 1257 |
+ processes_pending: set[multiprocessing.process.BaseProcess], |
|
| 1258 |
+ block: bool = True, |
|
| 1259 |
+ ) -> None: |
|
| 1260 |
+ IPCMessage: TypeAlias = FakeConfigurationMutexStateMachine.IPCMessage |
|
| 1261 |
+ msg = child_output_queue.get(block=block, timeout=timeout) |
|
| 1262 |
+ # TODO(the-13th-letter): Rewrite using structural pattern |
|
| 1263 |
+ # matching. |
|
| 1264 |
+ # https://the13thletter.info/derivepassphrase/latest/pycompatibility/#after-eol-py3.9 |
|
| 1265 |
+ if ( # pragma: no cover |
|
| 1266 |
+ isinstance(msg, IPCMessage) |
|
| 1267 |
+ and msg[1] == "exception" |
|
| 1268 |
+ and isinstance(msg[2], Exception) |
|
| 1269 |
+ ): |
|
| 1270 |
+ e = msg[2] |
|
| 1271 |
+ raise e |
|
| 1272 |
+ if isinstance(msg, IPCMessage) and msg[1] == "ready": |
|
| 1273 |
+ n = msg[0] |
|
| 1274 |
+ ready_wait.remove(n) |
|
| 1275 |
+ if not ready_wait: |
|
| 1276 |
+ assert child_input_queues |
|
| 1277 |
+ assert child_input_queues[0] |
|
| 1278 |
+ child_input_queues[0].put( |
|
| 1279 |
+ IPCMessage(0, "go", None), |
|
| 1280 |
+ block=True, |
|
| 1281 |
+ timeout=timeout, |
|
| 1282 |
+ ) |
|
| 1283 |
+ elif isinstance(msg, IPCMessage) and msg[1] == "config": |
|
| 1284 |
+ n = msg[0] |
|
| 1285 |
+ config = msg[2] |
|
| 1286 |
+ intermediate_configs[n] = cast("_types.VaultConfig", config)
|
|
| 1287 |
+ elif isinstance(msg, IPCMessage) and msg[1] == "result": |
|
| 1288 |
+ n = msg[0] |
|
| 1289 |
+ result_ = msg[2] |
|
| 1290 |
+ result_tuple: tuple[bool, str | None, str | None] = cast( |
|
| 1291 |
+ "tuple[bool, str | None, str | None]", result_ |
|
| 1292 |
+ ) |
|
| 1293 |
+ intermediate_results[n] = result_tuple |
|
| 1294 |
+ child_input_queues[n] = None |
|
| 1295 |
+ p = processes[n] |
|
| 1296 |
+ p.join(timeout=timeout) |
|
| 1297 |
+ assert not p.is_alive() |
|
| 1298 |
+ processes_pending.remove(p) |
|
| 1299 |
+ assert result_tuple[0], ( |
|
| 1300 |
+ f"action #{n} exited with an error: {result_tuple!r}"
|
|
| 1301 |
+ ) |
|
| 1302 |
+ if n + 1 < len(processes): |
|
| 1303 |
+ next_child_input_queue = child_input_queues[n + 1] |
|
| 1304 |
+ assert next_child_input_queue |
|
| 1305 |
+ next_child_input_queue.put( |
|
| 1306 |
+ IPCMessage(n + 1, "go", None), |
|
| 1307 |
+ block=True, |
|
| 1308 |
+ timeout=timeout, |
|
| 1309 |
+ ) |
|
| 1310 |
+ else: |
|
| 1311 |
+ raise AssertionError() |
|
| 1312 |
+ |
|
| 1313 |
+ |
|
| 1314 |
+TestFakedConfigurationMutex = ( |
|
| 1315 |
+ tests.machinery.pytest.skip_if_no_multiprocessing_support( |
|
| 1316 |
+ FakeConfigurationMutexStateMachine.TestCase |
|
| 1317 |
+ ) |
|
| 1318 |
+) |
|
| 1319 |
+"""The [`unittest.TestCase`][] class that will actually be run.""" |
| ... | ... |
@@ -23,7 +23,7 @@ import click |
| 23 | 23 |
import click.testing |
| 24 | 24 |
import hypothesis |
| 25 | 25 |
import pytest |
| 26 |
-from hypothesis import stateful, strategies |
|
| 26 |
+from hypothesis import strategies |
|
| 27 | 27 |
|
| 28 | 28 |
import tests.data |
| 29 | 29 |
import tests.data.callables |
| ... | ... |
@@ -1694,231 +1694,3 @@ class TestAgentInteraction: |
| 1694 | 1694 |
match=r"Malformed response|does not match request", |
| 1695 | 1695 |
): |
| 1696 | 1696 |
client.query_extensions() |
| 1697 |
- |
|
| 1698 |
- |
|
| 1699 |
-@strategies.composite |
|
| 1700 |
-def draw_alias_chain( |
|
| 1701 |
- draw: strategies.DrawFn, |
|
| 1702 |
- *, |
|
| 1703 |
- known_keys_strategy: strategies.SearchStrategy[str], |
|
| 1704 |
- new_keys_strategy: strategies.SearchStrategy[str], |
|
| 1705 |
- chain_size: strategies.SearchStrategy[int] = strategies.integers( # noqa: B008 |
|
| 1706 |
- min_value=1, |
|
| 1707 |
- max_value=5, |
|
| 1708 |
- ), |
|
| 1709 |
- existing: bool = False, |
|
| 1710 |
-) -> tuple[str, ...]: |
|
| 1711 |
- """Draw names for alias chains in the SSH agent socket provider registry. |
|
| 1712 |
- |
|
| 1713 |
- Depending on arguments, draw a set of names from the new keys bundle |
|
| 1714 |
- that do not yet exist in the registry, to insert as a new alias |
|
| 1715 |
- chain. Alternatively, draw a non-alias name from the known keys |
|
| 1716 |
- bundle, then draw other names that either don't exist yet in the |
|
| 1717 |
- registry, or that alias the first name directly or indirectly. The |
|
| 1718 |
- chain length, and whether to target existing registry entries or |
|
| 1719 |
- not, may be set statically, or may be drawn from a respective |
|
| 1720 |
- strategy. |
|
| 1721 |
- |
|
| 1722 |
- Args: |
|
| 1723 |
- draw: |
|
| 1724 |
- The `hypothesis` draw function. |
|
| 1725 |
- chain_size: |
|
| 1726 |
- A strategy for determining the correct alias chain length. |
|
| 1727 |
- Must not yield any integers less than 1. |
|
| 1728 |
- existing: |
|
| 1729 |
- If true, target an existing registry entry in the alias |
|
| 1730 |
- chain, and permit rewriting existing aliases of that same |
|
| 1731 |
- entry to the new alias. Otherwise, draw only new names. |
|
| 1732 |
- known_keys_strategy: |
|
| 1733 |
- A strategy for generating provider registry keys already |
|
| 1734 |
- contained in the registry. Typically, this is |
|
| 1735 |
- a [Bundle][hypothesis.stateful.Bundle]. |
|
| 1736 |
- new_keys_strategy: |
|
| 1737 |
- A strategy for generating provider registry keys not yet |
|
| 1738 |
- contained in the registry with high probability. Typically, |
|
| 1739 |
- this is a [consuming][hypothesis.stateful.consumes] |
|
| 1740 |
- [Bundle][hypothesis.stateful.Bundle]. |
|
| 1741 |
- |
|
| 1742 |
- Returns: |
|
| 1743 |
- A tuple of names forming an alias chain, each entry pointing to |
|
| 1744 |
- or intending to point to the previous entry in the tuple. |
|
| 1745 |
- |
|
| 1746 |
- """ |
|
| 1747 |
- registry = socketprovider.SocketProvider.registry |
|
| 1748 |
- |
|
| 1749 |
- def not_an_alias(key: str) -> bool: |
|
| 1750 |
- return key in registry and not isinstance(registry[key], str) |
|
| 1751 |
- |
|
| 1752 |
- def is_indirect_alias_of( |
|
| 1753 |
- key: str, target: str |
|
| 1754 |
- ) -> bool: # pragma: no cover |
|
| 1755 |
- if key == target: |
|
| 1756 |
- return False # not an alias |
|
| 1757 |
- seen = set() # loop detection |
|
| 1758 |
- while key not in seen: |
|
| 1759 |
- seen.add(key) |
|
| 1760 |
- if key not in registry: |
|
| 1761 |
- return False |
|
| 1762 |
- if not isinstance(registry[key], str): |
|
| 1763 |
- return False |
|
| 1764 |
- if key == target: |
|
| 1765 |
- return True |
|
| 1766 |
- tmp = registry[key] |
|
| 1767 |
- assert isinstance(tmp, str) |
|
| 1768 |
- key = tmp |
|
| 1769 |
- return False # loop |
|
| 1770 |
- |
|
| 1771 |
- err_msg_chain_size = "Chain sizes must always be 1 or larger." |
|
| 1772 |
- |
|
| 1773 |
- size = draw(chain_size) |
|
| 1774 |
- if size < 1: # pragma: no cover |
|
| 1775 |
- raise ValueError(err_msg_chain_size) |
|
| 1776 |
- names: list[str] = [] |
|
| 1777 |
- base: str | None = None |
|
| 1778 |
- if existing: |
|
| 1779 |
- names.append(draw(known_keys_strategy.filter(not_an_alias))) |
|
| 1780 |
- base = names[0] |
|
| 1781 |
- size -= 1 |
|
| 1782 |
- new_key_strategy = new_keys_strategy.filter( |
|
| 1783 |
- lambda key: key not in registry |
|
| 1784 |
- ) |
|
| 1785 |
- old_key_strategy = known_keys_strategy.filter( |
|
| 1786 |
- lambda key: is_indirect_alias_of(key, target=base) |
|
| 1787 |
- ) |
|
| 1788 |
- list_strategy_source = strategies.one_of( |
|
| 1789 |
- new_key_strategy, old_key_strategy |
|
| 1790 |
- ) |
|
| 1791 |
- else: |
|
| 1792 |
- list_strategy_source = new_keys_strategy.filter( |
|
| 1793 |
- lambda key: key not in registry |
|
| 1794 |
- ) |
|
| 1795 |
- list_strategy = strategies.lists( |
|
| 1796 |
- list_strategy_source.filter(lambda candidate: candidate != base), |
|
| 1797 |
- min_size=size, |
|
| 1798 |
- max_size=size, |
|
| 1799 |
- unique=True, |
|
| 1800 |
- ) |
|
| 1801 |
- names.extend(draw(list_strategy)) |
|
| 1802 |
- return tuple(names) |
|
| 1803 |
- |
|
| 1804 |
- |
|
| 1805 |
-class SSHAgentSocketProviderRegistryStateMachine( |
|
| 1806 |
- stateful.RuleBasedStateMachine |
|
| 1807 |
-): |
|
| 1808 |
- """A state machine for the SSH agent socket provider registry. |
|
| 1809 |
- |
|
| 1810 |
- Record possible changes to the socket provider registry, keeping track |
|
| 1811 |
- of true entries, aliases, and reservations. |
|
| 1812 |
- |
|
| 1813 |
- """ |
|
| 1814 |
- |
|
| 1815 |
- def __init__(self) -> None: |
|
| 1816 |
- """Initialize self, set up context managers and enter them.""" |
|
| 1817 |
- super().__init__() |
|
| 1818 |
- self.exit_stack = contextlib.ExitStack().__enter__() |
|
| 1819 |
- self.monkeypatch = self.exit_stack.enter_context( |
|
| 1820 |
- pytest.MonkeyPatch.context() |
|
| 1821 |
- ) |
|
| 1822 |
- self.orig_registry = socketprovider.SocketProvider.registry |
|
| 1823 |
- self.registry: dict[ |
|
| 1824 |
- str, _types.SSHAgentSocketProvider | str | None |
|
| 1825 |
- ] = {
|
|
| 1826 |
- "posix": self.orig_registry["posix"], |
|
| 1827 |
- "the_annoying_os": self.orig_registry["the_annoying_os"], |
|
| 1828 |
- "native": self.orig_registry["native"], |
|
| 1829 |
- "unix_domain": "posix", |
|
| 1830 |
- "the_annoying_os_named_pipe": "the_annoying_os", |
|
| 1831 |
- } |
|
| 1832 |
- self.monkeypatch.setattr( |
|
| 1833 |
- socketprovider.SocketProvider, "registry", self.registry |
|
| 1834 |
- ) |
|
| 1835 |
- self.model: dict[str, _types.SSHAgentSocketProvider | None] = {}
|
|
| 1836 |
- |
|
| 1837 |
- known_keys: stateful.Bundle[str] = stateful.Bundle("known_keys")
|
|
| 1838 |
- """""" |
|
| 1839 |
- new_keys: stateful.Bundle[str] = stateful.Bundle("new_keys")
|
|
| 1840 |
- """""" |
|
| 1841 |
- |
|
| 1842 |
- def sample_provider(self) -> _types.SSHAgentSocket: |
|
| 1843 |
- raise AssertionError |
|
| 1844 |
- |
|
| 1845 |
- @stateful.initialize( |
|
| 1846 |
- target=known_keys, |
|
| 1847 |
- ) |
|
| 1848 |
- def get_registry_keys(self) -> stateful.MultipleResults[str]: |
|
| 1849 |
- """Read the standard keys from the registry.""" |
|
| 1850 |
- self.model.update({
|
|
| 1851 |
- k: socketprovider.SocketProvider.lookup(k) for k in self.registry |
|
| 1852 |
- }) |
|
| 1853 |
- return stateful.multiple(*self.registry.keys()) |
|
| 1854 |
- |
|
| 1855 |
- @stateful.rule( |
|
| 1856 |
- target=new_keys, |
|
| 1857 |
- k=strategies.text("abcdefghijklmnopqrstuvwxyz0123456789_").filter(
|
|
| 1858 |
- lambda s: s not in socketprovider.SocketProvider.registry |
|
| 1859 |
- ), |
|
| 1860 |
- ) |
|
| 1861 |
- def new_key(self, k: str) -> str: |
|
| 1862 |
- return k |
|
| 1863 |
- |
|
| 1864 |
- @stateful.invariant() |
|
| 1865 |
- def check_consistency(self) -> None: |
|
| 1866 |
- lookup = socketprovider.SocketProvider.lookup |
|
| 1867 |
- assert self.registry.keys() == self.model.keys() |
|
| 1868 |
- for k in self.model: |
|
| 1869 |
- resolved = lookup(k) |
|
| 1870 |
- modelled = self.model[k] |
|
| 1871 |
- step1 = self.registry[k] |
|
| 1872 |
- manually = lookup(step1) if isinstance(step1, str) else step1 |
|
| 1873 |
- assert resolved == modelled |
|
| 1874 |
- assert resolved == manually |
|
| 1875 |
- |
|
| 1876 |
- @stateful.rule( |
|
| 1877 |
- target=known_keys, |
|
| 1878 |
- chain=draw_alias_chain( |
|
| 1879 |
- known_keys_strategy=known_keys, |
|
| 1880 |
- new_keys_strategy=stateful.consumes(new_keys), |
|
| 1881 |
- existing=True, |
|
| 1882 |
- ), |
|
| 1883 |
- ) |
|
| 1884 |
- def alias_existing( |
|
| 1885 |
- self, chain: tuple[str, ...] |
|
| 1886 |
- ) -> stateful.MultipleResults[str]: |
|
| 1887 |
- try: |
|
| 1888 |
- provider = socketprovider.SocketProvider.resolve(chain[0]) |
|
| 1889 |
- except NotImplementedError: # pragma: no cover [failsafe] |
|
| 1890 |
- provider = self.sample_provider |
|
| 1891 |
- assert ( |
|
| 1892 |
- socketprovider.SocketProvider.register(*chain)(provider) |
|
| 1893 |
- == provider |
|
| 1894 |
- ) |
|
| 1895 |
- for k in chain: |
|
| 1896 |
- self.model[k] = provider |
|
| 1897 |
- return stateful.multiple(*chain[1:]) |
|
| 1898 |
- |
|
| 1899 |
- @stateful.rule( |
|
| 1900 |
- target=known_keys, |
|
| 1901 |
- chain=draw_alias_chain( |
|
| 1902 |
- known_keys_strategy=known_keys, |
|
| 1903 |
- new_keys_strategy=stateful.consumes(new_keys), |
|
| 1904 |
- existing=False, |
|
| 1905 |
- ), |
|
| 1906 |
- ) |
|
| 1907 |
- def alias_new(self, chain: list[str]) -> stateful.MultipleResults[str]: |
|
| 1908 |
- provider = self.sample_provider |
|
| 1909 |
- assert ( |
|
| 1910 |
- socketprovider.SocketProvider.register(*chain)(provider) |
|
| 1911 |
- == provider |
|
| 1912 |
- ) |
|
| 1913 |
- for k in chain: |
|
| 1914 |
- self.model[k] = provider |
|
| 1915 |
- return stateful.multiple(*chain) |
|
| 1916 |
- |
|
| 1917 |
- def teardown(self) -> None: |
|
| 1918 |
- """Upon teardown, exit all contexts entered in `__init__`.""" |
|
| 1919 |
- self.exit_stack.close() |
|
| 1920 |
- |
|
| 1921 |
- |
|
| 1922 |
-TestSSHAgentSocketProviderRegistry = ( |
|
| 1923 |
- SSHAgentSocketProviderRegistryStateMachine.TestCase |
|
| 1924 |
-) |
| ... | ... |
@@ -0,0 +1,250 @@ |
| 1 |
+# SPDX-FileCopyrightText: 2025 Marco Ricci <software@the13thletter.info> |
|
| 2 |
+# |
|
| 3 |
+# SPDX-License-Identifier: Zlib |
|
| 4 |
+ |
|
| 5 |
+"""Test OpenSSH key loading and signing.""" |
|
| 6 |
+ |
|
| 7 |
+from __future__ import annotations |
|
| 8 |
+ |
|
| 9 |
+import contextlib |
|
| 10 |
+from typing import TYPE_CHECKING |
|
| 11 |
+ |
|
| 12 |
+import pytest |
|
| 13 |
+from hypothesis import stateful, strategies |
|
| 14 |
+ |
|
| 15 |
+import tests.machinery.pytest |
|
| 16 |
+from derivepassphrase.ssh_agent import socketprovider |
|
| 17 |
+ |
|
| 18 |
+if TYPE_CHECKING: |
|
| 19 |
+ from derivepassphrase import _types |
|
| 20 |
+ |
|
| 21 |
+# All tests in this module are heavy-duty tests. |
|
| 22 |
+pytestmark = [tests.machinery.pytest.heavy_duty] |
|
| 23 |
+ |
|
| 24 |
+ |
|
| 25 |
+@strategies.composite |
|
| 26 |
+def draw_alias_chain( |
|
| 27 |
+ draw: strategies.DrawFn, |
|
| 28 |
+ *, |
|
| 29 |
+ known_keys_strategy: strategies.SearchStrategy[str], |
|
| 30 |
+ new_keys_strategy: strategies.SearchStrategy[str], |
|
| 31 |
+ chain_size: strategies.SearchStrategy[int] = strategies.integers( # noqa: B008 |
|
| 32 |
+ min_value=1, |
|
| 33 |
+ max_value=5, |
|
| 34 |
+ ), |
|
| 35 |
+ existing: bool = False, |
|
| 36 |
+) -> tuple[str, ...]: |
|
| 37 |
+ """Draw names for alias chains in the SSH agent socket provider registry. |
|
| 38 |
+ |
|
| 39 |
+ Depending on arguments, draw a set of names from the new keys bundle |
|
| 40 |
+ that do not yet exist in the registry, to insert as a new alias |
|
| 41 |
+ chain. Alternatively, draw a non-alias name from the known keys |
|
| 42 |
+ bundle, then draw other names that either don't exist yet in the |
|
| 43 |
+ registry, or that alias the first name directly or indirectly. The |
|
| 44 |
+ chain length, and whether to target existing registry entries or |
|
| 45 |
+ not, may be set statically, or may be drawn from a respective |
|
| 46 |
+ strategy. |
|
| 47 |
+ |
|
| 48 |
+ Args: |
|
| 49 |
+ draw: |
|
| 50 |
+ The `hypothesis` draw function. |
|
| 51 |
+ chain_size: |
|
| 52 |
+ A strategy for determining the correct alias chain length. |
|
| 53 |
+ Must not yield any integers less than 1. |
|
| 54 |
+ existing: |
|
| 55 |
+ If true, target an existing registry entry in the alias |
|
| 56 |
+ chain, and permit rewriting existing aliases of that same |
|
| 57 |
+ entry to the new alias. Otherwise, draw only new names. |
|
| 58 |
+ known_keys_strategy: |
|
| 59 |
+ A strategy for generating provider registry keys already |
|
| 60 |
+ contained in the registry. Typically, this is |
|
| 61 |
+ a [Bundle][hypothesis.stateful.Bundle]. |
|
| 62 |
+ new_keys_strategy: |
|
| 63 |
+ A strategy for generating provider registry keys not yet |
|
| 64 |
+ contained in the registry with high probability. Typically, |
|
| 65 |
+ this is a [consuming][hypothesis.stateful.consumes] |
|
| 66 |
+ [Bundle][hypothesis.stateful.Bundle]. |
|
| 67 |
+ |
|
| 68 |
+ Returns: |
|
| 69 |
+ A tuple of names forming an alias chain, each entry pointing to |
|
| 70 |
+ or intending to point to the previous entry in the tuple. |
|
| 71 |
+ |
|
| 72 |
+ """ |
|
| 73 |
+ registry = socketprovider.SocketProvider.registry |
|
| 74 |
+ |
|
| 75 |
+ def not_an_alias(key: str) -> bool: |
|
| 76 |
+ return key in registry and not isinstance(registry[key], str) |
|
| 77 |
+ |
|
| 78 |
+ def is_indirect_alias_of( |
|
| 79 |
+ key: str, target: str |
|
| 80 |
+ ) -> bool: # pragma: no cover |
|
| 81 |
+ if key == target: |
|
| 82 |
+ return False # not an alias |
|
| 83 |
+ seen = set() # loop detection |
|
| 84 |
+ while key not in seen: |
|
| 85 |
+ seen.add(key) |
|
| 86 |
+ if key not in registry: |
|
| 87 |
+ return False |
|
| 88 |
+ if not isinstance(registry[key], str): |
|
| 89 |
+ return False |
|
| 90 |
+ if key == target: |
|
| 91 |
+ return True |
|
| 92 |
+ tmp = registry[key] |
|
| 93 |
+ assert isinstance(tmp, str) |
|
| 94 |
+ key = tmp |
|
| 95 |
+ return False # loop |
|
| 96 |
+ |
|
| 97 |
+ err_msg_chain_size = "Chain sizes must always be 1 or larger." |
|
| 98 |
+ |
|
| 99 |
+ size = draw(chain_size) |
|
| 100 |
+ if size < 1: # pragma: no cover |
|
| 101 |
+ raise ValueError(err_msg_chain_size) |
|
| 102 |
+ names: list[str] = [] |
|
| 103 |
+ base: str | None = None |
|
| 104 |
+ if existing: |
|
| 105 |
+ names.append(draw(known_keys_strategy.filter(not_an_alias))) |
|
| 106 |
+ base = names[0] |
|
| 107 |
+ size -= 1 |
|
| 108 |
+ new_key_strategy = new_keys_strategy.filter( |
|
| 109 |
+ lambda key: key not in registry |
|
| 110 |
+ ) |
|
| 111 |
+ old_key_strategy = known_keys_strategy.filter( |
|
| 112 |
+ lambda key: is_indirect_alias_of(key, target=base) |
|
| 113 |
+ ) |
|
| 114 |
+ list_strategy_source = strategies.one_of( |
|
| 115 |
+ new_key_strategy, old_key_strategy |
|
| 116 |
+ ) |
|
| 117 |
+ else: |
|
| 118 |
+ list_strategy_source = new_keys_strategy.filter( |
|
| 119 |
+ lambda key: key not in registry |
|
| 120 |
+ ) |
|
| 121 |
+ list_strategy = strategies.lists( |
|
| 122 |
+ list_strategy_source.filter(lambda candidate: candidate != base), |
|
| 123 |
+ min_size=size, |
|
| 124 |
+ max_size=size, |
|
| 125 |
+ unique=True, |
|
| 126 |
+ ) |
|
| 127 |
+ names.extend(draw(list_strategy)) |
|
| 128 |
+ return tuple(names) |
|
| 129 |
+ |
|
| 130 |
+ |
|
| 131 |
+class SSHAgentSocketProviderRegistryStateMachine( |
|
| 132 |
+ stateful.RuleBasedStateMachine |
|
| 133 |
+): |
|
| 134 |
+ """A state machine for the SSH agent socket provider registry. |
|
| 135 |
+ |
|
| 136 |
+ Record possible changes to the socket provider registry, keeping track |
|
| 137 |
+ of true entries, aliases, and reservations. |
|
| 138 |
+ |
|
| 139 |
+ """ |
|
| 140 |
+ |
|
| 141 |
+ def __init__(self) -> None: |
|
| 142 |
+ """Initialize self, set up context managers and enter them.""" |
|
| 143 |
+ super().__init__() |
|
| 144 |
+ self.exit_stack = contextlib.ExitStack().__enter__() |
|
| 145 |
+ self.monkeypatch = self.exit_stack.enter_context( |
|
| 146 |
+ pytest.MonkeyPatch.context() |
|
| 147 |
+ ) |
|
| 148 |
+ self.orig_registry = socketprovider.SocketProvider.registry |
|
| 149 |
+ self.registry: dict[ |
|
| 150 |
+ str, _types.SSHAgentSocketProvider | str | None |
|
| 151 |
+ ] = {
|
|
| 152 |
+ "posix": self.orig_registry["posix"], |
|
| 153 |
+ "the_annoying_os": self.orig_registry["the_annoying_os"], |
|
| 154 |
+ "native": self.orig_registry["native"], |
|
| 155 |
+ "unix_domain": "posix", |
|
| 156 |
+ "the_annoying_os_named_pipe": "the_annoying_os", |
|
| 157 |
+ } |
|
| 158 |
+ self.monkeypatch.setattr( |
|
| 159 |
+ socketprovider.SocketProvider, "registry", self.registry |
|
| 160 |
+ ) |
|
| 161 |
+ self.model: dict[str, _types.SSHAgentSocketProvider | None] = {}
|
|
| 162 |
+ |
|
| 163 |
+ known_keys: stateful.Bundle[str] = stateful.Bundle("known_keys")
|
|
| 164 |
+ """""" |
|
| 165 |
+ new_keys: stateful.Bundle[str] = stateful.Bundle("new_keys")
|
|
| 166 |
+ """""" |
|
| 167 |
+ |
|
| 168 |
+ def sample_provider(self) -> _types.SSHAgentSocket: |
|
| 169 |
+ raise AssertionError |
|
| 170 |
+ |
|
| 171 |
+ @stateful.initialize( |
|
| 172 |
+ target=known_keys, |
|
| 173 |
+ ) |
|
| 174 |
+ def get_registry_keys(self) -> stateful.MultipleResults[str]: |
|
| 175 |
+ """Read the standard keys from the registry.""" |
|
| 176 |
+ self.model.update({
|
|
| 177 |
+ k: socketprovider.SocketProvider.lookup(k) for k in self.registry |
|
| 178 |
+ }) |
|
| 179 |
+ return stateful.multiple(*self.registry.keys()) |
|
| 180 |
+ |
|
| 181 |
+ @stateful.rule( |
|
| 182 |
+ target=new_keys, |
|
| 183 |
+ k=strategies.text("abcdefghijklmnopqrstuvwxyz0123456789_").filter(
|
|
| 184 |
+ lambda s: s not in socketprovider.SocketProvider.registry |
|
| 185 |
+ ), |
|
| 186 |
+ ) |
|
| 187 |
+ def new_key(self, k: str) -> str: |
|
| 188 |
+ return k |
|
| 189 |
+ |
|
| 190 |
+ @stateful.invariant() |
|
| 191 |
+ def check_consistency(self) -> None: |
|
| 192 |
+ lookup = socketprovider.SocketProvider.lookup |
|
| 193 |
+ assert self.registry.keys() == self.model.keys() |
|
| 194 |
+ for k in self.model: |
|
| 195 |
+ resolved = lookup(k) |
|
| 196 |
+ modelled = self.model[k] |
|
| 197 |
+ step1 = self.registry[k] |
|
| 198 |
+ manually = lookup(step1) if isinstance(step1, str) else step1 |
|
| 199 |
+ assert resolved == modelled |
|
| 200 |
+ assert resolved == manually |
|
| 201 |
+ |
|
| 202 |
+ @stateful.rule( |
|
| 203 |
+ target=known_keys, |
|
| 204 |
+ chain=draw_alias_chain( |
|
| 205 |
+ known_keys_strategy=known_keys, |
|
| 206 |
+ new_keys_strategy=stateful.consumes(new_keys), |
|
| 207 |
+ existing=True, |
|
| 208 |
+ ), |
|
| 209 |
+ ) |
|
| 210 |
+ def alias_existing( |
|
| 211 |
+ self, chain: tuple[str, ...] |
|
| 212 |
+ ) -> stateful.MultipleResults[str]: |
|
| 213 |
+ try: |
|
| 214 |
+ provider = socketprovider.SocketProvider.resolve(chain[0]) |
|
| 215 |
+ except NotImplementedError: # pragma: no cover [failsafe] |
|
| 216 |
+ provider = self.sample_provider |
|
| 217 |
+ assert ( |
|
| 218 |
+ socketprovider.SocketProvider.register(*chain)(provider) |
|
| 219 |
+ == provider |
|
| 220 |
+ ) |
|
| 221 |
+ for k in chain: |
|
| 222 |
+ self.model[k] = provider |
|
| 223 |
+ return stateful.multiple(*chain[1:]) |
|
| 224 |
+ |
|
| 225 |
+ @stateful.rule( |
|
| 226 |
+ target=known_keys, |
|
| 227 |
+ chain=draw_alias_chain( |
|
| 228 |
+ known_keys_strategy=known_keys, |
|
| 229 |
+ new_keys_strategy=stateful.consumes(new_keys), |
|
| 230 |
+ existing=False, |
|
| 231 |
+ ), |
|
| 232 |
+ ) |
|
| 233 |
+ def alias_new(self, chain: list[str]) -> stateful.MultipleResults[str]: |
|
| 234 |
+ provider = self.sample_provider |
|
| 235 |
+ assert ( |
|
| 236 |
+ socketprovider.SocketProvider.register(*chain)(provider) |
|
| 237 |
+ == provider |
|
| 238 |
+ ) |
|
| 239 |
+ for k in chain: |
|
| 240 |
+ self.model[k] = provider |
|
| 241 |
+ return stateful.multiple(*chain) |
|
| 242 |
+ |
|
| 243 |
+ def teardown(self) -> None: |
|
| 244 |
+ """Upon teardown, exit all contexts entered in `__init__`.""" |
|
| 245 |
+ self.exit_stack.close() |
|
| 246 |
+ |
|
| 247 |
+ |
|
| 248 |
+TestSSHAgentSocketProviderRegistry = ( |
|
| 249 |
+ SSHAgentSocketProviderRegistryStateMachine.TestCase |
|
| 250 |
+) |
| ... | ... |
@@ -5,13 +5,11 @@ |
| 5 | 5 |
from __future__ import annotations |
| 6 | 6 |
|
| 7 | 7 |
import copy |
| 8 |
-import math |
|
| 9 | 8 |
import types |
| 10 | 9 |
|
| 11 | 10 |
import hypothesis |
| 12 | 11 |
import pytest |
| 13 | 12 |
from hypothesis import strategies |
| 14 |
-from typing_extensions import Any |
|
| 15 | 13 |
|
| 16 | 14 |
import tests.data |
| 17 | 15 |
import tests.data.callables |
| ... | ... |
@@ -19,56 +17,6 @@ import tests.machinery.hypothesis |
| 19 | 17 |
from derivepassphrase import _types |
| 20 | 18 |
|
| 21 | 19 |
|
| 22 |
-@strategies.composite |
|
| 23 |
-def js_atoms_strategy( |
|
| 24 |
- draw: strategies.DrawFn, |
|
| 25 |
-) -> int | float | str | bytes | bool | None: |
|
| 26 |
- """Yield a JS atom.""" |
|
| 27 |
- return draw( |
|
| 28 |
- strategies.one_of( |
|
| 29 |
- strategies.integers(), |
|
| 30 |
- strategies.floats(allow_nan=False, allow_infinity=False), |
|
| 31 |
- strategies.text(max_size=100), |
|
| 32 |
- strategies.binary(max_size=100), |
|
| 33 |
- strategies.booleans(), |
|
| 34 |
- strategies.none(), |
|
| 35 |
- ), |
|
| 36 |
- ) |
|
| 37 |
- |
|
| 38 |
- |
|
| 39 |
-@strategies.composite |
|
| 40 |
-def js_nested_strategy(draw: strategies.DrawFn) -> Any: |
|
| 41 |
- """Yield an arbitrary and perhaps nested JS value.""" |
|
| 42 |
- return draw( |
|
| 43 |
- strategies.one_of( |
|
| 44 |
- js_atoms_strategy(), |
|
| 45 |
- strategies.builds(tuple), |
|
| 46 |
- strategies.builds(list), |
|
| 47 |
- strategies.builds(dict), |
|
| 48 |
- strategies.builds(set), |
|
| 49 |
- strategies.builds(frozenset), |
|
| 50 |
- strategies.recursive( |
|
| 51 |
- js_atoms_strategy(), |
|
| 52 |
- lambda s: strategies.one_of( |
|
| 53 |
- strategies.frozensets(s, max_size=100), |
|
| 54 |
- strategies.builds( |
|
| 55 |
- tuple, strategies.frozensets(s, max_size=100) |
|
| 56 |
- ), |
|
| 57 |
- ), |
|
| 58 |
- max_leaves=8, |
|
| 59 |
- ), |
|
| 60 |
- strategies.recursive( |
|
| 61 |
- js_atoms_strategy(), |
|
| 62 |
- lambda s: strategies.one_of( |
|
| 63 |
- strategies.lists(s, max_size=100), |
|
| 64 |
- strategies.dictionaries(strategies.text(max_size=100), s), |
|
| 65 |
- ), |
|
| 66 |
- max_leaves=25, |
|
| 67 |
- ), |
|
| 68 |
- ), |
|
| 69 |
- ) |
|
| 70 |
- |
|
| 71 |
- |
|
| 72 | 20 |
class Parametrize(types.SimpleNamespace): |
| 73 | 21 |
VALID_VAULT_TEST_CONFIGS = pytest.mark.parametrize( |
| 74 | 22 |
"test_config", |
| ... | ... |
@@ -86,25 +34,6 @@ class Parametrize(types.SimpleNamespace): |
| 86 | 34 |
) |
| 87 | 35 |
|
| 88 | 36 |
|
| 89 |
-@hypothesis.given(value=js_nested_strategy()) |
|
| 90 |
-@hypothesis.example(float("nan"))
|
|
| 91 |
-def test_100_js_truthiness(value: Any) -> None: |
|
| 92 |
- """Determine the truthiness of a value according to JavaScript. |
|
| 93 |
- |
|
| 94 |
- Use hypothesis to generate test values. |
|
| 95 |
- |
|
| 96 |
- """ |
|
| 97 |
- expected = ( |
|
| 98 |
- value is not None # noqa: PLR1714 |
|
| 99 |
- and value != False # noqa: E712 |
|
| 100 |
- and value != 0 |
|
| 101 |
- and value != 0.0 |
|
| 102 |
- and value != "" |
|
| 103 |
- and not (isinstance(value, float) and math.isnan(value)) |
|
| 104 |
- ) |
|
| 105 |
- assert _types.js_truthiness(value) == expected |
|
| 106 |
- |
|
| 107 |
- |
|
| 108 | 37 |
@Parametrize.VALID_VAULT_TEST_CONFIGS |
| 109 | 38 |
def test_200_is_vault_config(test_config: tests.data.VaultTestConfig) -> None: |
| 110 | 39 |
"""Is this vault configuration recognized as valid/invalid? |
| ... | ... |
@@ -0,0 +1,86 @@ |
| 1 |
+# SPDX-FileCopyrightText: 2025 Marco Ricci <software@the13thletter.info> |
|
| 2 |
+# |
|
| 3 |
+# SPDX-License-Identifier: Zlib |
|
| 4 |
+ |
|
| 5 |
+from __future__ import annotations |
|
| 6 |
+ |
|
| 7 |
+import math |
|
| 8 |
+ |
|
| 9 |
+import hypothesis |
|
| 10 |
+from hypothesis import strategies |
|
| 11 |
+from typing_extensions import Any |
|
| 12 |
+ |
|
| 13 |
+import tests.machinery.pytest |
|
| 14 |
+from derivepassphrase import _types |
|
| 15 |
+ |
|
| 16 |
# All tests in this module are heavy-duty tests.  The module-wide mark
# lets users deselect them (e.g. via `-m 'not heavy_duty'`) if they
# cannot afford the slow runtime; they still run by default.
pytestmark = [tests.machinery.pytest.heavy_duty]
|
| 18 |
+ |
|
| 19 |
+ |
|
| 20 |
@strategies.composite
def js_atoms_strategy(
    draw: strategies.DrawFn,
) -> int | float | str | bytes | bool | None:
    """Yield a single atomic (non-container) JS-style value."""
    # Atoms: integers, finite floats, bounded text and byte strings,
    # booleans, and null/None.
    atom_strategies = (
        strategies.integers(),
        strategies.floats(allow_nan=False, allow_infinity=False),
        strategies.text(max_size=100),
        strategies.binary(max_size=100),
        strategies.booleans(),
        strategies.none(),
    )
    return draw(strategies.one_of(*atom_strategies))
|
| 35 |
+ |
|
| 36 |
+ |
|
| 37 |
@strategies.composite
def js_nested_strategy(draw: strategies.DrawFn) -> Any:
    """Yield an arbitrary, possibly nested JS-style value."""
    atoms = js_atoms_strategy()

    def extend_hashable(
        children: strategies.SearchStrategy,
    ) -> strategies.SearchStrategy:
        # Hashable containers: frozensets, and tuples built from them.
        frozen = strategies.frozensets(children, max_size=100)
        return strategies.one_of(frozen, strategies.builds(tuple, frozen))

    def extend_mutable(
        children: strategies.SearchStrategy,
    ) -> strategies.SearchStrategy:
        # Mutable containers: lists, and string-keyed dictionaries.
        return strategies.one_of(
            strategies.lists(children, max_size=100),
            strategies.dictionaries(strategies.text(max_size=100), children),
        )

    # Recursively nested values, hashable and mutable flavors, with
    # small leaf budgets to keep generation fast.
    hashable_nested = strategies.recursive(
        atoms, extend_hashable, max_leaves=8
    )
    mutable_nested = strategies.recursive(
        atoms, extend_mutable, max_leaves=25
    )
    # Explicitly include each kind of empty container as well.
    empty_containers = [
        strategies.builds(factory)
        for factory in (tuple, list, dict, set, frozenset)
    ]
    return draw(
        strategies.one_of(
            atoms,
            *empty_containers,
            hashable_nested,
            mutable_nested,
        )
    )
|
| 68 |
+ |
|
| 69 |
+ |
|
| 70 |
@hypothesis.given(value=js_nested_strategy())
@hypothesis.example(float("nan"))
def test_100_js_truthiness(value: Any) -> None:
    """Determine the truthiness of a value according to JavaScript.

    Use hypothesis to generate test values.

    """
    # JS treats null, NaN, false, numeric zero and the empty string as
    # falsy; everything else (including empty containers) is truthy.
    is_nan = isinstance(value, float) and math.isnan(value)
    expected = (
        value is not None
        and not is_nan
        and all(value != falsy for falsy in (False, 0, 0.0, ""))
    )
    assert _types.js_truthiness(value) == expected
|
| 0 | 87 |