diff --git a/.github/workflows/tests_role_ravendb_node.yml b/.github/workflows/tests_role_ravendb_node.yml index e680aad..8c86999 100644 --- a/.github/workflows/tests_role_ravendb_node.yml +++ b/.github/workflows/tests_role_ravendb_node.yml @@ -151,6 +151,15 @@ jobs: ansible-galaxy collection build . ansible-galaxy collection install ./ravendb-ravendb-*.tar.gz --force -p ./ + - name: Run modules scenarios + working-directory: roles/ravendb_node + run: molecule test -s plugins-unsecured + + # todo: enable once we put a license file in the repo + # - name: Run modules scenarios nodes + # working-directory: roles/ravendb_node + # run: molecule test -s plugins-unsecured-nodes + - name: Set up .NET environment variables run: | export DOTNET_ROOT=$HOME/.dotnet diff --git a/CHANGELOG.md b/CHANGELOG.md index 8504994..b0de9b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,3 +46,14 @@ The full changelog is maintained in [changelogs/changelog.yml](./changelogs/chan - Create databases with encryption enabled. - Ability to manage database settings via the `ravendb.ravendb.database` module. - Joining Let's Encrypt–secured nodes into existing RavenDB clusters. + + +## [1.0.4] - 2025-09-04 + +### Added +- Database placement on specific nodes via `topology_members` in `ravendb.ravendb.database`. +- Index deployment mode support (`rolling`, `parallel`) in `ravendb.ravendb.index`. +- Per-index configuration reconciliation via `index_configuration` in `ravendb.ravendb.index`. + +### Changed +- Modularized the project internals for clearer responsibilities and easier maintenance. diff --git a/changelogs/changelog.yml b/changelogs/changelog.yml index 2d65fc3..fccd1bc 100644 --- a/changelogs/changelog.yml +++ b/changelogs/changelog.yml @@ -44,4 +44,16 @@ releases: - Distribute keys across all cluster nodes. - Create databases with encryption enabled. - Ability to manage database settings via the `ravendb.ravendb.database` module. 
- - Joining Let's Encrypt–secured nodes into existing RavenDB clusters. \ No newline at end of file + - Joining Let's Encrypt–secured nodes into existing RavenDB clusters. + + - name: "Placement & Index Configuration" + tag: "v1.0.4" + version: "1.0.4" + changes: + - added: + - Database placement controls in `ravendb.ravendb.database`: + - Fixed placement via `topology_members` to deploy on specific node tags. + - Index deployment mode in `ravendb.ravendb.index` (rolling/parallel). + - Per-index configuration reconciliation in `ravendb.ravendb.index` via `index_configuration`. + - changed: + - Modularized the project structure and internals for clearer responsibilities and easier maintenance. \ No newline at end of file diff --git a/playbooks/node/ravendb_add_nodes.yml b/playbooks/node/ravendb_add_nodes.yml index c10e7a6..adc7e43 100644 --- a/playbooks/node/ravendb_add_nodes.yml +++ b/playbooks/node/ravendb_add_nodes.yml @@ -3,6 +3,8 @@ hosts: localhost gather_facts: no + roles: + - ravendb.ravendb.ravendb_python_client_prerequisites tasks: - name: Join Node B as Watcher (check_mode) diff --git a/playbooks/node/ravendb_add_nodes_secured.yml b/playbooks/node/ravendb_add_nodes_secured.yml index 0139bdd..e732c78 100644 --- a/playbooks/node/ravendb_add_nodes_secured.yml +++ b/playbooks/node/ravendb_add_nodes_secured.yml @@ -1,6 +1,6 @@ --- - name: Form Cluster - hosts: ravendb_nodes + hosts: localhost gather_facts: no tasks: diff --git a/plugins/module_utils/core/client.py b/plugins/module_utils/core/client.py new file mode 100644 index 0000000..8eb9e21 --- /dev/null +++ b/plugins/module_utils/core/client.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from .deps import require_ravendb + + +class StoreContext(object): + def __init__(self, store): + 
self.store = store + + def maintenance_server(self): + return self.store.maintenance.server + + def maintenance_for_db(self, db_name): + return self.store.maintenance.for_database(db_name) + + def close(self): + try: + self.store.close() + except Exception: + raise + + +class DocumentStoreFactory(object): + @staticmethod + def create(url, database=None, certificate_path=None, ca_cert_path=None): + require_ravendb() + from ravendb import DocumentStore + + s = DocumentStore(urls=[url], database=database) + if certificate_path: + s.certificate_pem_path = certificate_path + if ca_cert_path: + s.trust_store_path = ca_cert_path + s.initialize() + return StoreContext(store=s) diff --git a/plugins/module_utils/core/configuration.py b/plugins/module_utils/core/configuration.py new file mode 100644 index 0000000..cca5914 --- /dev/null +++ b/plugins/module_utils/core/configuration.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +def normalize_str_values(d): + """ + Return a new dict with all keys as str and values as str (None -> ""). + """ + if not d: + return {} + return {str(k): ("" if v is None else str(v)) for k, v in d.items()} + + +def validate_kv(d, name, allow_none): + """ + Generic KV validator/normalizer. + Returns: (ok: bool, normalized: dict|None, err: str|None) + """ + if d is None: + if allow_none: + return True, {}, None + return False, None, "{} must be a dict.".format(name) + + if not isinstance(d, dict): + return False, None, "{} must be a dict.".format(name) + + return True, normalize_str_values(d), None + + +def diff_kv(desired, current): + """ + Compare desired vs current (normalized) and return only the keys that differ. 
+ """ + desired_n = normalize_str_values(desired or {}) + current_n = normalize_str_values(current or {}) + to_apply = {} + + for k, dv in desired_n.items(): + if current_n.get(k) != dv: + to_apply[k] = dv + + return to_apply diff --git a/plugins/module_utils/core/deps.py b/plugins/module_utils/core/deps.py new file mode 100644 index 0000000..92f33c4 --- /dev/null +++ b/plugins/module_utils/core/deps.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import traceback +import importlib + +HAS_RAVEN = True +RAVEN_IMP_ERR = None + +try: + importlib.import_module("ravendb") +except Exception: + HAS_RAVEN = False + RAVEN_IMP_ERR = traceback.format_exc() + + +def require_ravendb(): + if not HAS_RAVEN: + msg = ( + "The 'ravendb' Python client is required. " + "Install it via the ravendb_python_client_prerequisites role.\n" + ) + if RAVEN_IMP_ERR: + msg += "Original import error:\n" + str(RAVEN_IMP_ERR) + raise ImportError(msg) diff --git a/plugins/module_utils/core/messages.py b/plugins/module_utils/core/messages.py new file mode 100644 index 0000000..bc2c115 --- /dev/null +++ b/plugins/module_utils/core/messages.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +def _enc_suffix(encrypted): + return " (encrypted)" if encrypted else "" + + +def db_exists(n): + return "Database '{}' already exists.".format(n) + + +def db_not_exists(n): + return "Database '{}' does not exist.".format(n) + + +def db_created(n, encrypted=False): + return "Database '{}' created successfully{}.".format(n, _enc_suffix(encrypted)) + + +def db_would_create(n, 
encrypted=False): + return "Database '{}' would be created{}.".format(n, _enc_suffix(encrypted)) + + +def db_deleted(n): + return "Database '{}' deleted successfully.".format(n) + + +def db_would_delete(n): + return "Database '{}' would be deleted.".format(n) + + +def db_no_changes(base): + return "{} No changes.".format(base) + + +def rf_required_on_create(): + return "replication_factor is required when creating a database." + + +def settings_applied(prefix, keys): + ks = ", ".join(sorted(keys)) if not isinstance(keys, str) else keys + return "{} Applied settings ({}) and reloaded.".format(prefix, ks) + + +def settings_would_apply(prefix, keys): + ks = ", ".join(sorted(keys)) if not isinstance(keys, str) else keys + return "{} Would apply settings ({}) and reload.".format(prefix, ks) + + +def would_assign_encryption_key(db): + return "Would assign encryption key for database '{}'.".format(db) + + +def assigned_encryption_key(db): + return "Assigned encryption key for database '{}'.".format(db) + + +def encryption_mismatch(name, actual, desired): + return ( + "Database '{}' already exists but encryption status is '{}' while requested '{}'. " + "RavenDB does not support toggling encryption on an existing database. " + "Delete & recreate, or backup and restore with the desired key." 
+ ).format(name, actual, desired) + + +def _cluster_suffix(cluster_wide): + return " cluster-wide" if cluster_wide else "" + + +def idx_cfg_applied(index_name, keys_str): + return "Applied configuration for index '{}' (keys: {}).".format(index_name, keys_str) + + +def idx_cfg_would_apply(index_name, keys_str): + return "Would apply configuration for index '{}' (keys: {}).".format(index_name, keys_str) + + +def idx_would_enable(name, cluster_wide=False): + return "Index '{}' would be enabled{}.".format(name, _cluster_suffix(cluster_wide)) + + +def idx_would_disable(name, cluster_wide=False): + return "Index '{}' would be disabled{}.".format(name, _cluster_suffix(cluster_wide)) + + +def idx_created(name): + return "Index '{}' created successfully.".format(name) + + +def idx_would_create(name): + return "Index '{}' would be created.".format(name) + + +def idx_deleted(name): + return "Index '{}' deleted successfully.".format(name) + + +def idx_would_delete(name): + return "Index '{}' would be deleted.".format(name) + + +def idx_enabled(name, cluster_wide=False): + return "Index '{}' enabled successfully{}.".format(name, _cluster_suffix(cluster_wide)) + + +def idx_disabled(name, cluster_wide=False): + return "Index '{}' disabled successfully{}.".format(name, _cluster_suffix(cluster_wide)) + + +def idx_already_enabled(name): + return "Index '{}' is already enabled.".format(name) + + +def idx_already_disabled(name): + return "Index '{}' is already disabled.".format(name) + + +def idx_resumed(name): + return "Index '{}' resumed successfully.".format(name) + + +def idx_already_resumed(name): + return "Index '{}' is already running.".format(name) + + +def idx_would_resume(name): + return "Index '{}' would be resumed.".format(name) + + +def idx_paused(name): + return "Index '{}' paused successfully.".format(name) + + +def idx_already_paused(name): + return "Index '{}' is already paused.".format(name) + + +def idx_would_pause(name): + return "Index '{}' would be 
paused.".format(name) + + +def idx_reset(name): + return "Index '{}' reset successfully.".format(name) + + +def idx_would_reset(name): + return "Index '{}' would be reset.".format(name) + + +def idx_exists(name): + return "Index '{}' already exists.".format(name) + + +def idx_already_absent(name): + return "Index '{}' is already absent.".format(name) + + +def idx_not_exist_cannot_apply_mode(name): + return "Index '{}' does not exist. Cannot apply mode.".format(name) + + +def node_already_present(tag, role, url): + return "Node '{}' already present as {} at {}.".format(tag, role, url) + + +def node_would_add(tag, node_type): + return "Node '{}' would be added as {}.".format(tag, node_type) + + +def node_added(tag, node_type): + return "Node '{}' added as {}.".format(tag, node_type) + + +def failed_add_node(tag, error): + return "Failed to add node '{}': {}".format(tag, error) diff --git a/plugins/module_utils/core/result.py b/plugins/module_utils/core/result.py new file mode 100644 index 0000000..30806fb --- /dev/null +++ b/plugins/module_utils/core/result.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class ModuleResult(object): + def __init__(self, changed=False, failed=False, msg="", extras=None): + self.changed = bool(changed) + self.failed = bool(failed) + self.msg = msg or "" + self.extras = dict(extras) if extras else {} + + @classmethod + def ok(cls, msg="", changed=False, **extras): + return cls(changed=changed, failed=False, msg=msg, extras=extras) + + @classmethod + def error(cls, msg, **extras): + return cls(changed=False, failed=True, msg=msg, extras=extras) + + def to_ansible(self): + data = dict(changed=self.changed, msg=self.msg) + if self.failed: + data["failed"] = True + if self.extras: + data.update(self.extras) + return data 
diff --git a/plugins/module_utils/core/tls.py b/plugins/module_utils/core/tls.py new file mode 100644 index 0000000..41fe8ad --- /dev/null +++ b/plugins/module_utils/core/tls.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class TLSConfig(object): + + def __init__(self, certificate_path=None, ca_cert_path=None): + self.certificate_path = certificate_path + self.ca_cert_path = ca_cert_path + + def to_requests_tuple(self): + """ + Decide what to pass to requests for TLS. + Returns a tuple: (cert, verify) + """ + cert = None + verify = True + + if self.certificate_path: + cert = self.certificate_path + verify = self.ca_cert_path if self.ca_cert_path else False + elif self.ca_cert_path: + verify = self.ca_cert_path + + return cert, verify diff --git a/plugins/module_utils/core/validation.py b/plugins/module_utils/core/validation.py new file mode 100644 index 0000000..3629ddf --- /dev/null +++ b/plugins/module_utils/core/validation.py @@ -0,0 +1,172 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os +import re +try: + + from urllib.parse import urlparse +except Exception: + + from urlparse import urlparse + +_NAME_RE = re.compile(r"^[A-Za-z0-9_-]+$") +_TAG_RE = re.compile(r"^[A-Z0-9]{1,4}$") + + +def is_valid_url(url): + """Return True if the given URL is a string with a valid HTTP or HTTPS scheme.""" + if not isinstance(url, str): + return False + parsed = urlparse(url) + return parsed.scheme in ("http", "https") and bool(parsed.netloc) + + +def validate_url(url): + """Validate a URL string and return (ok, error).""" + if not 
is_valid_url(url): + return False, "Invalid URL: {}".format(url) + return True, None + + +def is_valid_name(name): + """Generic name rule: must contain only letters, numbers, dashes, or underscores.""" + return isinstance(name, str) and bool(_NAME_RE.match(name)) + + +def is_valid_database_name(name): + """Return True if the database name matches RavenDB rules.""" + return is_valid_name(name) + + +def validate_database_name(name): + """Validate database name and return (ok, error).""" + if not is_valid_database_name(name): + return ( + False, + "Invalid database name: {}. Only letters, numbers, dashes, and underscores are allowed.".format(name), + ) + return True, None + + +def is_valid_replication_factor(factor): + """Return True if replication factor is a positive integer.""" + return isinstance(factor, int) and factor > 0 + + +def validate_replication_factor(factor): + """Validate replication factor and return (ok, error).""" + if not is_valid_replication_factor(factor): + return False, "Invalid replication factor: {}. Must be a positive integer.".format(factor) + return True, None + + +def validate_replication_factor_optional(factor): + """Accepts None or a positive integer.""" + if factor is None: + return True, None + return validate_replication_factor(factor) + + +def validate_topology_members(members, replication_factor): + """Validate that topology_members is a list of tags with length == replication_factor.""" + if not members: + return True, None + if not isinstance(members, list) or not all(isinstance(m, str) for m in members): + return False, "topology_members must be a list of strings." 
+ if replication_factor is not None and len(members) != replication_factor: + return False, "topology_members length ({}) must equal replication_factor ({}).".format(len(members), replication_factor) + return True, None + + +def validate_paths_exist(*paths): + """Ensure all given file paths exist on the filesystem.""" + for p in paths: + if p and not os.path.isfile(p): + return False, "Path does not exist: {}".format(p) + return True, None + + +def is_valid_state(state): + """Return True if the state is either 'present' or 'absent'.""" + return state in ("present", "absent") + + +def validate_state(state): + """Validate state and return (ok, error).""" + if not is_valid_state(state): + return False, "Invalid state: {}. Must be 'present' or 'absent'.".format(state) + return True, None + + +def is_valid_index_name(name): + return is_valid_name(name) + + +def validate_index_name(name): + if not is_valid_index_name(name): + return False, "Invalid index name: {}. Only letters, numbers, dashes, and underscores are allowed.".format(name) + return True, None + + +def validate_state_optional(state): + """Accepts None, 'present', or 'absent' (for mode-only operations).""" + if state is None: + return True, None + return validate_state(state) + + +def is_valid_mode(mode): + return mode in (None, 'resumed', 'paused', 'enabled', 'disabled', 'reset') + + +def validate_mode(mode): + if not is_valid_mode(mode): + return False, "Invalid mode: {}. Must be one of 'resumed', 'paused', 'enabled', 'disabled', 'reset'.".format(mode) + return True, None + + +def is_valid_bool(value): + return isinstance(value, bool) + + +def validate_bool(name, value): + if not is_valid_bool(value): + return False, "Invalid {} flag: {}. 
Must be a boolean.".format(name, value) + return True, None + + +def is_valid_dict(value): + return isinstance(value, dict) or value is None + + +def validate_dict(name, value): + if not is_valid_dict(value): + return False, "Invalid {}: Must be a dictionary.".format(name) + return True, None + + +def is_valid_tag(tag): + """Return True if the tag is uppercase alphanumeric with length 1..4.""" + return isinstance(tag, str) and bool(_TAG_RE.match(tag)) + + +def validate_tag(tag): + if not is_valid_tag(tag): + return False, "Invalid node tag: {}. Must be uppercase alphanumeric (1–4 chars).".format(tag) + return True, None + + +def collect_errors(*results): + """ + Accept many (ok, err) tuples and return (ok, combined_err_string_or_None). + Aggregates all error messages. + """ + errors = [err for ok, err in results if not ok and err] + return (len(errors) == 0, "; ".join(errors) if errors else None) diff --git a/plugins/module_utils/dto/database.py b/plugins/module_utils/dto/database.py new file mode 100644 index 0000000..7634f55 --- /dev/null +++ b/plugins/module_utils/dto/database.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class EncryptionSpec(object): + def __init__(self, + enabled=False, + certificate_path=None, + ca_cert_path=None, + generate_key=False, + key_path=None, + output_path=None): + self.enabled = enabled + self.certificate_path = certificate_path + self.ca_cert_path = ca_cert_path + self.generate_key = generate_key + self.key_path = key_path + self.output_path = output_path + + +class DatabaseSpec(object): + def __init__(self, url, name, replication_factor=None, settings=None, encryption=None, members=None): + if settings is None: + settings = {} + if encryption is None: + encryption = EncryptionSpec() + if members is None: + 
members = [] + + self.url = url + self.name = name + self.replication_factor = replication_factor + self.settings = settings + self.encryption = encryption + self.members = members diff --git a/plugins/module_utils/dto/index.py b/plugins/module_utils/dto/index.py new file mode 100644 index 0000000..9705d52 --- /dev/null +++ b/plugins/module_utils/dto/index.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class IndexDefinitionSpec(object): + def __init__(self, maps=None, reduce=None, deployment_mode=None): + if maps is None: + maps = [] + elif isinstance(maps, str): + maps = [maps] + else: + maps = list(maps) + + self.maps = maps + self.reduce = None if reduce == "" else reduce + self.deployment_mode = deployment_mode + + @classmethod + def from_dict(cls, d): + if not d: + return None + maps = d.get("map") or [] + if isinstance(maps, str): + maps = [maps] + dm_raw = d.get("deployment_mode") or d.get("DeploymentMode") + if dm_raw is None: + dm_norm = None + else: + dm_norm = str(dm_raw).strip().lower() + return cls(maps=maps, reduce=d.get("reduce"), deployment_mode=dm_norm) + + def to_dict(self): + out = {} + if self.maps: + out["map"] = list(self.maps) + if self.reduce: + out["reduce"] = self.reduce + if self.deployment_mode: + out["deployment_mode"] = self.deployment_mode + return out + + +class IndexSpec(object): + def __init__(self, db_name, name, definition=None, mode=None, cluster_wide=False, configuration=None): + self.db_name = db_name + self.name = name + self.definition = definition + self.mode = mode + self.cluster_wide = bool(cluster_wide) + self.configuration = dict(configuration or {}) diff --git a/plugins/module_utils/dto/node.py b/plugins/module_utils/dto/node.py new file mode 100644 index 0000000..84383ba --- /dev/null +++ 
b/plugins/module_utils/dto/node.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class NodeSpec(object): + def __init__(self, tag, url, leader_url, node_type="Member"): + self.tag = tag + self.url = url + self.leader_url = leader_url + self.node_type = node_type + + @property + def is_watcher(self): + return self.node_type == "Watcher" diff --git a/plugins/module_utils/reconcilers/base.py b/plugins/module_utils/reconcilers/base.py new file mode 100644 index 0000000..d4efa73 --- /dev/null +++ b/plugins/module_utils/reconcilers/base.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class ReconcilerContext(object): + def __init__(self, check_mode=False): + self.check_mode = bool(check_mode) diff --git a/plugins/module_utils/reconcilers/database_reconciler.py b/plugins/module_utils/reconcilers/database_reconciler.py new file mode 100644 index 0000000..4c446a8 --- /dev/null +++ b/plugins/module_utils/reconcilers/database_reconciler.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type +from ansible_collections.ravendb.ravendb.plugins.module_utils.core.result import ModuleResult +from ansible_collections.ravendb.ravendb.plugins.module_utils.core import messages as msg +from ansible_collections.ravendb.ravendb.plugins.module_utils.services import database_service as dbs +from 
ansible_collections.ravendb.ravendb.plugins.module_utils.services import db_settings_service as setsvc +from ansible_collections.ravendb.ravendb.plugins.module_utils.services import encryption_service as encsvc +from ansible_collections.ravendb.ravendb.plugins.module_utils.services.cluster_service import fetch_topology, collect_tags + + +class DatabaseReconciler: + def __init__(self, ctx): + self.ctx = ctx + + def ensure_present(self, spec, tls, check_mode): + """ + Ensure the specified database exists. + Returns: ModuleResult: contains `changed` (bool) and `msg` (str). + """ + existing_databases = dbs.list_databases(self.ctx) + created = False + + if spec.name not in existing_databases: + if spec.replication_factor is None: + return ModuleResult.error(msg=msg.rf_required_on_create()) + + if spec.members: + wanted = list(dict.fromkeys(spec.members)) + if len(wanted) != spec.replication_factor: + return ModuleResult.error( + msg="topology_members length ({}) must equal replication_factor ({}).".format( + len(wanted), spec.replication_factor + ) + ) + try: + cluster_tags = set(collect_tags(fetch_topology(self.ctx))) + except Exception as e: + return ModuleResult.error(msg="Failed to fetch cluster topology: {}".format(str(e))) + + missing = [t for t in wanted if t not in cluster_tags] + if missing: + return ModuleResult.error(msg="Unknown node tags in topology_members: {}".format(", ".join(missing))) + + spec.members = wanted + + if spec.encryption.enabled: + if check_mode: + return ModuleResult.ok(msg=msg.db_would_create(spec.name, encrypted=True), changed=True) + + if spec.encryption.generate_key: + key = encsvc.fetch_generated_key(self.ctx, tls) + if spec.encryption.output_path: + encsvc.write_key_safe(spec.encryption.output_path, key) + else: + key = encsvc.read_key(spec.encryption.key_path) + encsvc.distribute_key(self.ctx, spec.name, key, tls, only_tags=(spec.members or None)) + + if check_mode: + return ModuleResult.ok(msg=msg.db_would_create(spec.name), 
changed=True) + + dbs.create_database(self.ctx, spec.name, spec.replication_factor, spec.encryption.enabled, members=(spec.members or None), tls=tls) + created = True + base_msg = msg.db_created(spec.name, encrypted=spec.encryption.enabled) + + else: + record = dbs.get_record(self.ctx, spec.name) + actual_flag = bool(getattr(record, "encrypted", False)) + if spec.encryption.enabled != actual_flag: + # toggling between encrypted db and regular db is forbidden + return ModuleResult.error(msg=msg.encryption_mismatch(spec.name, actual_flag, spec.encryption.enabled)) + base_msg = msg.db_exists(spec.name) + + if spec.members: + return ModuleResult.error( + msg=( + "topology_members is only supported on database creation; " + "modifying an existing database topology is not supported." + ) + ) + + if spec.settings: + current = setsvc.get_current(self.ctx, spec.name) + to_apply = setsvc.diff(spec.settings, current) + if to_apply: + if check_mode: + return ModuleResult.ok(msg=msg.settings_would_apply(base_msg, list(to_apply.keys())), changed=True) + setsvc.apply(self.ctx, spec.name, to_apply) + return ModuleResult.ok(msg=msg.settings_applied(base_msg, list(to_apply.keys())), changed=True) + + if created: + return ModuleResult.ok(msg=base_msg, changed=True) + return ModuleResult.ok(msg=msg.db_no_changes(base_msg), changed=False) + + def ensure_absent(self, name, check_mode): + """ + Ensure the specified database is absent. + Returns: ModuleResult: contains `changed` (bool) and `msg` (str). 
+ """ + existing = dbs.list_databases(self.ctx) + if name not in existing: + return ModuleResult.ok(msg=msg.db_not_exists(name), changed=False) + + if check_mode: + return ModuleResult.ok(msg=msg.db_would_delete(name), changed=True) + + dbs.delete_database(self.ctx, name) + return ModuleResult.ok(msg=msg.db_deleted(name), changed=True) diff --git a/plugins/module_utils/reconcilers/index_reconciler.py b/plugins/module_utils/reconcilers/index_reconciler.py new file mode 100644 index 0000000..4aa707f --- /dev/null +++ b/plugins/module_utils/reconcilers/index_reconciler.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.ravendb.ravendb.plugins.module_utils.core.result import ModuleResult +from ansible_collections.ravendb.ravendb.plugins.module_utils.core import messages as msg +from ansible_collections.ravendb.ravendb.plugins.module_utils.dto.index import IndexDefinitionSpec +from ansible_collections.ravendb.ravendb.plugins.module_utils.services import index_service as idxsvc +from ansible_collections.ravendb.ravendb.plugins.module_utils.services import index_config_service as cfgsvc + + +class IndexReconciler(object): + def __init__(self, ctx, db_name): + self.ctx = ctx + self.db_name = db_name + + def _apply_index(self, name, raw_def, check_mode): + """Create or update an index with the given raw definition.""" + if check_mode: + return ModuleResult.ok(msg=msg.idx_would_create(name), changed=True) + + idxsvc.create_index(self.ctx, self.db_name, name, raw_def) + return ModuleResult.ok(msg=msg.idx_created(name), changed=True) + + def ensure_absent(self, name, check_mode): + """Delete the index if it exists.""" + existing_defs = idxsvc.list_definitions(self.ctx, self.db_name) + existing_names = [getattr(i, "name", None) for i in 
existing_defs] + + if name not in existing_names: + return ModuleResult.ok(msg=msg.idx_already_absent(name), changed=False) + + if check_mode: + return ModuleResult.ok(msg=msg.idx_would_delete(name), changed=True) + + idxsvc.delete_index(self.ctx, self.db_name, name) + return ModuleResult.ok(msg=msg.idx_deleted(name), changed=True) + + def ensure_present(self, spec, check_mode): + """ + Create or update the index definition, optionally apply mode and per-index configuration. + """ + existing_defs = idxsvc.list_definitions(self.ctx, self.db_name) + existing_names = [getattr(i, "name", None) for i in existing_defs] + base_msg = None + changed_any = False + + if isinstance(spec.definition, IndexDefinitionSpec): + raw_def = spec.definition.to_dict() + else: + raw_def = None + + if spec.name not in existing_names: + if raw_def is None: + return ModuleResult.error("index_definition is required when creating a new index.") + + result = self._apply_index(spec.name, raw_def, check_mode) + base_msg, changed_any = result.msg, True + + else: + existing_def = idxsvc.get_definition(self.ctx, self.db_name, spec.name) + if raw_def and not idxsvc.index_matches(existing_def, raw_def): + result = self._apply_index(spec.name, raw_def, check_mode) + base_msg, changed_any = result.msg, True + else: + base_msg = msg.idx_exists(spec.name) + + if spec.mode: + mode_changed, mode_msg = idxsvc.apply_mode(self.ctx, self.db_name, spec.name, spec.mode, spec.cluster_wide, check_mode) + if mode_changed: + changed_any = True + if base_msg: + base_msg = "{} {}".format(base_msg, mode_msg).strip() + else: + base_msg = mode_msg + else: + base_msg = mode_msg or base_msg + + if spec.configuration: + current = cfgsvc.get_current(self.ctx, self.db_name, spec.name) + to_apply = cfgsvc.diff(spec.configuration, current) + if to_apply: + keys_str = ", ".join(sorted(to_apply.keys())) + if check_mode: + return ModuleResult.ok(msg="{} {}".format(base_msg, msg.idx_cfg_would_apply(spec.name, keys_str)), 
changed=True) + +                cfgsvc.apply(self.ctx, self.db_name, spec.name, to_apply) +                return ModuleResult.ok(msg="{} {}".format(base_msg, msg.idx_cfg_applied(spec.name, keys_str)), changed=True) + +        return ModuleResult.ok(msg=base_msg, changed=changed_any) diff --git a/plugins/module_utils/reconcilers/node_reconciler.py b/plugins/module_utils/reconcilers/node_reconciler.py new file mode 100644 index 0000000..be2b532 --- /dev/null +++ b/plugins/module_utils/reconcilers/node_reconciler.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.ravendb.ravendb.plugins.module_utils.core.result import ModuleResult +from ansible_collections.ravendb.ravendb.plugins.module_utils.core import messages as msg +from ansible_collections.ravendb.ravendb.plugins.module_utils.services.cluster_service import fetch_topology_http +from ansible_collections.ravendb.ravendb.plugins.module_utils.services import node_service + + +class NodeReconciler(object): +    """ +    Reconciles a node's presence in a cluster. 
+ """ + def __init__(self, ctx): + self.ctx = ctx + + def ensure_present(self, spec, tls, check_mode): + topology = fetch_topology_http(spec.leader_url, tls) + present, role, existing_tag, existing_url = node_service.node_in_topology(topology, spec.tag, spec.url) + if present: + return ModuleResult.ok(msg=msg.node_already_present(existing_tag, role, existing_url), changed=False) + + if check_mode: + return ModuleResult.ok(msg=msg.node_would_add(spec.tag, spec.node_type), changed=True) + + try: + node_service.add_node(self.ctx, spec.tag, spec.url, is_watcher=spec.is_watcher, tls=tls) + except Exception as e: + return ModuleResult.error(msg=msg.failed_add_node(spec.tag, str(e))) + + return ModuleResult.ok(msg=msg.node_added(spec.tag, spec.node_type), changed=True) diff --git a/plugins/module_utils/services/cluster_service.py b/plugins/module_utils/services/cluster_service.py new file mode 100644 index 0000000..41e5d4e --- /dev/null +++ b/plugins/module_utils/services/cluster_service.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class ClusterTopology: + def __init__(self, members, watchers, promotables): + self.members = members + self.watchers = watchers + self.promotables = promotables + + +def fetch_topology_http(leader_url, tls, timeout=10): + import requests + cert, verify = tls.to_requests_tuple() + endpoint = leader_url.rstrip('/') + "/cluster/topology" + + r = requests.get(endpoint, cert=cert, verify=verify, timeout=timeout) + r.raise_for_status() + data = r.json() + topo = data.get("Topology") or data.get("topology") or data + + def _to_map(g): + if isinstance(g, dict): + return dict((str(k), ("" if v is None else str(v))) for k, v in g.items()) + return {} + + members = _to_map(topo.get("Members") or topo.get("members")) + watchers = 
_to_map(topo.get("Watchers") or topo.get("watchers")) + promotables = _to_map(topo.get("Promotables") or topo.get("promotables")) + + return ClusterTopology(members, watchers, promotables) + + +def fetch_topology(ctx): + """ + Fetch cluster topology using RavenDB Python Client. + Returns ClusterTopology. + """ + from ravendb.serverwide.commands import GetClusterTopologyCommand + cmd = GetClusterTopologyCommand() + ctx.store.get_request_executor().execute_command(cmd) + return cmd.result.topology + + +def collect_tags(topology): + all_nodes = getattr(topology, "all_nodes", None) or {} + if all_nodes: + return sorted(all_nodes.keys()) + + members = getattr(topology, "members", None) or {} + promotables = getattr(topology, "promotables", None) or {} + watchers = getattr(topology, "watchers", None) or {} + return sorted(set(list(members.keys()) + list(promotables.keys()) + list(watchers.keys()))) diff --git a/plugins/module_utils/services/database_service.py b/plugins/module_utils/services/database_service.py new file mode 100644 index 0000000..98a0d70 --- /dev/null +++ b/plugins/module_utils/services/database_service.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type +from ansible_collections.ravendb.ravendb.plugins.module_utils.core.tls import TLSConfig + + +def list_databases(ctx, start=0, max=128): + """Return a list of database names from the server.""" + from ravendb.serverwide.operations.common import GetDatabaseNamesOperation + return ctx.maintenance_server().send(GetDatabaseNamesOperation(start, max)) + + +def get_record(ctx, db_name): + """ + Fetch the database record for the specified database. 
+ """ + from ravendb.serverwide.operations.common import GetDatabaseRecordOperation + return ctx.maintenance_server().send(GetDatabaseRecordOperation(db_name)) + + +def create_database(ctx, db_name, replication_factor, encrypted, members=None, tls=None): + if members: + import requests + body = { + "DatabaseName": db_name, + "ReplicationFactor": replication_factor, + "Encrypted": bool(encrypted), + "DisableDynamicNodesDistribution": True, + "Topology": { + "Members": list(members), + "ReplicationFactor": replication_factor, + "DynamicNodesDistribution": False, + }, + } + base = ctx.store.urls[0].rstrip("/") + url = base + "/admin/databases" # todo: move to client operation when it will be supported + cert, verify = (tls or TLSConfig()).to_requests_tuple() + r = requests.put(url, json=body, cert=cert, verify=verify, timeout=30) + r.raise_for_status() + return + + from ravendb.serverwide.database_record import DatabaseRecord + from ravendb.serverwide.operations.common import CreateDatabaseOperation + rec = DatabaseRecord(db_name) + if encrypted: + rec.encrypted = True + ctx.maintenance_server().send(CreateDatabaseOperation(rec, replication_factor)) + + +def delete_database(ctx, db_name): + from ravendb.serverwide.operations.common import DeleteDatabaseOperation + ctx.maintenance_server().send(DeleteDatabaseOperation(db_name)) diff --git a/plugins/module_utils/services/db_settings_service.py b/plugins/module_utils/services/db_settings_service.py new file mode 100644 index 0000000..c510069 --- /dev/null +++ b/plugins/module_utils/services/db_settings_service.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.ravendb.ravendb.plugins.module_utils.core.configuration import validate_kv, diff_kv + + +def 
validate_database_settings(d): + """Validate and normalize database_settings.""" + return validate_kv(d, "database_settings", allow_none=True) + + +def get_current(ctx, db_name): + """ + Returns dict of current db settings + """ + from ravendb.serverwide.operations.configuration import GetDatabaseSettingsOperation + s = ctx.store.maintenance.send(GetDatabaseSettingsOperation(db_name)) + return (s.settings or {}) if s else {} + + +def apply(ctx, db_name, to_apply): + from ravendb.serverwide.operations.configuration import PutDatabaseSettingsOperation + from ravendb.documents.operations.server_misc import ToggleDatabasesStateOperation + ctx.store.maintenance.send(PutDatabaseSettingsOperation(db_name, to_apply)) + ctx.maintenance_server().send(ToggleDatabasesStateOperation(db_name, True)) + ctx.maintenance_server().send(ToggleDatabasesStateOperation(db_name, False)) + + +def diff(desired, current): + """ + Compare desired and current settings. + Returns dict of settings to apply. + """ + return diff_kv(desired, current) diff --git a/plugins/module_utils/services/encryption_service.py b/plugins/module_utils/services/encryption_service.py new file mode 100644 index 0000000..5de894c --- /dev/null +++ b/plugins/module_utils/services/encryption_service.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os +import errno +from ansible_collections.ravendb.ravendb.plugins.module_utils.services.cluster_service import fetch_topology, collect_tags + + +def _requests(): + try: + import requests + return requests + except ImportError: + raise RuntimeError("Python 'requests' is required for encryption operations. Install 'requests'.") + + +def fetch_generated_key(ctx, tls): + """ + Ask the server to generate an encryption key. 
+ """ + base = ctx.store.urls[0].rstrip("/") + url = "{}/admin/secrets/generate".format(base) + cert, verify = tls.to_requests_tuple() + + response = _requests().get(url, cert=cert, verify=verify) + response.raise_for_status() + return response.text.strip() + + +def write_key_safe(path, key): + """ + Write the key to 'path'. + """ + directory = os.path.dirname(path) or "." + try: + os.makedirs(directory) + except OSError as e: + if e.errno != errno.EEXIST: + raise + prev_umask = os.umask(0o177) + try: + with open(path, "w") as f: + f.write(key + "\n") + finally: + os.umask(prev_umask) + + +def read_key(path): + """ + Read entire file and strip trailing whitespace/newlines. + """ + with open(path, "r") as f: + return f.read().strip() + + +def distribute_key(ctx, db_name, key, tls, only_tags=None): + """ + Distribute the encryption key to ALL nodes in the cluster. + If only_tags is None/empty, distribute to all nodes in the cluster. + """ + if only_tags: + tags = list(only_tags) + else: + topology = fetch_topology(ctx) + tags = collect_tags(topology) + if not tags: + raise RuntimeError("No nodes found in cluster topology.") + + params = [("name", db_name)] + for t in tags: + params.append(("node", t)) + + base = ctx.store.urls[0].rstrip("/") + url = "{}/admin/secrets/distribute".format(base) + cert, verify = tls.to_requests_tuple() + + response = _requests().post(url, params=params, data=key, headers={"Content-Type": "text/plain"}, cert=cert, verify=verify) + if response.status_code not in (200, 201, 204): + raise RuntimeError("Assigning encryption key failed: HTTP {} - {}".format(response.status_code, response.text)) + + return tags + + +def validate_encryption_params(desired_state, tls, encrypted, + generate_key, key_path=None, output_path=None): + """ + Validate parameters when creating an encrypted database. 
+ """ + if desired_state == "present" and encrypted: + if not tls.certificate_path: + return False, "encrypted=true requires certificate_path for admin endpoints." + + if not (generate_key or key_path): + return False, "encrypted=true requires either generate_encryption_key=true or key_path=." + + if generate_key and key_path: + return False, "generate_encryption_key and key_path are mutually exclusive." + + if output_path and not generate_key: + return False, "encryption_key_output_path can only be used when generate_encryption_key=true." + + return True, None diff --git a/plugins/module_utils/services/index_config_service.py b/plugins/module_utils/services/index_config_service.py new file mode 100644 index 0000000..99e5ab1 --- /dev/null +++ b/plugins/module_utils/services/index_config_service.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.ravendb.ravendb.plugins.module_utils.core.configuration import ( + validate_kv, diff_kv, normalize_str_values +) +from ansible_collections.ravendb.ravendb.plugins.module_utils.services.index_service import ( + get_definition, + _normalize_deployment_mode_value, _to_deployment_mode_enum +) + + +def validate_index_configuration(d): + """Validate and normalize per-index configuration.""" + return validate_kv(d, "index_configuration", allow_none=True) + + +def get_current(ctx, db_name, index_name): + """Return per-index configuration as a normalized dict.""" + definition = get_definition(ctx, db_name, index_name) + if not definition: + return {} + cfg = getattr(definition, "configuration", None) or {} + return normalize_str_values(cfg) + + +def diff(desired, current): + """Compute config differences.""" + return diff_kv(desired, current) + + +def _build_index_definition(name, maps, 
reduce=None, configuration=None, deployment_mode=None): + """Build a minimal IndexDefinition (name, maps, reduce, configuration).""" + from ravendb.documents.indexes.definitions import IndexDefinition + idx = IndexDefinition() + idx.name = name + if maps: + if isinstance(maps, set): + idx.maps = maps + elif isinstance(maps, (list, tuple)): + idx.maps = set(maps) + else: + raise TypeError("maps must be a list/tuple/set of strings") + if reduce: + idx.reduce = reduce + cfg = normalize_str_values(configuration or {}) + if cfg: + idx.configuration = cfg + if deployment_mode: + idx.deployment_mode = _to_deployment_mode_enum(deployment_mode) + + return idx + + +def _put_index_definition(ctx, db_name, index_definition): + """PUT a single definition using PutIndexesOperation, handling older signatures.""" + from ravendb.documents.operations.indexes import PutIndexesOperation + m = ctx.maintenance_for_db(db_name) + try: + op = PutIndexesOperation(index_definition) + return m.send(op) + except TypeError: + op = PutIndexesOperation([index_definition]) + return m.send(op) + + +def apply(ctx, db_name, index_name, to_apply): + """Merge and apply configuration changes to an index.""" + definition = get_definition(ctx, db_name, index_name) + if not definition: + raise RuntimeError("Index definition '{}' not found while applying configuration.".format(index_name)) + + current_cfg = normalize_str_values(getattr(definition, "configuration", None) or {}) + merged_cfg = dict(current_cfg) + merged_cfg.update(to_apply) + + maps = list(definition.maps) if getattr(definition, "maps", None) else [] + reduce = getattr(definition, "reduce", None) + existing_dm = _normalize_deployment_mode_value(getattr(definition, "deployment_mode", None)) + + new_def = _build_index_definition(index_name, maps, reduce, merged_cfg, deployment_mode=existing_dm) + _put_index_definition(ctx, db_name, new_def) diff --git a/plugins/module_utils/services/index_service.py 
b/plugins/module_utils/services/index_service.py new file mode 100644 index 0000000..d390991 --- /dev/null +++ b/plugins/module_utils/services/index_service.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import sys +from ansible_collections.ravendb.ravendb.plugins.module_utils.core import messages as msg + + +def _normalize_deployment_mode_value(value): + if value is None: + return None + + name = getattr(value, "name", None) + s = str(name if name else value).strip().lower() + if "rolling" in s: + return "rolling" + if "parallel" in s: + return "parallel" + return s + + +def _to_deployment_mode_enum(value): + if value is None: + return None + from ravendb.documents.indexes.definitions import IndexDeploymentMode + norm = _normalize_deployment_mode_value(value) + if norm == "rolling": + return IndexDeploymentMode.ROLLING + if norm == "parallel": + return IndexDeploymentMode.PARALLEL + raise ValueError("Unknown deployment_mode: {}".format(value)) + + +def create_dynamic_index(name, definition): + """Dynamically create a single-map index class based on the given definition.""" + from ravendb import AbstractIndexCreationTask + + class DynamicIndex(AbstractIndexCreationTask): + def __init__(self): + super(DynamicIndex, self).__init__() + self.map = definition.get("map")[0] + reduce_def = definition.get("reduce") + if reduce_def: + self.reduce = reduce_def + + dm = definition.get("deployment_mode") or definition.get("DeploymentMode") + if dm: + self.deployment_mode = _to_deployment_mode_enum(dm) + + DynamicIndex.__name__ = name + return DynamicIndex + + +def create_dynamic_multimap_index(name, definition): + """Dynamically create a multi-map index class based on the given definition.""" + from ravendb.documents.indexes.abstract_index_creation_tasks 
import AbstractMultiMapIndexCreationTask + + class DynamicIndex(AbstractMultiMapIndexCreationTask): + def __init__(self): + super(DynamicIndex, self).__init__() + maps_def = definition.get("map") or [] + for map_def in maps_def: + self._add_map(map_def) + + reduce_def = definition.get("reduce") + if reduce_def: + self.reduce = reduce_def + + dm = definition.get("deployment_mode") or definition.get("DeploymentMode") + if dm: + self.deployment_mode = _to_deployment_mode_enum(dm) + + DynamicIndex.__name__ = name + return DynamicIndex + + +def list_definitions(ctx, db_name): + from ravendb.documents.operations.indexes import GetIndexesOperation + return ctx.maintenance_for_db(db_name).send(GetIndexesOperation(0, sys.maxsize)) or [] + + +def get_definition(ctx, db_name, index_name): + defs = list_definitions(ctx, db_name) + for d in defs: + if getattr(d, "name", None) == index_name: + return d + return None + + +def index_matches(existing_index, definition): + """Check if an existing index matches the expected definition (map/reduce).""" + if definition is None: + return True + + existing_maps = set(map(str.strip, existing_index.maps)) if getattr(existing_index, "maps", None) else set() + existing_reduce = getattr(existing_index, "reduce", None) + + expected_maps = set(map(str.strip, definition.get("map", []))) + normalized_existing_reduce = existing_reduce.strip() if isinstance(existing_reduce, str) and existing_reduce else None + normalized_expected_reduce = (definition.get("reduce") or "").strip() + if not normalized_expected_reduce: + normalized_expected_reduce = None + + if not (existing_maps == expected_maps and normalized_existing_reduce == normalized_expected_reduce): + return False + + desired_dm = _normalize_deployment_mode_value(definition.get("deployment_mode") or definition.get("DeploymentMode")) + if desired_dm is None: + return True + + existing_dm = _normalize_deployment_mode_value(getattr(existing_index, "deployment_mode", None)) + return desired_dm == 
existing_dm + + +def create_index(ctx, db_name, name, definition): + """Create an index, handling both single-map and multi-map definitions.""" + if len(definition.get("map")) > 1: + DynamicIndexClass = create_dynamic_multimap_index(name, definition) + else: + DynamicIndexClass = create_dynamic_index(name, definition) + index = DynamicIndexClass() + index.execute(ctx.store, db_name) + + +def delete_index(ctx, db_name, name): + from ravendb.documents.operations.indexes import DeleteIndexOperation + ctx.maintenance_for_db(db_name).send(DeleteIndexOperation(name)) + + +def get_index_state(ctx, db_name, name): + """Return the logical index state""" + from ravendb.documents.operations.indexes import GetIndexStatisticsOperation + stats = ctx.maintenance_for_db(db_name).send(GetIndexStatisticsOperation(name)) + return getattr(stats, "state", None) + + +def enable_index(ctx, db_name, name, cluster_wide, check_mode): + """Enable a RavenDB index, optionally cluster-wide.""" + from ravendb.documents.indexes.definitions import IndexState + from ravendb.documents.operations.indexes import EnableIndexOperation + + current = get_index_state(ctx, db_name, name) + if current != IndexState.DISABLED: + return False, msg.idx_already_enabled(name) + + if check_mode: + return True, msg.idx_would_enable(name, cluster_wide) + + ctx.maintenance_for_db(db_name).send(EnableIndexOperation(name, cluster_wide)) + return True, msg.idx_enabled(name, cluster_wide=cluster_wide) + + +def disable_index(ctx, db_name, name, cluster_wide, check_mode): + """Disable a RavenDB index, optionally cluster-wide.""" + from ravendb.documents.indexes.definitions import IndexState + from ravendb.documents.operations.indexes import DisableIndexOperation + + current = get_index_state(ctx, db_name, name) + if current == IndexState.DISABLED: + return False, msg.idx_already_disabled(name) + + if check_mode: + return True, msg.idx_would_disable(name, cluster_wide) + + 
ctx.maintenance_for_db(db_name).send(DisableIndexOperation(name, cluster_wide)) + return True, msg.idx_disabled(name, cluster_wide=cluster_wide) + + +def resume_index(ctx, db_name, name, check_mode): + """Resume a paused RavenDB index.""" + from ravendb.documents.operations.indexes import GetIndexingStatusOperation, StartIndexOperation + from ravendb.documents.indexes.definitions import IndexRunningStatus + + status = ctx.maintenance_for_db(db_name).send(GetIndexingStatusOperation()) + index = next((x for x in getattr(status, "indexes", []) if x.name == name), None) + if index and index.status == IndexRunningStatus.RUNNING: + return False, msg.idx_already_resumed(name) + + if check_mode: + return True, msg.idx_would_resume(name) + + ctx.maintenance_for_db(db_name).send(StartIndexOperation(name)) + return True, msg.idx_resumed(name) + + +def pause_index(ctx, db_name, name, check_mode): + """Pause a running RavenDB index.""" + from ravendb.documents.operations.indexes import GetIndexingStatusOperation, StopIndexOperation + from ravendb.documents.indexes.definitions import IndexRunningStatus + + status = ctx.maintenance_for_db(db_name).send(GetIndexingStatusOperation()) + index = next((x for x in getattr(status, "indexes", []) if x.name == name), None) + if index and index.status == IndexRunningStatus.PAUSED: + return False, msg.idx_already_paused(name) + + if check_mode: + return True, msg.idx_would_pause(name) + + ctx.maintenance_for_db(db_name).send(StopIndexOperation(name)) + return True, msg.idx_paused(name) + + +def reset_index(ctx, db_name, name, check_mode): + """Reset an existing index.""" + from ravendb.documents.operations.indexes import ResetIndexOperation + + if check_mode: + return True, msg.idx_would_reset(name) + + ctx.maintenance_for_db(db_name).send(ResetIndexOperation(name)) + return True, msg.idx_reset(name) + + +def apply_mode(ctx, db_name, name, mode, cluster_wide, check_mode): + """Dispatch mode operation.""" + if mode == "enabled": + return 
enable_index(ctx, db_name, name, cluster_wide, check_mode) + if mode == "disabled": + return disable_index(ctx, db_name, name, cluster_wide, check_mode) + if mode == "resumed": + return resume_index(ctx, db_name, name, check_mode) + if mode == "paused": + return pause_index(ctx, db_name, name, check_mode) + if mode == "reset": + return reset_index(ctx, db_name, name, check_mode) + return False, "Unsupported mode '{}' specified.".format(mode) diff --git a/plugins/module_utils/services/node_service.py b/plugins/module_utils/services/node_service.py new file mode 100644 index 0000000..2d284f8 --- /dev/null +++ b/plugins/module_utils/services/node_service.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- + +# Copyright (c), RavenDB +# GNU General Public License v3.0 or later (see COPYING or +# https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +def _requests(): + try: + import requests + return requests + except ImportError: + raise RuntimeError("Python 'requests' is required for node operations. Install 'requests'.") + + +def node_in_topology(topology, search_tag, search_url): + """ + Return tuple (present: bool, role: str|None, existing_tag: str|None, existing_url: str|None) + by scanning members/watchers/promotables. + """ + roles = [ + ("members", "Member"), + ("watchers", "Watcher"), + ("promotables", "Promotable"), + ] + for attr, role_name in roles: + group = getattr(topology, attr, None) or {} + for tag, url in group.items(): + if tag == search_tag or url == search_url: + return True, role_name, tag, url + return False, None, None, None + + +def add_node(ctx, tag, url, is_watcher, tls): + """ + PUT /admin/cluster/node on the leader the ctx is connected to. + Raises RuntimeError on HTTP error. 
+ """ + base = ctx.store.urls[0].rstrip("/") + endpoint = "{}/admin/cluster/node".format(base) + + params = {"url": url, "tag": tag} + if is_watcher: + params["watcher"] = "true" + + cert, verify = tls.to_requests_tuple() + r = _requests().put(endpoint, params=params, headers={"Content-Type": "application/json"}, cert=cert, verify=verify) + if r.status_code not in (200, 201, 204): + try: + detail = r.json().get("Message", r.text) + except Exception: + detail = r.text + raise RuntimeError(detail) diff --git a/plugins/modules/database.py b/plugins/modules/database.py index 79419f1..4d3d735 100644 --- a/plugins/modules/database.py +++ b/plugins/modules/database.py @@ -1,4 +1,3 @@ -#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c), RavenDB @@ -13,74 +12,85 @@ module: database short_description: Manage RavenDB databases description: - - This module allows you to create or delete a RavenDB database. - - It supports providing a replication factor and secured connections using certificates. - - Check mode is supported to simulate database creation or deletion without applying changes. - - Supports creating encrypted databases by assigning a secret key (generated or user-provided) and distributing it to all cluster nodes. - - Supports applying per-database settings (database_settings) and triggering a safe database reload so changes take effect. + - Create or delete a RavenDB database, and optionally apply per-database settings. + - Supports secured connections using client certificates and optional CA verification. + - Check mode is supported to simulate creation, deletion, or settings changes without applying them. + - Supports creating encrypted databases by assigning a secret key (generated or user-provided) and distributing it to all cluster nodes. + - Supports fixed placement by specifying exact cluster node tags to host the database (topology members). 
+ - Supports applying per-database settings (C(database_settings)) and triggering a safe database reload so changes take effect. version_added: "1.0.0" author: "Omer Ratsaby (@thegoldenplatypus)" extends_documentation_fragment: -- ravendb.ravendb.ravendb + - ravendb.ravendb.ravendb options: - replication_factor: - description: - - Number of server nodes to replicate the database to. - - Must be a positive integer. - - Only used when creating a database. - required: false - default: 1 - type: int - state: - description: - - Desired state of the database. - - If C(present), the database will be created if it does not exist. - - If C(absent), the database will be deleted if it exists. - required: false - type: str - choices: - - present - - absent - default: present - encrypted: - description: - - Create the database as encrypted. - - When C(true), the module ensures a secret key is assigned (generated or read from file) and distributed to all cluster nodes before creation. - - Requires C(certificate_path) to access admin endpoints. - required: false - default: false - type: bool - encryption_key: - description: - - Path to a file that contains the raw encryption key (plain text). - - Mutually exclusive with C(generate_encryption_key). - - Used only when C(encrypted=true). - required: false - type: str - generate_encryption_key: - description: - - If C(true), asks the server to generate a new encryption key via the admin API. - - Mutually exclusive with C(encryption_key). - - Used only when C(encrypted=true). - required: false - default: false - type: bool - encryption_key_output_path: - description: - - When C(generate_encryption_key=true), write the generated key to this local file with safe permissions (0600 umask). - - Ignored if C(generate_encryption_key=false). - required: false - type: str - database_settings: - description: - - Dictionary of database-level settings to apply. - - Values are normalized to strings and compared against current customized settings. 
- - If differences exist, the module updates settings and toggles the database state to reload them safely. - required: false - type: dict - default: {} + replication_factor: + description: + - Number of server nodes to replicate the database to. + - Must be a positive integer. + - Only used when creating a database. + - Required on creation; ignored for existing databases. + required: false + default: null + type: int + topology_members: + description: + - Optional list of cluster node tags to host this database (fixed placement). + - When provided, its length must equal C(replication_factor). + - Honored only on creation. If the database already exists, providing C(topology_members) will fail. + required: false + type: list + elements: str + default: [] + state: + description: + - Desired state of the database. + - If C(present), the database will be created if it does not exist, and settings will be reconciled. + - If C(absent), the database will be deleted if it exists. + - If omitted (C(null)), the module reconciles settings on an existing database but will not create or delete it. + - If the database does not exist and C(state) is omitted, the task fails with guidance to use C(state=present). + required: false + type: str + choices: [present, absent] + default: null + encrypted: + description: + - Create the database as encrypted. + - When C(true), the module ensures a secret key is assigned (generated or read from file) and distributed to all cluster nodes before creation. + - Requires C(certificate_path) to access admin endpoints. + required: false + default: false + type: bool + encryption_key: + description: + - Path to a file that contains the raw encryption key (plain text). + - Mutually exclusive with C(generate_encryption_key). + - Used only when C(encrypted=true). + required: false + type: str + generate_encryption_key: + description: + - If C(true), asks the server to generate a new encryption key via the admin API. 
+ - Mutually exclusive with C(encryption_key). + - Used only when C(encrypted=true). + required: false + default: false + type: bool + encryption_key_output_path: + description: + - When C(generate_encryption_key=true), write the generated key to this local file with safe permissions. + - Ignored if C(generate_encryption_key=false). + required: false + type: str + database_settings: + description: + - Dictionary of database-level settings to apply. + - Keys and values are normalized to strings and compared against current customized settings. + - When differences exist, the module updates settings and toggles the database state to reload them safely. + required: false + type: dict + default: {} seealso: - name: RavenDB documentation @@ -97,562 +107,193 @@ replication_factor: 3 state: present -- name: Delete a RavenDB database +- name: Create RF=2 database on specific nodes A and C (fixed placement) ravendb.ravendb.database: url: "http://{{ ansible_host }}:8080" - database_name: "my_database" - state: absent + database_name: "placed_db" + replication_factor: 2 + topology_members: ["A", "C"] + state: present -- name: Create a RavenDB database (secured server with self-signed certificates) +- name: Create an encrypted database with a generated key and save it locally (requires client cert) become: true ravendb.ravendb.database: - url: "http://{{ ansible_host }}:443" - database_name: "my_secured_database" + url: "https://{{ ansible_host }}:443" + database_name: "secure_db" replication_factor: 1 - certificate_path: "combined_raven_cert.pem" + certificate_path: "admin.client.combined.pem" ca_cert_path: "ca_certificate.pem" + encrypted: true + generate_encryption_key: true + encryption_key_output_path: "/home/$USER/secure_db.key" state: present -- name: Delete a RavenDB database (secured server with self-signed certificates) - become: true +- name: Create an encrypted database using a pre-provisioned key file ravendb.ravendb.database: - url: "http://{{ ansible_host 
}}:443" - database_name: "my_secured_database" - certificate_path: "/etc/ravendb/security/combined_raven_cert.pem" - ca_cert_path: "/etc/ravendb/security/ca_certificate.pem" - state: absent + url: "https://{{ ansible_host }}:443" + database_name: "secure_db2" + replication_factor: 1 + certificate_path: "admin.client.combined.pem" + ca_cert_path: "ca_certificate.pem" + encrypted: true + encryption_key: "/home/$USER/secure_db2.key" + state: present -- name: Simulate creating a RavenDB database (check mode) +- name: Update database settings (idempotent) – will not create database if absent (state omitted) ravendb.ravendb.database: url: "http://{{ ansible_host }}:8080" database_name: "my_database" - replication_factor: 3 - state: present + database_settings: + Indexing.MapBatchSize: "64" + +- name: Apply settings in check mode (no changes will be made) + ravendb.ravendb.database: + url: "http://{{ ansible_host }}:8080" + database_name: "my_database" + database_settings: + Indexing.MapBatchSize: "64" check_mode: yes -- name: Simulate deleting a RavenDB database (check mode) +- name: Delete a RavenDB database ravendb.ravendb.database: url: "http://{{ ansible_host }}:8080" database_name: "my_database" state: absent - check_mode: yes ''' RETURN = ''' changed: - description: Indicates if any change was made (or would have been made in check mode). - type: bool - returned: always - sample: true + description: Indicates if any change was made (or would have been made in check mode). + type: bool + returned: always + sample: true msg: - description: Human-readable message describing the result or error. - type: str - returned: always - sample: Database 'my_database' created successfully. - version_added: "1.0.0" + description: Human-readable message describing the result or error. + type: str + returned: always + sample: Database 'my_database' created successfully. 
+ version_added: "1.0.0" ''' import traceback -import os -import re - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse from ansible.module_utils.basic import AnsibleModule, missing_required_lib -HAS_REQUESTS = True -try: - import requests -except ImportError: - HAS_REQUESTS = False - -LIB_IMP_ERR = None +LIB_ERR = None try: from ansible_collections.ravendb.ravendb.plugins.module_utils.common_args import ravendb_common_argument_spec - from ravendb import DocumentStore, GetDatabaseNamesOperation - from ravendb.serverwide.operations.common import CreateDatabaseOperation, DeleteDatabaseOperation - from ravendb.serverwide.database_record import DatabaseRecord - from ravendb.exceptions.raven_exceptions import RavenException - from ravendb.serverwide.operations.configuration import GetDatabaseSettingsOperation, PutDatabaseSettingsOperation - from ravendb.documents.operations.server_misc import ToggleDatabasesStateOperation - from ravendb.serverwide.operations.common import GetDatabaseRecordOperation + from ansible_collections.ravendb.ravendb.plugins.module_utils.core.validation import ( + validate_url, validate_database_name, validate_replication_factor_optional, validate_paths_exist, + validate_state_optional, validate_topology_members, collect_errors + ) + from ansible_collections.ravendb.ravendb.plugins.module_utils.core.configuration import validate_kv + from ansible_collections.ravendb.ravendb.plugins.module_utils.core.client import DocumentStoreFactory + from ansible_collections.ravendb.ravendb.plugins.module_utils.core.tls import TLSConfig + from ansible_collections.ravendb.ravendb.plugins.module_utils.reconcilers.database_reconciler import DatabaseReconciler + from ansible_collections.ravendb.ravendb.plugins.module_utils.dto.database import DatabaseSpec, EncryptionSpec + from ansible_collections.ravendb.ravendb.plugins.module_utils.services.encryption_service import validate_encryption_params + from 
ansible_collections.ravendb.ravendb.plugins.module_utils.services import database_service as dbs HAS_LIB = True except ImportError: HAS_LIB = False - LIB_IMP_ERR = traceback.format_exc() - - -def create_store(url, database_name, certificate_path=None, ca_cert_path=None): - """Create and initialize a RavenDB DocumentStore with optional client and CA certificates.""" - store = DocumentStore(urls=[url]) - if certificate_path: - store.certificate_pem_path = certificate_path - if ca_cert_path: - store.trust_store_path = ca_cert_path - store.database = database_name - store.initialize() - return store - - -def get_existing_databases(store): - """Retrieve the list of existing RavenDB databases from the server.""" - return store.maintenance.server.send(GetDatabaseNamesOperation(0, 128)) - - -def handle_present_state(store, database_name, replication_factor, url, certificate_path, encrypted, generate_encryption_key, - encryption_key, encryption_key_output_path, db_settings, check_mode): - """ - Ensure the specified database exists. 
- Returns a tuple: (changed: bool, message: str) - """ - existing_databases = get_existing_databases(store) - - if database_name not in existing_databases: - if encrypted: - if check_mode: - return True, "Encrypted Database '{}' would be created.".format(database_name) - - ensure_secret_assigned( - url=url, - database_name=database_name, - certificate_path=certificate_path, - generate_encryption_key=generate_encryption_key, - encryption_key=encryption_key, - encryption_key_output_path=encryption_key_output_path, - check_mode=check_mode - ) - - if check_mode: - return True, "Database '{}' would be created.".format(database_name) - - create_database(store, database_name, replication_factor, encrypted) - created = True - created_msg = "Database '{}' created successfully{}.".format(database_name, " (encrypted)" if encrypted else "") - - else: - mismatch_result = verify_encryption_or_fail(store, database_name, encrypted, check_mode) - if mismatch_result is not None: - return mismatch_result - - created = False - created_msg = "Database '{}' already exists.".format(database_name) - - reconcile_result = reconcile_db_settings(store, database_name, db_settings, check_mode, created_msg) - if reconcile_result: - return reconcile_result - - if created: - return True, created_msg - return False, created_msg + " No changes." - - -def handle_absent_state(store, database_name, check_mode): - """ - Ensure the specified database is absent. 
- Returns a tuple: (changed: bool, message: str) - """ - existing_databases = get_existing_databases(store) - - if database_name not in existing_databases: - return False, "Database '{}' does not exist.".format(database_name) - - if check_mode: - return True, "Database '{}' would be deleted.".format(database_name) - - delete_database_operation = DeleteDatabaseOperation(database_name) - store.maintenance.server.send(delete_database_operation) - return True, "Database '{}' deleted successfully.".format(database_name) - - -def create_database(store, database_name, replication_factor, encrypted): - """ - Create a new database on the server. - Sets the encrypted flag if requested. - """ - database_record = DatabaseRecord(database_name) - if encrypted: - database_record.encrypted = True - - create_database_operation = CreateDatabaseOperation( - database_record=database_record, - replication_factor=replication_factor - ) - store.maintenance.server.send(create_database_operation) - - -def fetch_db_record(store, database_name): - """ - Fetch the database record for the specified database. - Returns a DatabaseRecord or None if not found. - """ - return store.maintenance.server.send(GetDatabaseRecordOperation(database_name)) - - -def verify_encryption_or_fail(store, database_name, desired_encrypted, check_mode): - """ - Verify that the encryption status of the database matches what is requested. - Returns None if status matches. - Returns (False, message) in check mode if it would fail. - Raises an Exception if mismatch is detected in normal mode. 
- """ - record = fetch_db_record(store, database_name) - if record is None: - raise Exception("Database '{}' is listed but its record could not be fetched.".format(database_name)) - - actual_flag = getattr(record, "encrypted", False) - actual_is_encrypted = (actual_flag is True) - desired_is_encrypted = (desired_encrypted is True) - if (desired_is_encrypted and not actual_is_encrypted) or (not desired_is_encrypted and actual_is_encrypted): - msg = ( - "Database '{name}' already exists but encryption status is '{actual}' while requested '{desired}'. " - "RavenDB does not support toggling encryption on an existing database. " - "Delete & recreate, or backup and restore with the desired key." - ).format( - name=database_name, - actual=actual_flag, - desired=desired_encrypted - ) - if check_mode: - return (False, "Would fail: " + msg) - raise Exception(msg) - - return None - - -def reconcile_db_settings(store, database_name, db_settings, check_mode, prefix_msg): - """ - Ensure the specified database has the desired settings. 
- Return either: - - a tuple: (changed: bool, message: str) - - None when no settings or no diffs - """ - if not db_settings: - return None - - current_settings = get_current_db_settings(store, database_name) - to_apply = diff_settings(db_settings, current_settings) - - if not to_apply: - return None - - keys_str = ", ".join(sorted(to_apply.keys())) - - if check_mode: - return True, "{} Would apply settings ({}) and reload.".format(prefix_msg, keys_str) - - store.maintenance.send(PutDatabaseSettingsOperation(database_name, to_apply)) - store.maintenance.server.send(ToggleDatabasesStateOperation(database_name, True)) - store.maintenance.server.send(ToggleDatabasesStateOperation(database_name, False)) - return True, "{} Applied settings ({}) and reloaded.".format(prefix_msg, keys_str) - - -def ensure_secret_assigned(url, database_name, certificate_path, generate_encryption_key, encryption_key, encryption_key_output_path, check_mode): - """ - Resolve/generate encryption key and POST it. - Returns a tuple: (changed: bool, message: str) - """ - if check_mode: - return True, "Would assign encryption key for database '{}'.".format(database_name) - - if generate_encryption_key: - key = fetch_generated_secret_key(url, certificate_path) - if encryption_key_output_path: - write_key_safe(encryption_key_output_path, key) - else: - key = read_from_file(encryption_key) - - if not key: - raise Exception("Encryption key is empty.") - - assign_secret_key(url, database_name, key, certificate_path) - - -def write_key_safe(path, key): - """ - Write the key to 'path'. - """ - directory = os.path.dirname(path) or "." - os.makedirs(directory, exist_ok=True) - prev_umask = os.umask(0o177) - - try: - with open(path, 'w') as f: - f.write(key + "\n") - finally: - os.umask(prev_umask) - - -def read_from_file(path): - """ - Read entire file and strip trailing whitespace/newlines. 
- """ - with open(path, 'r') as f: - return f.read().strip() - - -def fetch_generated_secret_key(base_url, cert_path): - """ - Ask the server to generate an encryption key. - """ - url = "{}/admin/secrets/generate".format(base_url.rstrip('/')) - response = requests.get( - url, - cert=cert_path, - verify=False - ) - response.raise_for_status() - return response.text.strip() - - -def normalize_topology_group(topology_group): - """ - Convert topology group into a {tag: url} mapping. - """ - if isinstance(topology_group, dict): - return topology_group - - mapping = {} - if isinstance(topology_group, list): - for item in topology_group: - if not isinstance(item, dict): - continue - - tag = item.get("Tag") or item.get("tag") - url = item.get("Url") or item.get("url") - - if tag and url: - mapping[tag] = url - - return mapping - - -def assign_secret_key(base_url, database_name, key, cert_path): - """ - Distribute the encryption key to ALL nodes in the cluster. - """ - topology_url = "{}/cluster/topology".format(base_url.rstrip('/')) - topology_response = requests.get( - topology_url, - cert=cert_path, - verify=False - ) - topology_response.raise_for_status() - data = topology_response.json() - topology = data.get("Topology") or data - - all_nodes = normalize_topology_group(topology.get("AllNodes", {})) - members = normalize_topology_group(topology.get("Members", {})) - promotables = normalize_topology_group(topology.get("Promotables", {})) - watchers = normalize_topology_group(topology.get("Watchers", {})) - - if all_nodes: - tags = sorted(all_nodes.keys()) - else: - tags = sorted(set(list(members.keys()) + list(promotables.keys()) + list(watchers.keys()))) - - if not tags: - raise Exception("No nodes found in cluster topology.") - - params = [("name", database_name)] - for t in tags: - params.append(("node", t)) - - distribute_url = "{}/admin/secrets/distribute".format(base_url.rstrip("/")) - headers = {"Content-Type": "text/plain"} - - response = requests.post( - 
distribute_url, - params=params, - data=key, - headers=headers, - cert=cert_path, - verify=False - ) - if response.status_code not in (200, 201, 204): - raise Exception("Assigning encryption key failed: HTTP {} - {}".format(response.status_code, response.text)) - return {"distributed_to": tags, "status": response.status_code} - - -def get_current_db_settings(store, db_name): - """ - Returns dict of customized settings - """ - s = store.maintenance.send(GetDatabaseSettingsOperation(db_name)) - return (s.settings or {}) if s else {} - - -def diff_settings(desired, current): - """ - Compare desired and current settings. - Returns dict of settings to apply. - """ - to_apply = {} - for k, v in (desired or {}).items(): - dv = "" if v is None else str(v) - cv = current.get(k) - if cv != dv: - to_apply[k] = dv - return to_apply - - -def is_valid_url(url): - """Return True if the given URL contains a valid scheme and netloc.""" - parsed = urlparse(url) - return all([parsed.scheme, parsed.netloc]) - - -def is_valid_database_name(name): - """Check if the database name is valid (letters, numbers, dashes, underscores).""" - return bool(re.match(r"^[a-zA-Z0-9_-]+$", name)) - - -def is_valid_replication_factor(factor): - """Return True if replication factor is a positive integer.""" - return isinstance(factor, int) and factor > 0 - - -def is_valid_bool(value): - """Return True if the value is a boolean.""" - return isinstance(value, bool) - - -def validate_paths(*paths): - """ - Validate that all given file paths exist on the filesystem. 
- Returns a tuple: (valid: bool, error_msg: Optional[str]) - """ - for path in paths: - if path and not os.path.isfile(path): - return False, "Path does not exist: {}".format(path) - return True, None - - -def is_valid_state(state): - """Return True if the state is either 'present' or 'absent'.""" - return state in ['present', 'absent'] - - -def validate_encryption_params(module, desired_state, certificate_path, encrypted, encryption_key, - generate_encryption_key, encryption_key_output_path): - """ - Validate parameters when creating an encrypted database. - """ - if desired_state == 'present' and encrypted: - if not certificate_path: - module.fail_json(msg="encrypted=true requires certificate_path for admin endpoints.") - - if not (generate_encryption_key or encryption_key): - module.fail_json(msg="encrypted=true requires either generate_encryption_key=true or encryption_key=.") - - if generate_encryption_key and encryption_key: - module.fail_json(msg="generate_encryption_key and encryption_key are mutually exclusive.") - - if encryption_key_output_path and not generate_encryption_key: - module.fail_json(msg="encryption_key_output_path can only be used when generate_encryption_key=true.") - - if encryption_key: - valid, error_msg = validate_paths(encryption_key) - if not valid: - module.fail_json(msg=error_msg) - - -def validate_database_settings(module, db_settings): - """Validate and normalize database_settings.""" - if not isinstance(db_settings, dict): - module.fail_json(msg="database_settings must be a dict.") - normalized = {} - for k, v in db_settings.items(): - if not isinstance(k, str): - module.fail_json(msg="database_settings keys must be strings. 
Bad key: {!r}".format(k)) - normalized[k] = "" if v is None else str(v) - return normalized + LIB_ERR = traceback.format_exc() def main(): module_args = ravendb_common_argument_spec() module_args.update( - replication_factor=dict(type='int', default=1), - state=dict(type='str', choices=['present', 'absent'], default='present'), + replication_factor=dict(type='int', default=None), + state=dict(type='str', choices=['present', 'absent'], default=None), encrypted=dict(type='bool', default=False), encryption_key=dict(type='str', required=False, no_log=True), generate_encryption_key=dict(type='bool', default=False), encryption_key_output_path=dict(type='str', required=False, no_log=True), database_settings=dict(type='dict', default={}), + topology_members=dict(type='list', elements='str', required=False, default=[]), ) - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - mutually_exclusive=[('generate_encryption_key', 'encryption_key')] - ) + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) if not HAS_LIB: - module.fail_json( - msg=missing_required_lib("ravendb"), - exception=LIB_IMP_ERR) - - if module.params.get('encrypted') and not HAS_REQUESTS: - module.fail_json(msg="Python 'requests' library is required for encrypted databases. 
Please install it.") + module.fail_json(msg=missing_required_lib("ravendb"), exception=LIB_ERR) url = module.params['url'] - database_name = module.params['database_name'] - replication_factor = module.params['replication_factor'] - certificate_path = module.params.get('certificate_path') - ca_cert_path = module.params.get('ca_cert_path') - desired_state = module.params['state'] + name = module.params['database_name'] + repl = module.params['replication_factor'] + cert_path = module.params.get('certificate_path') + ca_path = module.params.get('ca_cert_path') + state = module.params['state'] encrypted = module.params['encrypted'] - encryption_key = module.params.get('encryption_key') - generate_encryption_key = module.params.get('generate_encryption_key') - encryption_key_output_path = module.params.get('encryption_key_output_path') - db_settings = module.params.get('database_settings') - - if not is_valid_url(url): - module.fail_json(msg="Invalid URL: {}".format(url)) - - if not is_valid_database_name(database_name): - module.fail_json( - msg="Invalid database name: {}. Only letters, numbers, dashes, and underscores are allowed.".format(database_name)) - - if not is_valid_replication_factor(replication_factor): - module.fail_json( - msg="Invalid replication factor: {}. 
Must be a positive integer.".format(replication_factor)) - - valid, error_msg = validate_paths(certificate_path, ca_cert_path) - if not valid: - module.fail_json(msg=error_msg) + key_path = module.params.get('encryption_key') + gen_key = module.params.get('generate_encryption_key') + ekey_out_path = module.params.get('encryption_key_output_path') + db_settings = module.params.get('database_settings') or {} + topology_members = module.params.get('topology_members') or [] + + ok, err = collect_errors( + validate_url(url), + validate_database_name(name), + validate_replication_factor_optional(repl), + validate_paths_exist(cert_path, ca_path), + validate_state_optional(state), + validate_topology_members(topology_members, repl) + ) + if not ok: + module.fail_json(msg=err) - if not is_valid_state(desired_state): - module.fail_json( - msg="Invalid state: {}. Must be 'present' or 'absent'.".format(desired_state)) + tls = TLSConfig(certificate_path=cert_path, ca_cert_path=ca_path) + ok, err = validate_encryption_params(state, tls, encrypted, gen_key, key_path, ekey_out_path) + if not ok: + module.fail_json(msg=err) - validate_encryption_params(module, desired_state, certificate_path, encrypted, encryption_key, generate_encryption_key, encryption_key_output_path) + ok, normalized_settings, err = validate_kv(db_settings, "database_settings", allow_none=True) + if not ok: + module.fail_json(msg=err) - settings = validate_database_settings(module, db_settings) + ctx = None + db_name = name if state == "present" else None try: - store = create_store(url, database_name, certificate_path, ca_cert_path) - check_mode = module.check_mode + ctx = DocumentStoreFactory.create(url, db_name, cert_path, ca_path) + reconciler = DatabaseReconciler(ctx) + + spec = DatabaseSpec( + url=url, + name=name, + replication_factor=repl, + members=topology_members, + settings=normalized_settings or {}, + encryption=EncryptionSpec( + enabled=encrypted, + certificate_path=cert_path, + 
ca_cert_path=ca_path, + generate_key=gen_key, + key_path=key_path, + output_path=ekey_out_path, + ), + ) + + if state == "present": + res = reconciler.ensure_present(spec, tls, module.check_mode) + elif state == "absent": + res = reconciler.ensure_absent(name, module.check_mode) + else: + existing = set(dbs.list_databases(ctx)) + if name not in existing: + module.fail_json(msg="Database '{}' does not exist. Provide state=present to create it.".format(name)) - if desired_state == 'present': - changed, message = handle_present_state( - store, database_name, replication_factor, url, certificate_path, encrypted, generate_encryption_key, - encryption_key, encryption_key_output_path, settings, check_mode=check_mode) - elif desired_state == 'absent': - changed, message = handle_absent_state( - store, database_name, check_mode) + res = reconciler.ensure_present(spec, tls, module.check_mode) - module.exit_json(changed=changed, msg=message) + if res.failed: + module.fail_json(**res.to_ansible()) + else: + module.exit_json(**res.to_ansible()) - except RavenException as e: - module.fail_json(msg="RavenDB operation failed: {}".format(str(e))) except Exception as e: - module.fail_json(msg="An unexpected error occurred: {}".format(str(e))) + module.fail_json(msg="Unexpected error: {}".format(str(e))) finally: - if 'store' in locals(): - store.close() + if ctx: + ctx.close() if __name__ == '__main__': diff --git a/plugins/modules/index.py b/plugins/modules/index.py index 2a12d7c..8ba3060 100644 --- a/plugins/modules/index.py +++ b/plugins/modules/index.py @@ -1,4 +1,3 @@ -#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c), RavenDB @@ -13,81 +12,67 @@ module: index short_description: Manage RavenDB indexes description: - - This module allows you to create, delete, pause, resume, enable, disable, or reset RavenDB indexes. - - Supports check mode to simulate changes without applying them. 
- - Can create dynamic single-map and multi-map indexes based on a provided index definition. + - Create, delete, update, or apply operational modes to RavenDB indexes. + - Supports both single-map and multi-map index definitions (with optional reduce). + - Supports check mode to simulate changes without applying them. + - Can reconcile per-index configuration via C(index_configuration). version_added: "1.0.0" author: "Omer Ratsaby (@thegoldenplatypus)" extends_documentation_fragment: -- ravendb.ravendb.ravendb + - ravendb.ravendb.ravendb options: - url: - description: - - URL of the RavenDB server. - - Must include the scheme (http or https), hostname and port. - required: true - type: str - database_name: - description: - - Name of the database where the index resides/should be reside. - required: true - type: str - index_name: - description: - - Name of the index to create, delete, or modify. - - Must consist only of letters, numbers, dashes, and underscores. - required: true - type: str - index_definition: - description: - - Dictionary defining the index (maps and optional reduce). - - Required when creating a new index. - required: false - type: dict - certificate_path: - description: - - Path to a client certificate (PEM format) for secured communication. - required: false - type: str - ca_cert_path: - description: - - Path to a trusted CA certificate file to verify the RavenDB server's certificate. - required: false - type: str - state: - description: - - Desired state of the index. - - If C(present), the index will be created if it does not exist. - - If C(absent), the index will be deleted if it exists. - required: false - type: str - choices: - - present - - absent - mode: - description: - - Operational mode to apply to an existing index. - required: false - type: str - choices: - - resumed - - paused - - enabled - - disabled - - reset - cluster_wide: - description: - - Whether to apply enable/disable operations cluster-wide. 
-    required: false
-    type: bool
-    default: false
-
+  index_name:
+    description:
+      - Name of the index to create, delete, or modify.
+      - Must consist only of letters, numbers, dashes, and underscores.
+    required: true
+    type: str
+  index_definition:
+    description:
+      - Dictionary defining the index (C(map) list and optional C(reduce) string).
+      - Required when creating a new index.
+      - When present for an existing index, differences are applied idempotently.
+    required: false
+    type: dict
+  state:
+    description:
+      - Desired state of the index.
+      - If C(present), the index will be created if it does not exist, and the definition/configuration will be reconciled.
+      - If C(absent), the index will be deleted if it exists.
+      - If omitted (C(null)), the module operates in "reconcile" mode on existing indexes only (definition, configuration, and/or C(mode)).
+      - If the index does not exist and only C(mode) is provided, the task fails with guidance to use C(state=present).
+    required: false
+    type: str
+    choices: [present, absent]
+    default: null
+  mode:
+    description:
+      - Operational mode to apply to an existing index (one of enable/disable/pause/resume/reset).
+      - If the index does not exist and only C(mode) is provided, the task fails with guidance to create it first.
+    required: false
+    type: str
+    choices: [resumed, paused, enabled, disabled, reset]
+  cluster_wide:
+    description:
+      - Whether to apply enable/disable operations cluster-wide.
+    required: false
+    type: bool
+    default: false
+  index_configuration:
+    description:
+      - Per-index configuration key/value pairs to reconcile.
+      - Keys and values are normalized to strings and compared against the current index definition's configuration.
+      - If differences exist, the module updates the definition with the merged configuration.
+    required: false
+    type: dict
+    default: {}
 
 seealso:
-  - name: RavenDB documentation
-    description: Official RavenDB documentation
-    link: https://ravendb.net/docs
+  - name: RavenDB documentation
+    description: Official RavenDB documentation
+    link: https://ravendb.net/docs
+
 '''
 
 EXAMPLES = '''
@@ -132,12 +117,13 @@
     }
     state: present
 
-- name: Delete a RavenDB index
+- name: Reconcile per-index configuration (idempotent)
   ravendb.ravendb.index:
     url: "http://{{ ansible_host }}:8080"
     database_name: "my_database"
     index_name: "UsersByName"
-    state: absent
+    index_configuration:
+      Indexing.MapBatchSize: "128"
 
 - name: Disable a RavenDB index (cluster-wide)
   ravendb.ravendb.index:
@@ -147,35 +133,15 @@
     mode: disabled
     cluster_wide: true
 
-- name: Enable a RavenDB index
-  ravendb.ravendb.index:
-    url: "http://{{ ansible_host }}:8080"
-    database_name: "my_database"
-    index_name: "Orders/ByCompany"
-    mode: enabled
-
-- name: Pause a RavenDB index
+- name: Pause a RavenDB index (check mode)
   ravendb.ravendb.index:
     url: "http://{{ ansible_host }}:8080"
     database_name: "my_database"
     index_name: "Orders/ByCompany"
     mode: paused
+  check_mode: yes
 
-- name: Resume a RavenDB index
-  ravendb.ravendb.index:
-    url: "http://{{ ansible_host }}:8080"
-    database_name: "my_database"
-    index_name: "Orders/ByCompany"
-    mode: resumed
-
-- name: Reset a RavenDB index
-  ravendb.ravendb.index:
-    url: "http://{{ ansible_host }}:8080"
-    database_name: "my_database"
-    index_name: "Orders/ByCompany"
-    mode: reset
-
-- name: Update an existing RavenDB index definition
+- name: Update an existing RavenDB index definition (idempotent update)
   ravendb.ravendb.index:
     url: "http://{{ ansible_host }}:8080"
     database_name: "my_database"
@@ -193,379 +159,61 @@
     count = g.Sum(x => x.count)
 }
 state: present
+
+- name: Delete a RavenDB index
+  ravendb.ravendb.index:
+    url: "http://{{ ansible_host }}:8080"
+    database_name: "my_database"
+    index_name: "UsersByName"
+    state: absent
+
+- name: Create index with rolling 
deployment + ravendb.ravendb.index: + url: "http://{{ ansible_host }}:8080" + database_name: "my_database" + index_name: "Orders/ByCompany" + state: present + index_definition: + map: + - "from o in docs.Orders select new { o.Company }" + deployment_mode: rolling + ''' RETURN = ''' changed: - description: Indicates if any change was made (or would have been made in check mode). - type: bool - returned: always - sample: true + description: Indicates if any change was made (or would have been made in check mode). + type: bool + returned: always + sample: true msg: - description: Human-readable message describing the result or error. - type: str - returned: always - sample: Index 'Products_ByName' created successfully. - version_added: "1.0.0" + description: Human-readable message describing the result or error. + type: str + returned: always + sample: Index 'Products_ByName' created successfully. + version_added: "1.0.0" ''' import traceback -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse -import re -import os -import sys from ansible.module_utils.basic import AnsibleModule, missing_required_lib -LIB_IMP_ERR = None +LIB_ERR = None try: from ansible_collections.ravendb.ravendb.plugins.module_utils.common_args import ravendb_common_argument_spec - from ravendb import DocumentStore, AbstractIndexCreationTask - from ravendb.documents.indexes.abstract_index_creation_tasks import AbstractMultiMapIndexCreationTask - from ravendb.documents.operations.indexes import ( - GetIndexesOperation, - DeleteIndexOperation, - EnableIndexOperation, - DisableIndexOperation, - StartIndexOperation, - StopIndexOperation, - GetIndexingStatusOperation, - ResetIndexOperation, - GetIndexStatisticsOperation) - from ravendb.documents.indexes.definitions import IndexRunningStatus, IndexState - from ravendb.exceptions.raven_exceptions import RavenException + from ansible_collections.ravendb.ravendb.plugins.module_utils.core.client import 
DocumentStoreFactory + from ansible_collections.ravendb.ravendb.plugins.module_utils.core.validation import ( + validate_url, validate_database_name, validate_index_name, validate_dict, + validate_paths_exist, validate_state_optional, validate_mode, validate_bool, collect_errors + ) + from ansible_collections.ravendb.ravendb.plugins.module_utils.services import index_service as idxsvc + from ansible_collections.ravendb.ravendb.plugins.module_utils.services.index_config_service import validate_index_configuration + from ansible_collections.ravendb.ravendb.plugins.module_utils.reconcilers.index_reconciler import IndexReconciler + from ansible_collections.ravendb.ravendb.plugins.module_utils.dto.index import IndexSpec, IndexDefinitionSpec HAS_LIB = True except ImportError: HAS_LIB = False - LIB_IMP_ERR = traceback.format_exc() - - -def create_dynamic_index(name, definition): - """Dynamically create a single-map index class based on the given definition.""" - class DynamicIndex(AbstractIndexCreationTask): - def __init__(self): - super(DynamicIndex, self).__init__() - self.map = definition.get("map")[0] - reduce_def = definition.get("reduce") - if reduce_def: - self.reduce = reduce_def - - DynamicIndex.__name__ = name - return DynamicIndex - - -def create_dynamic_multimap_index(name, definition): - """Dynamically create a multi-map index class based on the given definition.""" - class DynamicIndex(AbstractMultiMapIndexCreationTask): - def __init__(self): - super(DynamicIndex, self).__init__() - maps_def = definition.get("map") - - for map_def in maps_def: - self._add_map(map_def) - - reduce_def = definition.get("reduce") - if reduce_def: - self.reduce = reduce_def - - DynamicIndex.__name__ = name - return DynamicIndex - - -def initialize_ravendb_store(params): - """Create and initialize a RavenDB DocumentStore from Ansible module parameters.""" - url = params['url'] - database_name = params['database_name'] - certificate_path = params.get('certificate_path') - 
ca_cert_path = params.get('ca_cert_path') - - store = DocumentStore(urls=[url], database=database_name) - if certificate_path: - store.certificate_pem_path = certificate_path - if ca_cert_path: - store.trust_store_path = ca_cert_path - - store.initialize() - return store - - -def reconcile_state(store, params, check_mode): - """ - Determine and apply the required state (present, absent, or mode-only) to an index. - Returns a tuple: (status, changed, message) - """ - database_name = params['database_name'] - index_name = params['index_name'] - desired_state = params.get('state') - desired_mode = params.get('mode') - cluster_wide = params['cluster_wide'] - - database_maintenance = store.maintenance.for_database(database_name) - existing_indexes = database_maintenance.send( - GetIndexesOperation(0, sys.maxsize)) - existing_index_names = [i.name for i in existing_indexes] - - if desired_state == 'absent': - return handle_absent_state( - database_maintenance, - index_name, - existing_index_names, - check_mode) - - if desired_state == 'present': - return handle_present_state( - store, - database_name, - params, - index_name, - existing_indexes, - existing_index_names, - check_mode) - - if desired_mode and desired_state is None: - return handle_mode_only( - store, - index_name, - desired_mode, - cluster_wide, - check_mode, - existing_index_names) - - return "error", False, "Invalid state or mode combination." - - -def handle_absent_state( - database_maintenance, - index_name, - existing_index_names, - check_mode): - """Delete the index if it exists. 
Respect Ansible check mode.""" - if index_name not in existing_index_names: - return "ok", False, "Index '{}' is already absent.".format(index_name) - - if check_mode: - return "ok", True, "Index '{}' would be deleted.".format(index_name) - - database_maintenance.send(DeleteIndexOperation(index_name)) - return "ok", True, "Index '{}' deleted successfully.".format(index_name) - - -def handle_present_state( - store, - database_name, - params, - index_name, - existing_indexes, - existing_index_names, - check_mode): - """Create or update the index if needed. Respect Ansible check mode.""" - index_definition = params.get('index_definition') - desired_mode = params.get('mode') - cluster_wide = params['cluster_wide'] - - if index_name in existing_index_names: - existing_index = next( - i for i in existing_indexes if i.name == index_name) - if index_matches(existing_index, index_definition): - if desired_mode: - return apply_mode( - store, - index_name, - desired_mode, - cluster_wide, - check_mode) - return "ok", False, "Index '{}' already exists and matches definition.".format(index_name) - - if check_mode: - return "ok", True, "Index '{}' would be created.".format(index_name) - - create_index(store, database_name, index_name, index_definition) - if desired_mode: - apply_mode(store, index_name, desired_mode, cluster_wide, check_mode) - - return "ok", True, "Index '{}' created successfully.".format(index_name) - - -def handle_mode_only( - store, - index_name, - desired_mode, - cluster_wide, - check_mode, - existing_index_names): - """Apply only the desired index mode if the index already exists.""" - if index_name not in existing_index_names: - return "error", False, "Index '{}' does not exist. 
Cannot apply mode.".format(index_name) - - return apply_mode( - store, - index_name, - desired_mode, - cluster_wide, - check_mode) - - -def create_index(store, database_name, index_name, index_definition): - """Create an index, handling both single-map and multi-map definitions.""" - if len(index_definition.get("map")) > 1: - DynamicIndexClass = create_dynamic_multimap_index( - index_name, index_definition) - else: - DynamicIndexClass = create_dynamic_index(index_name, index_definition) - index = DynamicIndexClass() - index.execute(store, database_name) - - -def index_matches(existing_index, index_definition): - """Check if an existing index matches the expected definition (map/reduce).""" - existing_maps = set(map(str.strip, existing_index.maps) - ) if existing_index.maps else set() - existing_reduce = getattr(existing_index, 'reduce', None) - - expected_maps = set(map(str.strip, index_definition.get("map", []))) - normalized_existing_reduce = existing_reduce.strip() if existing_reduce else None - normalized_expected_reduce = (index_definition.get("reduce") or "").strip() - if not normalized_expected_reduce: - normalized_expected_reduce = None - - return existing_maps == expected_maps and normalized_existing_reduce == normalized_expected_reduce - - -def enable_index(store, index_name, cluster_wide, check_mode): - """Enable a RavenDB index, optionally cluster-wide. 
Respect check mode.""" - current = get_index_state(store, index_name) - if current != IndexState.DISABLED: - return "ok", False, "Index '{}' is already enabled.".format(index_name) - - if check_mode: - return "ok", True, "Index '{}' would be enabled{}.".format(index_name, ' cluster-wide' if cluster_wide else '') - - enable_index_operation = EnableIndexOperation(index_name, cluster_wide) - store.maintenance.send(enable_index_operation) - - return "ok", True, "Index '{}' enabled successfully{}.".format(index_name, ' cluster-wide' if cluster_wide else '') - - -def disable_index(store, index_name, cluster_wide, check_mode): - """Disable a RavenDB index, optionally cluster-wide. Respect check mode.""" - current = get_index_state(store, index_name) - if current == IndexState.DISABLED: - return "ok", False, "Index '{}' is already disabled.".format(index_name) - - if check_mode: - return "ok", True, "Index '{}' would be disabled{}.".format(index_name, ' cluster-wide' if cluster_wide else '') - - disable_index_operation = DisableIndexOperation(index_name, cluster_wide) - store.maintenance.send(disable_index_operation) - - return "ok", True, "Index '{}' disbaled successfully{}.".format(index_name, ' cluster-wide' if cluster_wide else '') - - -def resume_index(store, index_name, check_mode): - """Resume a paused RavenDB index. 
Respect check mode.""" - indexing_status = store.maintenance.send(GetIndexingStatusOperation()) - index = [x for x in indexing_status.indexes if x.name == index_name][0] - if index.status == IndexRunningStatus.RUNNING: - return "ok", False, "Index '{}' is already resumed and executing.".format(index_name) - - if check_mode: - return "ok", True, "Index '{}' would be resumed.".format(index_name) - - resume_index_operation = StartIndexOperation(index_name) - store.maintenance.send(resume_index_operation) - - return "ok", True, "Index '{}' resumed successfully.".format(index_name) - - -def pause_index(store, index_name, check_mode): - """Pause a running RavenDB index. Respect check mode.""" - indexing_status = store.maintenance.send(GetIndexingStatusOperation()) - index = [x for x in indexing_status.indexes if x.name == index_name][0] - if index.status == IndexRunningStatus.PAUSED: - return "ok", False, "Index '{}' is already paused.".format(index_name) - - if check_mode: - return "ok", True, "Index '{}' would be paused.".format(index_name) - - pause_index_operation = StopIndexOperation(index_name) - store.maintenance.send(pause_index_operation) - - return "ok", True, "Index '{}' paused successfully.".format(index_name) - - -def reset_index(store, index_name, check_mode): - """Reset an existing index. 
Respect check mode.""" - if check_mode: - return "ok", True, "Index '{}' would be reset.".format(index_name) - - reset_index_operation = ResetIndexOperation(index_name) - store.maintenance.send(reset_index_operation) - - return "ok", True, "Index '{}' reset successfully.".format(index_name) - - -def get_index_state(store, index_name): - """Return the logical index state""" - stats = store.maintenance.send(GetIndexStatisticsOperation(index_name)) - return stats.state - - -def apply_mode(store, index_name, mode, cluster_wide, check_mode): - """Dispatch index mode operation based on the given mode string.""" - if mode == 'enabled': - return enable_index(store, index_name, cluster_wide, check_mode) - elif mode == 'disabled': - return disable_index(store, index_name, cluster_wide, check_mode) - elif mode == 'resumed': - return resume_index(store, index_name, check_mode) - elif mode == 'paused': - return pause_index(store, index_name, check_mode) - elif mode == 'reset': - return reset_index(store, index_name, check_mode) - else: - return "error", False, "Unsupported mode '{}' specified.".format(mode) - - -def is_valid_url(url): - """Return True if the URL has a valid scheme and network location.""" - parsed = urlparse(url) - return all([parsed.scheme, parsed.netloc]) - - -def is_valid_name(name): - """Return True if the name contains only alphanumeric characters, dashes, or underscores.""" - return bool(re.match(r"^[a-zA-Z0-9_-]+$", name)) - - -def is_valid_dict(value): - """Return True if the value is a dictionary or None.""" - return isinstance(value, dict) or value is None - - -def is_valid_bool(value): - """Return True if the value is a boolean.""" - return isinstance(value, bool) - - -def validate_paths(*paths): - """Check if all provided file paths exist. 
Return (True, None) or (False, error message).""" - for path in paths: - if path and not os.path.isfile(path): - return False, "Path does not exist: {}".format(path) - return True, None - - -def is_valid_state(state): - """Return True if the state is one of: None, 'present', 'absent'.""" - return state in [None, 'present', 'absent'] - - -def is_valid_mode(mode): - """Return True if the mode is one of: None, 'resumed', 'paused', 'enabled', 'disabled', 'reset'.""" - return mode in [None, 'resumed', 'paused', 'enabled', 'disabled', 'reset'] + LIB_ERR = traceback.format_exc() def main(): @@ -573,81 +221,84 @@ def main(): module_args.update( index_name=dict(type='str', required=True), index_definition=dict(type='dict', required=False), - state=dict(type='str', choices=['present', 'absent'], required=False), + state=dict(type='str', choices=['present', 'absent'], required=False, default=None), mode=dict(type='str', choices=['resumed', 'paused', 'enabled', 'disabled', 'reset'], required=False), - cluster_wide=dict(type='bool', default=False) + cluster_wide=dict(type='bool', default=False), + index_configuration=dict(type='dict', required=False, default={}) ) - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True - ) + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) if not HAS_LIB: - module.fail_json( - msg=missing_required_lib("ravendb"), - exception=LIB_IMP_ERR) + module.fail_json(msg=missing_required_lib("ravendb"), exception=LIB_ERR) url = module.params['url'] - database_name = module.params['database_name'] - index_name = module.params['index_name'] - index_definition = module.params.get('index_definition') - certificate_path = module.params.get('certificate_path') - ca_cert_path = module.params.get('ca_cert_path') + db_name = module.params['database_name'] + idx_name = module.params['index_name'] + raw_def = module.params.get('index_definition') + cert_path = module.params.get('certificate_path') + ca_path = 
module.params.get('ca_cert_path') state = module.params.get('state') mode = module.params.get('mode') cluster_wide = module.params['cluster_wide'] + idx_cfg = module.params.get('index_configuration') or {} + + ok, err = collect_errors( + validate_url(url), + validate_database_name(db_name), + validate_index_name(idx_name), + validate_dict("index definition", raw_def), + validate_paths_exist(cert_path, ca_path), + validate_state_optional(state), + validate_mode(mode), + validate_bool("cluster_wide", cluster_wide), + ) + if not ok: + module.fail_json(msg=err) + + ok, normalized_cfg, err = validate_index_configuration(idx_cfg) + if not ok: + module.fail_json(msg=err) + + def_spec = IndexDefinitionSpec.from_dict(raw_def) if raw_def else None + spec = IndexSpec( + db_name=db_name, + name=idx_name, + definition=def_spec, + mode=mode, + cluster_wide=cluster_wide, + configuration=normalized_cfg or {} + ) - if not is_valid_url(url): - module.fail_json(msg="Invalid URL: {}".format(url)) - - if not is_valid_name(database_name): - module.fail_json( - msg="Invalid database name: {}. Only letters, numbers, dashes, and underscores are allowed.".format(database_name)) - - if not is_valid_name(index_name): - module.fail_json( - msg="Invalid index name: {}. Only letters, numbers, dashes, and underscores are allowed.".format(index_name)) - - if not is_valid_dict(index_definition): - module.fail_json( - msg="Invalid index definition: Must be a dictionary.") - - valid, error_msg = validate_paths(certificate_path, ca_cert_path) - if not valid: - module.fail_json(msg=error_msg) - - if not is_valid_state(state): - module.fail_json( - msg="Invalid state: {}. Must be 'present' or 'absent'.".format(state)) - - if not is_valid_mode(mode): - module.fail_json( - msg="Invalid mode: {}. 
Must be one of 'resumed', 'paused', 'enabled', 'disabled', 'reset'.".format(mode)) + ctx = None + try: + ctx = DocumentStoreFactory.create(url, db_name, cert_path, ca_path) + reconciler = IndexReconciler(ctx, db_name) - if not is_valid_bool(cluster_wide): - module.fail_json( - msg="Invalid cluster_wide flag: {}. Must be a boolean.".format(cluster_wide)) + exists = idxsvc.get_definition(ctx, db_name, idx_name) is not None - try: - store = initialize_ravendb_store(module.params) - check_mode = module.check_mode + if state == "absent": + res = reconciler.ensure_absent(idx_name, module.check_mode) + elif state == "present": + res = reconciler.ensure_present(spec, module.check_mode) + else: + if not exists: + if mode: + module.fail_json(msg="Index '{}' does not exist. Provide state=present to create it before applying mode.".format(idx_name)) + module.fail_json(msg="Index '{}' does not exist. Provide state=present and index_definition to create it.".format(idx_name)) - type, changed, message = reconcile_state( - store, module.params, check_mode) + res = reconciler.ensure_present(spec, module.check_mode) - if type == "error": - module.fail_json(changed=changed, msg=message) + if res.failed: + module.fail_json(**res.to_ansible()) else: - module.exit_json(changed=changed, msg=message) + module.exit_json(**res.to_ansible()) - except RavenException as e: - module.fail_json(msg="RavenDB operation failed: {}".format(str(e))) except Exception as e: - module.fail_json(msg="An unexpected error occurred: {}".format(str(e))) + module.fail_json(msg="Unexpected error: {}".format(str(e))) finally: - if 'store' in locals(): - store.close() + if ctx: + ctx.close() if __name__ == '__main__': diff --git a/plugins/modules/node.py b/plugins/modules/node.py index 0ec8887..4925fe2 100644 --- a/plugins/modules/node.py +++ b/plugins/modules/node.py @@ -1,4 +1,3 @@ -#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c), RavenDB @@ -13,11 +12,10 @@ module: node short_description: Add a 
RavenDB node to an existing cluster description: - - This module adds a RavenDB node to a cluster, either as a member or a watcher. - - Requires specifying the leader node's URL. - - Supports check mode to simulate the addition without applying changes. - - Supports secured clusters with HTTPS, client certificates (PEM format), and optional CA bundle for verification. - - The module inspects cluster topology first and skips adding if the node is already present. + - Adds a RavenDB node to a cluster, either as a member or a watcher. + - Performs a topology check first and becomes a no-op if the node is already present (by tag or URL). + - Supports secured clusters with HTTPS, client certificates (PEM format), and optional CA bundle for verification. + - Check mode is supported to simulate the addition without applying changes. version_added: "1.0.0" author: "Omer Ratsaby (@thegoldenplatypus)" @@ -26,56 +24,46 @@ support: full description: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - options: - tag: - description: - - The unique tag for the node (uppercase alphanumeric). - required: true - type: str - type: - description: - - Node type. Use "Watcher" to add the node as a watcher instead of a full member. - required: false - type: str - default: Member - choices: [Member, Watcher] - url: - description: - - The HTTP/HTTPS URL of the node being added. - required: true - type: str - leader_url: - description: - - The HTTP/HTTPS URL of the cluster leader. - required: true - type: str - certificate_path: - description: - - Path to a client certificate in PEM format (combined certificate and key). - - Required for secured clusters (HTTPS with client authentication). - required: false - type: str - ca_cert_path: - description: - - Path to a CA certificate bundle to verify the server certificate. 
- required: false - type: str + tag: + description: + - The unique tag for the node (uppercase alphanumeric, 1–4 chars). + required: true + type: str + type: + description: + - Node type. Use C(Watcher) to add the node as a watcher instead of a full member. + required: false + type: str + default: Member + choices: [Member, Watcher] + url: + description: + - The HTTP/HTTPS URL of the node being added. + required: true + type: str + leader_url: + description: + - The HTTP/HTTPS URL of the cluster leader the module will contact to add the node. + required: true + type: str + certificate_path: + description: + - Path to a client certificate in PEM format (combined certificate and key). + - Required for secured clusters (HTTPS with client authentication). + required: false + type: str + ca_cert_path: + description: + - Path to a CA certificate bundle to verify the server certificate. + required: false + type: str -requirements: - - python >= 3.9 - - requests - - Role ravendb.ravendb.ravendb_python_client_prerequisites must be installed before using this module. seealso: - name: RavenDB documentation description: Official RavenDB documentation link: https://ravendb.net/docs -notes: - - The node C(tag) must be an uppercase, non-empty alphanumeric string. - - URLs must be valid HTTP or HTTPS addresses. - - Check mode is fully supported and simulates joining the node without actually performing the action. - - If the node is already part of the cluster (by tag or URL), the task is a no-op. - - Supports both unsecured (HTTP) and secured (HTTPS) RavenDB clusters. 
+ ''' EXAMPLES = ''' @@ -92,7 +80,8 @@ type: "Watcher" url: "https://b.ravendbansible.development.run" leader_url: "https://a.ravendbansible.development.run" - certificate_path: /etc/ravendb/security/admin.client.combined.pem + certificate_path: admin.client.combined.pem + ca_cert_path: ca_certificate.pem - name: Simulate adding Node D (check mode) ravendb.ravendb.node: @@ -104,241 +93,93 @@ RETURN = ''' changed: - description: Indicates if the cluster topology was changed or would have changed (check mode). - type: bool - returned: always - sample: true + description: Indicates if the cluster topology was changed or would have changed (check mode). + type: bool + returned: always + sample: true msg: - description: Human-readable message describing the result or error. - type: str - returned: always - sample: Node B added to the cluster - version_added: "1.0.0" + description: Human-readable message describing the result or error. + type: str + returned: always + sample: Node 'B' added as Member. 
+ version_added: "1.0.0" ''' -import os -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse -from ansible.module_utils.basic import AnsibleModule +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib -HAS_REQUESTS = True +LIB_ERR = None try: - import requests + from ansible_collections.ravendb.ravendb.plugins.module_utils.core.client import DocumentStoreFactory + from ansible_collections.ravendb.ravendb.plugins.module_utils.core.validation import ( + validate_url, validate_tag, validate_paths_exist, collect_errors + ) + from ansible_collections.ravendb.ravendb.plugins.module_utils.core.tls import TLSConfig + from ansible_collections.ravendb.ravendb.plugins.module_utils.dto.node import NodeSpec + from ansible_collections.ravendb.ravendb.plugins.module_utils.reconcilers.node_reconciler import NodeReconciler + HAS_LIB = True except ImportError: - HAS_REQUESTS = False - - -def is_valid_url(url): - """Return True if the given URL is a string with a valid HTTP or HTTPS scheme.""" - if not isinstance(url, str): - return False - parsed = urlparse(url) - return all([parsed.scheme in ["http", "https"], parsed.netloc]) - - -def is_valid_tag(tag): - """Return True if the tag is a non-empty uppercase alphanumeric string of max 4 chars.""" - return isinstance(tag, str) and tag.isalnum() and tag.isupper() and 1 <= len(tag) <= 4 - - -def validate_paths(*paths): - """Check that all non-empty paths exist as files.""" - for p in paths: - if p and not os.path.isfile(p): - return False, "Path does not exist: {}".format(p) - return True, None - - -def build_requests_tls_options(certificate_path, ca_cert_path): - """ - Decide what to pass to requests for TLS. 
- Returns a tuple: (cert, verify) - """ - cert = None - verify = True - - if certificate_path: - cert = certificate_path - if ca_cert_path: - verify = ca_cert_path - else: - verify = False - elif ca_cert_path: - verify = ca_cert_path - - return cert, verify - - -def normalize_topology_group(topology_group): - """ - Convert topology group into a {tag: url} mapping. - """ - if isinstance(topology_group, dict): - return topology_group - - mapping = {} - if isinstance(topology_group, list): - for item in topology_group: - if not isinstance(item, dict): - continue - - tag = item.get("Tag") or item.get("tag") - url = item.get("Url") or item.get("url") - - if tag and url: - mapping[tag] = url - - return mapping - - -def fetch_topology(leader_url, certificate_path=None, ca_cert_path=None): - """ - Query the leader node for cluster topology and return normalized groups. - """ - cert, verify = build_requests_tls_options(certificate_path, ca_cert_path) + HAS_LIB = False + LIB_ERR = traceback.format_exc() - url = "{}/cluster/topology".format(leader_url.rstrip('/')) - response = requests.get(url, cert=cert, verify=verify) - response.raise_for_status() - data = response.json() - topology = data.get("Topology") or data - - return { - "Members": normalize_topology_group(topology.get("Members", {})), - "Watchers": normalize_topology_group(topology.get("Watchers", {})), - "Promotables": normalize_topology_group(topology.get("Promotables", {})), - } - - -def find_node_in_topology(topology, search_tag, search_url): - """ - Return (present, role, existing_tag, existing_url) where role in {"Member","Watcher","Promotable"} or None. - Match by tag OR by url. 
- """ - roles = [ - ("Members", "Member"), - ("Watchers", "Watcher"), - ("Promotables", "Promotable"), - ] - for group_key, role_name in roles: - group = topology.get(group_key, {}) or {} - - for tag, url in group.items(): - if tag == search_tag or url == search_url: - return True, role_name, tag, url - - return False, None, None, None - - -def add_node(tag, node_type, url, leader_url, certificate_path, ca_cert_path, check_mode): - """ - Add a new node to a RavenDB cluster by making an HTTP(S) PUT request to the leader node. - Supports client certificate (PEM) and optional CA bundle. - """ - is_watcher = (node_type == "Watcher") - - if not leader_url: - return {"changed": False, "msg": "Leader URL must be specified"} - - if not is_valid_url(leader_url): - return {"changed": False, "msg": "Invalid Leader URL: {}".format(leader_url)} - - if not is_valid_tag(tag): - return { - "changed": False, - "msg": "Invalid tag: Node tag must be an uppercase non-empty alphanumeric string" - } - - if not is_valid_url(url): - return {"changed": False, "msg": "Invalid URL: must be a valid HTTP(S) URL"} +def main(): + module_args = dict( + tag=dict(type='str', required=True), + type=dict(type='str', required=False, default='Member', choices=['Member', 'Watcher']), + url=dict(type='str', required=True), + leader_url=dict(type='str', required=True), + certificate_path=dict(type='str', required=False, default=None), + ca_cert_path=dict(type='str', required=False, default=None), + ) - valid, err = validate_paths(certificate_path, ca_cert_path) - if not valid: - return {"changed": False, "msg": err} + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + if not HAS_LIB: + module.fail_json(msg=missing_required_lib("ravendb"), exception=LIB_ERR) + + tag = module.params['tag'] + node_type = module.params['type'] + node_url = module.params['url'] + leader_url = module.params['leader_url'] + cert_path = module.params.get('certificate_path') + ca_path = 
module.params.get('ca_cert_path') + + ok, err = collect_errors( + validate_tag(tag), + validate_url(node_url), + validate_url(leader_url), + validate_paths_exist(cert_path, ca_path), + ) + if not ok: + module.fail_json(msg=err) + + tls = TLSConfig(certificate_path=cert_path, ca_cert_path=ca_path) + ctx = None try: - topology = fetch_topology(leader_url, certificate_path, ca_cert_path) - present, role, existing_tag, existing_url = find_node_in_topology(topology, tag, url) - if present: - return { - "changed": False, - "msg": "Node {} already present in the cluster as {} ({}).".format(existing_tag, role, existing_url), - } - - except requests.RequestException: - pass - - if check_mode: - return {"changed": True, "msg": "Node {} would be added to the cluster as {}.".format(tag, node_type)} + ctx = DocumentStoreFactory.create(leader_url, None, cert_path, ca_path) - params = {"url": url, "tag": tag} - if is_watcher: - params["watcher"] = "true" - - endpoint = "{}/admin/cluster/node".format(leader_url.rstrip("/")) - cert, verify = build_requests_tls_options(certificate_path, ca_cert_path) - - try: - response = requests.put( - endpoint, - params=params, - headers={"Content-Type": "application/json"}, - cert=cert, - verify=verify, + spec = NodeSpec( + tag=tag, + url=node_url, + leader_url=leader_url, + node_type=node_type, ) - response.raise_for_status() - - except requests.HTTPError as e: - response = e.response - if response is not None: - try: - error_message = response.json().get("Message", response.text) - except ValueError: - error_message = response.text - else: - error_message = str(e) - return {"changed": False, "msg": "Failed to add node {}".format(tag), "error": error_message} - - except requests.RequestException as e: - return {"changed": False, "msg": "Failed to add node {}".format(tag), "error": str(e)} - - return {"changed": True, "msg": "Node {} added to the cluster as {}.".format(tag, node_type)} + reconciler = NodeReconciler(ctx) + res = 
reconciler.ensure_present(spec, tls, module.check_mode) -def main(): - module_args = { - "tag": {"type": "str", "required": True}, - "type": {"type": "str", "default": "Member", "choices": ["Member", "Watcher"]}, - "url": {"type": "str", "required": True}, - "leader_url": {"type": "str", "required": True}, - "certificate_path": {"type": "str", "required": False, "default": None}, - "ca_cert_path": {"type": "str", "required": False, "default": None}, - } - - module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) - - if not HAS_REQUESTS: - module.fail_json(msg="Python 'requests' library is required. Please install it.") - try: - result = add_node( - tag=module.params["tag"], - node_type=module.params["type"], - url=module.params["url"], - leader_url=module.params["leader_url"], - certificate_path=module.params.get("certificate_path"), - ca_cert_path=module.params.get("ca_cert_path"), - check_mode=module.check_mode, - ) - if result.get("error"): - module.fail_json(**result) + if res.failed: + module.fail_json(**res.to_ansible()) else: - module.exit_json(**result) + module.exit_json(**res.to_ansible()) + except Exception as e: - module.fail_json(msg="An error occurred: {}".format(str(e))) + module.fail_json(msg="Unexpected error: {}".format(str(e))) + finally: + if ctx: + ctx.close() if __name__ == '__main__': diff --git a/roles/ravendb_node/molecule/plugins-unsecured-nodes/converge.yml b/roles/ravendb_node/molecule/plugins-unsecured-nodes/converge.yml new file mode 100644 index 0000000..c147c9b --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured-nodes/converge.yml @@ -0,0 +1,99 @@ +--- +- name: Unsecured + hosts: all + gather_facts: true + + vars: + ravendb_state: present + ravendb_version: latest + ravendb_version_minor: 6.2 + ravendb_release_channel: stable + ravendb_arch: linux-x64 + ravendb_settings_preset: default + ravendb_license_file: "/home/omer/kobo/setup_package/license.json" + ravendb_settings_override: + 
Security.UnsecuredAccessAllowed: PublicNetwork + ServerUrl: "http://0.0.0.0:8080" + ServerUrl.Tcp: "tcp://0.0.0.0:38888" + PublicServerUrl: "http://{{ ansible_hostname }}:8080" + PublicServerUrl.Tcp: "tcp://{{ ansible_hostname }}:38888" + + tasks: + - name: "Include ravendb_node" + include_role: + name: ravendb.ravendb.ravendb_node + + - name: Ensure service is actually up + become: true + ansible.builtin.systemd: + name: ravendb.service + state: started + register: _svc + + - name: Assert service active + ansible.builtin.assert: + that: + - _svc.status.ActiveState == 'active' + + - name: Wait for RavenDB to be responsive (inside container) + ansible.builtin.uri: + url: http://localhost:8080/studio/index.html + status_code: 200 + validate_certs: no + register: _probe + retries: 60 + delay: 2 + until: _probe.status == 200 + +- name: Build docker hosts block on controller + hosts: localhost + connection: local + gather_facts: no + tasks: + - name: Inspect Docker network (controller) + command: docker network inspect ravendbnet + changed_when: false + register: ravendbnet_inspect + + - name: Build hosts block using container names (controller) + set_fact: + docker_hosts_block: |- + {% set containers = (ravendbnet_inspect.stdout | from_json)[0].Containers | default({}) %} + {% for c in containers | dict2items | map(attribute='value') | list | sort(attribute='Name') %} + {{ c.IPv4Address.split('/')[0] }} {{ c.Name }} + {% endfor %} + +- name: Update /etc/hosts inside each container + hosts: all + gather_facts: no + become: true + tasks: + - name: Append docker hosts block into /etc/hosts (on target) + become: true + ansible.builtin.shell: | + {% for line in hostvars['localhost'].docker_hosts_block.splitlines() %} + grep -qxF "{{ line }}" /etc/hosts || echo "{{ line }}" >> /etc/hosts + {% endfor %} + + # ENABLE FOR CI / COMMENT OUT FOR LOCAL TESTING + # - name: Update /etc/hosts on controller + # delegate_to: localhost + # run_once: true + # become: true + # 
ansible.builtin.blockinfile: + # path: /etc/hosts + # marker: "# {mark} ANSIBLE RAVENDBNET" + # block: "{{ hostvars['localhost'].docker_hosts_block | trim }}" + + - name: Verify Studio is reachable from controller for each host + delegate_to: localhost + become: false + ansible.builtin.uri: + url: "http://{{ inventory_hostname }}:8080/studio/index.html" + method: GET + status_code: 200 + validate_certs: no + follow_redirects: all + register: verify_result + failed_when: verify_result.status != 200 + changed_when: false diff --git a/roles/ravendb_node/molecule/plugins-unsecured-nodes/molecule.yml b/roles/ravendb_node/molecule/plugins-unsecured-nodes/molecule.yml new file mode 100644 index 0000000..7c542c3 --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured-nodes/molecule.yml @@ -0,0 +1,64 @@ +--- +dependency: + name: galaxy +driver: + name: docker + port_bindings: + '8080': 80 + '38888': 38888 + +platforms: + - name: ubuntu-bionic-node-a + hostname: ubuntu-bionic-node-a + image: docker.io/geerlingguy/docker-ubuntu2204-ansible:latest + privileged: true + command: /usr/sbin/init + volumes: + - '/sys/fs/cgroup:/sys/fs/cgroup:rw' + tmpfs: + - /run + - /tmp + cgroupns_mode: host + networks: + - name: ravendbnet + + - name: ubuntu-bionic-node-b + hostname: ubuntu-bionic-node-b + image: docker.io/geerlingguy/docker-ubuntu2204-ansible:latest + privileged: true + command: /usr/sbin/init + volumes: + - '/sys/fs/cgroup:/sys/fs/cgroup:rw' + tmpfs: + - /run + - /tmp + cgroupns_mode: host + networks: + - name: ravendbnet + + - name: ubuntu-bionic-node-c + hostname: ubuntu-bionic-node-c + image: docker.io/geerlingguy/docker-ubuntu2204-ansible:latest + privileged: true + command: /usr/sbin/init + volumes: + - '/sys/fs/cgroup:/sys/fs/cgroup:rw' + tmpfs: + - /run + - /tmp + cgroupns_mode: host + networks: + - name: ravendbnet + +provisioner: + name: ansible + log: true +scenario: + name: plugins-unsecured-nodes + test_sequence: + - destroy + - create + - converge + - 
verify +verifier: + name: ansible diff --git a/roles/ravendb_node/molecule/plugins-unsecured-nodes/tasks/db_affinity.yml b/roles/ravendb_node/molecule/plugins-unsecured-nodes/tasks/db_affinity.yml new file mode 100644 index 0000000..c123098 --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured-nodes/tasks/db_affinity.yml @@ -0,0 +1,174 @@ +--- + +- name: Create DB on specific nodes (A,C) (check_mode) + ravendb.ravendb.database: + url: "{{ node_a_cluster_url }}" + database_name: "db_a_c" + replication_factor: 2 + topology_members: + - "{{ node_a_tag }}" + - "{{ node_c_tag }}" + state: present + check_mode: yes + register: db_a_c_check + +- name: Assert would create on A,C + ansible.builtin.assert: + that: + - db_a_c_check.changed + - (db_a_c_check.failed is not defined) or (not db_a_c_check.failed) + +- name: Create DB on specific nodes (A,C) + ravendb.ravendb.database: + url: "{{ node_a_cluster_url }}" + database_name: "db_a_c" + replication_factor: 2 + topology_members: + - "{{ node_a_tag }}" + - "{{ node_c_tag }}" + state: present + register: db_a_c_msg + +- name: Assert DB created on A,C + ansible.builtin.assert: + that: + - db_a_c_msg.changed + - (db_a_c_msg.failed is not defined) or (not db_a_c_msg.failed) + +- name: List DBs + ansible.builtin.uri: + url: "{{ node_a_cluster_url }}/databases" + method: GET + validate_certs: no + register: dbs + +- name: Build member_tags for db_a_c + ansible.builtin.set_fact: + member_tags: >- + {{ (((dbs.json.Databases + | selectattr('Name','==', 'db_a_c') + | list + | first + | default({})).get('NodesTopology', {})).get('Members', [])) + | map(attribute='NodeTag') | list }} + +- name: Assert on A & C not on B + ansible.builtin.assert: + that: + - "'A' in member_tags" + - "'C' in member_tags" + - "'B' not in member_tags" + +- name: Create DB on all nodes explicit (check_mode) + ravendb.ravendb.database: + url: "{{ node_a_cluster_url }}" + database_name: "db_a_b_c_explicit" + replication_factor: 3 + 
topology_members:
+      - "{{ node_a_tag }}"
+      - "{{ node_b_tag }}"
+      - "{{ node_c_tag }}"
+    state: present
+  check_mode: yes
+  register: db_a_b_c_explicit_check
+
+- name: Assert would create on all nodes explicit
+  ansible.builtin.assert:
+    that:
+      - db_a_b_c_explicit_check.changed
+      - (db_a_b_c_explicit_check.failed is not defined) or (not db_a_b_c_explicit_check.failed)
+
+- name: Create DB on all nodes explicit
+  ravendb.ravendb.database:
+    url: "{{ node_a_cluster_url }}"
+    database_name: "db_a_b_c_explicit"
+    replication_factor: 3
+    topology_members:
+      - "{{ node_a_tag }}"
+      - "{{ node_b_tag }}"
+      - "{{ node_c_tag }}"
+    state: present
+  register: db_a_b_c_explicit_msg
+
+- name: Assert DB created on all nodes explicit
+  ansible.builtin.assert:
+    that:
+      - db_a_b_c_explicit_msg.changed
+      - (db_a_b_c_explicit_msg.failed is not defined) or (not db_a_b_c_explicit_msg.failed)
+
+- name: List DBs
+  ansible.builtin.uri:
+    url: "{{ node_a_cluster_url }}/databases"
+    method: GET
+    validate_certs: no
+  register: dbs
+
+- name: Build member_tags for db_a_b_c_explicit
+  ansible.builtin.set_fact:
+    member_tags: >-
+      {{ (((dbs.json.Databases
+           | selectattr('Name','==', 'db_a_b_c_explicit')
+           | list
+           | first
+           | default({})).get('NodesTopology', {})).get('Members', []))
+         | map(attribute='NodeTag') | list }}
+
+- name: Assert on A, B & C
+  ansible.builtin.assert:
+    that:
+      - "'A' in member_tags"
+      - "'B' in member_tags"
+      - "'C' in member_tags"
+
+- name: Create DB on all nodes members omitted (check_mode)
+  ravendb.ravendb.database:
+    url: "{{ node_a_cluster_url }}"
+    database_name: "db_a_b_c_ommited"
+    replication_factor: 3
+    state: present
+  check_mode: yes
+  register: db_a_b_c_ommited_check
+
+- name: Assert would create on all nodes omitted
+  ansible.builtin.assert:
+    that:
+      - db_a_b_c_ommited_check.changed
+      - (db_a_b_c_ommited_check.failed is not defined) or (not db_a_b_c_ommited_check.failed)
+
+- name: Create DB on all nodes members omitted
+  
ravendb.ravendb.database: + url: "{{ node_a_cluster_url }}" + database_name: "db_a_b_c_ommited" + replication_factor: 3 + state: present + register: db_a_b_c_ommited_msg + +- name: Assert DB created on all nodes ommited + ansible.builtin.assert: + that: + - db_a_b_c_ommited_msg.changed + - (db_a_b_c_ommited_msg.failed is not defined) or (not db_a_b_c_ommited_msg.failed) + +- name: List DBs + ansible.builtin.uri: + url: "{{ node_a_cluster_url }}/databases" + method: GET + validate_certs: no + register: dbs + +- name: Build member_tags for db_a_b_c_ommited + ansible.builtin.set_fact: + member_tags: >- + {{ (((dbs.json.Databases + | selectattr('Name','==', 'db_a_b_c_ommited') + | list + | first + | default({})).get('NodesTopology', {})).get('Members', [])) + | map(attribute='NodeTag') | list }} + +- name: Assert on A, B & C + ansible.builtin.assert: + that: + - "'A' in member_tags" + - "'B' in member_tags" + - "'C' in member_tags" diff --git a/roles/ravendb_node/molecule/plugins-unsecured-nodes/tasks/init.yml b/roles/ravendb_node/molecule/plugins-unsecured-nodes/tasks/init.yml new file mode 100644 index 0000000..d37c005 --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured-nodes/tasks/init.yml @@ -0,0 +1,11 @@ +--- +- name: Set cluster-visible URLs & tags + ansible.builtin.set_fact: + leader_tag: "A" + node_a_tag: "A" + leader_cluster_url: "http://ubuntu-bionic-node-a:8080" + node_a_cluster_url: "http://ubuntu-bionic-node-a:8080" + node_b_tag: "B" + node_b_cluster_url: "http://ubuntu-bionic-node-b:8080" + node_c_tag: "C" + node_c_cluster_url: "http://ubuntu-bionic-node-c:8080" diff --git a/roles/ravendb_node/molecule/plugins-unsecured-nodes/tasks/node.yml b/roles/ravendb_node/molecule/plugins-unsecured-nodes/tasks/node.yml new file mode 100644 index 0000000..f839c5d --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured-nodes/tasks/node.yml @@ -0,0 +1,124 @@ +--- + +- name: Add B as Member (check_mode) + ravendb.ravendb.node: + tag: "{{ 
node_b_tag }}" + type: "Member" + url: "{{ node_b_cluster_url }}" + leader_url: "{{ leader_cluster_url }}" + check_mode: yes + register: member_b_check + +- name: Assert Member B would be added (check-mode) + ansible.builtin.assert: + that: + - member_b_check.changed + - (member_b_check.failed is not defined) or (not member_b_check.failed) + +- name: Add B as Member + ravendb.ravendb.node: + tag: "{{ node_b_tag }}" + type: "Member" + url: "{{ node_b_cluster_url }}" + leader_url: "{{ leader_cluster_url }}" + register: member_b_real + +- name: Assert Member B added + ansible.builtin.assert: + that: + - member_b_real.changed + - (member_b_real.failed is not defined) or (not member_b_real.failed) + +- name: Fetch topology after adding Member B + ansible.builtin.uri: + url: "{{ leader_cluster_url }}/cluster/topology" + method: GET + status_code: 200 + return_content: yes + validate_certs: no + register: topo + +- name: Extract member tags + ansible.builtin.set_fact: + member_tags: "{{ topo.json.Topology.Members.keys() | list | unique }}" + + +- name: Assert B present in Members + ansible.builtin.assert: + that: + - "'B' in member_tags" + +- name: Re-run add B (idempotency, also omit type - should default to Member) + ravendb.ravendb.node: + tag: "{{ node_b_tag }}" + url: "{{ node_b_cluster_url }}" + leader_url: "{{ leader_cluster_url }}" + register: member_b_again + +- name: Assert Member B idempotent + ansible.builtin.assert: + that: + - not member_b_again.changed + - (member_b_again.failed is not defined) or (not member_b_again.failed) + + +- name: Add C as Watcher (check_mode) + ravendb.ravendb.node: + tag: "{{ node_c_tag }}" + type: "Watcher" + url: "{{ node_c_cluster_url }}" + leader_url: "{{ leader_cluster_url }}" + check_mode: yes + register: watcher_c_check + +- name: Assert Watcher C would be added (check-mode) + ansible.builtin.assert: + that: + - watcher_c_check.changed + - (watcher_c_check.failed is not defined) or (not watcher_c_check.failed) + +- name: Add 
C as Watcher + ravendb.ravendb.node: + tag: "{{ node_c_tag }}" + type: "Watcher" + url: "{{ node_c_cluster_url }}" + leader_url: "{{ leader_cluster_url }}" + register: watcher_c_real + +- name: Assert Watcher C added (real) + ansible.builtin.assert: + that: + - watcher_c_real.changed + - (watcher_c_real.failed is not defined) or (not watcher_c_real.failed) + +- name: Fetch topology after adding Watcher C + ansible.builtin.uri: + url: "{{ leader_cluster_url }}/cluster/topology" + method: GET + status_code: 200 + return_content: yes + validate_certs: no + register: topo + +- name: Extract watcher tags + ansible.builtin.set_fact: + watcher_tags: "{{ topo.json.Topology.Watchers.keys() | list | unique }}" + +- name: Assert C present in Watchers + ansible.builtin.assert: + that: + - "'C' in watcher_tags" + +- name: Re-run add C (idempotency) + ravendb.ravendb.node: + tag: "{{ node_c_tag }}" + type: "Watcher" + url: "{{ node_c_cluster_url }}" + leader_url: "{{ leader_cluster_url }}" + register: watcher_c_again + +- name: Assert Watcher C idempotent + ansible.builtin.assert: + that: + - not watcher_c_again.changed + - (watcher_c_again.failed is not defined) or (not watcher_c_again.failed) diff --git a/roles/ravendb_node/molecule/plugins-unsecured-nodes/verify.yml b/roles/ravendb_node/molecule/plugins-unsecured-nodes/verify.yml new file mode 100644 index 0000000..e7b400c --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured-nodes/verify.yml @@ -0,0 +1,16 @@ +--- +- name: Verify + hosts: localhost + gather_facts: false + # vars: + # ravendb_venv_path: "/root/.ravendb_ansible" + + tasks: + - import_tasks: tasks/init.yml + tags: [always] + + - import_tasks: tasks/node.yml + tags: [node] + + - import_tasks: tasks/db_affinity.yml + tags: [db_affinity] \ No newline at end of file diff --git a/roles/ravendb_node/molecule/plugins-unsecured/converge.yml b/roles/ravendb_node/molecule/plugins-unsecured/converge.yml new file mode 100644 index 0000000..9f9e23a --- /dev/null 
+++ b/roles/ravendb_node/molecule/plugins-unsecured/converge.yml @@ -0,0 +1,36 @@ +--- +- name: Unsecured + hosts: all + tasks: + - name: "Include ravendb_node" + include_role: + name: ravendb.ravendb.ravendb_node + vars: + ravendb_state: present + ravendb_version: latest + ravendb_version_minor: 6.2 + ravendb_release_channel: stable + ravendb_arch: linux-x64 + ravendb_settings_preset: default + + - name: Ensure service is actually up + become: true + ansible.builtin.systemd: + name: ravendb.service + state: started + register: _svc + + - name: Assert service active + ansible.builtin.assert: + that: + - _svc.status.ActiveState == 'active' + + - name: Wait for RavenDB to be responsive + ansible.builtin.uri: + url: http://localhost:8080/studio/index.html + status_code: 200 + validate_certs: no + register: _probe + retries: 60 + delay: 2 + until: _probe.status == 200 \ No newline at end of file diff --git a/roles/ravendb_node/molecule/plugins-unsecured/molecule.yml b/roles/ravendb_node/molecule/plugins-unsecured/molecule.yml new file mode 100644 index 0000000..272fe4d --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured/molecule.yml @@ -0,0 +1,31 @@ +dependency: + name: galaxy +driver: + name: docker + port_bindings: + '8080': 80 + '38888': 38888 + '44443': 443 +platforms: + - name: ubuntu-bionic + image: docker.io/geerlingguy/docker-ubuntu2204-ansible:latest + privileged: true + command: /usr/sbin/init + volumes: + - '/sys/fs/cgroup:/sys/fs/cgroup:rw' + tmpfs: + - /run + - /tmp + cgroupns_mode: host +provisioner: + name: ansible + log: true +scenario: + name: plugins-unsecured + test_sequence: + - destroy + - create + - converge + - verify +verifier: + name: ansible diff --git a/roles/ravendb_node/molecule/plugins-unsecured/tasks/db.yml b/roles/ravendb_node/molecule/plugins-unsecured/tasks/db.yml new file mode 100644 index 0000000..8e761c1 --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured/tasks/db.yml @@ -0,0 +1,18 @@ +--- +- name: DB | 
Create (+ check_mode & idempotency)
+  include_tasks:
+    file: db/10_create.yml
+    apply:
+      tags: [db, db:create]
+
+- name: DB | Settings (check_mode/apply/idempotency/modify)
+  include_tasks:
+    file: db/20_settings.yml
+    apply:
+      tags: [db, db:settings]
+
+- name: DB | Delete (+ check_mode & idempotency)
+  include_tasks:
+    file: db/30_delete.yml
+    apply:
+      tags: [db, db:delete]
diff --git a/roles/ravendb_node/molecule/plugins-unsecured/tasks/db/10_create.yml b/roles/ravendb_node/molecule/plugins-unsecured/tasks/db/10_create.yml
new file mode 100644
index 0000000..f90f0a0
--- /dev/null
+++ b/roles/ravendb_node/molecule/plugins-unsecured/tasks/db/10_create.yml
@@ -0,0 +1,167 @@
+---
+- name: List databases (initial)
+  ansible.builtin.uri:
+    url: "{{ base_url }}/databases"
+    method: GET
+    status_code: 200
+    validate_certs: no
+  register: db_list_initial
+
+- name: Create DB (check mode)
+  ravendb.ravendb.database:
+    url: "{{ base_url }}"
+    database_name: "{{ db_check }}"
+    replication_factor: 1
+    state: present
+  check_mode: yes
+  register: db_check_create
+
+- name: Assert would create DB msg (check mode)
+  ansible.builtin.assert:
+    that:
+      - db_check_create.changed
+      - "'would be created' in db_check_create.msg | lower"
+
+- name: Verify DB not created on check mode
+  ansible.builtin.uri:
+    url: "{{ base_url }}/databases"
+    method: GET
+    validate_certs: no
+  register: dbs_after_check_create
+
+- name: Assert DB not created
+  ansible.builtin.assert:
+    that:
+      - "db_check not in (dbs_after_check_create.json.Databases | map(attribute='Name') | list | default([]))"
+
+- name: Create DB
+  ravendb.ravendb.database:
+    url: "{{ base_url }}"
+    database_name: "{{ db_main }}"
+    replication_factor: 1
+    database_settings:
+      Indexing.MapTimeoutAfterEtagReachedInMin: "20"
+    state: present
+  register: db_main_created
+
+- name: Assert DB created
+  ansible.builtin.assert:
+    that:
+      - db_main_created.changed
+      - "'created successfully' in db_main_created.msg | lower"
+
+- name: 
Verify DB created + ansible.builtin.uri: + url: "{{ base_url }}/databases" + method: GET + validate_certs: no + register: dbs_after_create + +- name: Assert DB present + ansible.builtin.assert: + that: + - "db_main in (dbs_after_create.json.Databases | map(attribute='Name') | list)" + +- name: Re-run create DB (idempotency) + ravendb.ravendb.database: + url: "{{ base_url }}" + database_name: "{{ db_main }}" + replication_factor: 1 + state: present + register: db_main_again + +- name: Assert DB creation idempotent + ansible.builtin.assert: + that: + - "not db_main_again.changed" + - "'already exists' in db_main_again.msg | lower" + - "'no changes' in db_main_again.msg | lower" + +- name: Try create without replication_factor (should fail) + ravendb.ravendb.database: + url: "{{ base_url }}" + database_name: "{{ db_missing_rf }}" + state: present + register: create_no_rf + ignore_errors: yes + +- name: Assert create without RF failed with proper message + ansible.builtin.assert: + that: + - create_no_rf is failed + - "'replication_factor is required when creating a database' in (create_no_rf.msg | lower)" + +- name: Try create without replication_factor in check mode (should fail) + ravendb.ravendb.database: + url: "{{ base_url }}" + database_name: "{{ db_missing_rf }}" + state: present + check_mode: yes + register: create_no_rf_check + ignore_errors: yes + +- name: Assert create without RF failed in check mode with same message + ansible.builtin.assert: + that: + - create_no_rf_check is failed + - "'replication_factor is required when creating a database' in (create_no_rf_check.msg | lower)" + +- name: Existing DB present without RF (no-op) + ravendb.ravendb.database: + url: "{{ base_url }}" + database_name: "{{ db_main }}" + state: present + register: existing_no_rf + +- name: Assert existing DB without RF is no-op + ansible.builtin.assert: + that: + - not existing_no_rf.changed + - "'already exists' in (existing_no_rf.msg | lower)" + - "'no changes' in 
(existing_no_rf.msg | lower)" + +- name: Existing DB with different RF ( should be no-op) + ravendb.ravendb.database: + url: "{{ base_url }}" + database_name: "{{ db_main }}" + replication_factor: 3 + state: present + register: existing_diff_rf + +- name: Assert RF ignored on existing DB + ansible.builtin.assert: + that: + - not existing_diff_rf.changed + - "'already exists' in (existing_diff_rf.msg | lower)" + - "'no changes' in (existing_diff_rf.msg | lower)" + +- name: Create with invalid replication_factor=0 (should fail) + ravendb.ravendb.database: + url: "{{ base_url }}" + database_name: "{{ db_invalid_rf }}" + replication_factor: 0 + state: present + register: invalid_rf_res + ignore_errors: yes + +- name: Assert invalid RF rejected + ansible.builtin.assert: + that: + - invalid_rf_res is failed + - "'invalid replication factor: 0' in (invalid_rf_res.msg | lower)" + +- name: Existing DB different RF in check mode (no-op) + ravendb.ravendb.database: + url: "{{ base_url }}" + database_name: "{{ db_main }}" + replication_factor: 99 + state: present + check_mode: yes + register: existing_diff_rf_check + +- name: Assert existing DB no-op in check mode + ansible.builtin.assert: + that: + - not existing_diff_rf_check.changed + - "'already exists' in (existing_diff_rf_check.msg | lower)" + - "'no changes' in (existing_diff_rf_check.msg | lower)" diff --git a/roles/ravendb_node/molecule/plugins-unsecured/tasks/db/20_settings.yml b/roles/ravendb_node/molecule/plugins-unsecured/tasks/db/20_settings.yml new file mode 100644 index 0000000..22f1abb --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured/tasks/db/20_settings.yml @@ -0,0 +1,123 @@ +--- +- name: Apply settings on created DB (check mode) + ravendb.ravendb.database: + url: "{{ base_url }}" + database_name: "{{ db_main }}" + replication_factor: 1 + state: present + database_settings: + Indexing.MapBatchSize: "128" + check_mode: yes + register: db_settings_check + +- name: Assert settings would apply 
(check-mode) + ansible.builtin.assert: + that: + - db_settings_check.changed + - "'would apply settings' in db_settings_check.msg | lower" + - "'indexing.mapbatchsize' in db_settings_check.msg | lower" + +- name: Verify DB settings not really applied + ansible.builtin.uri: + url: "{{ base_url }}/admin/databases?name={{ db_main }}" + method: GET + validate_certs: no + register: db_record + +- name: Extract db settings (pre-apply) + ansible.builtin.set_fact: + db_settings_map: >- + {{ db_record.json.Settings + | default( db_record.json.Results[0].Settings | default({}) ) }} + +- name: Assert key not applied yet + ansible.builtin.assert: + that: + - "'Indexing.MapBatchSize' not in db_settings_map" + +- name: Settings apply + ravendb.ravendb.database: + url: "{{ base_url }}" + database_name: "{{ db_main }}" + replication_factor: 1 + state: present + database_settings: + Indexing.MapBatchSize: "128" + register: db_settings_apply + +- name: Assert settings applied + ansible.builtin.assert: + that: + - db_settings_apply.changed + - "'applied settings' in db_settings_apply.msg | lower" + - "'indexing.mapbatchsize' in db_settings_apply.msg | lower" + +- name: Verify db settings applied + ansible.builtin.uri: + url: "{{ base_url }}/admin/databases?name={{ db_main }}" + method: GET + validate_certs: no + register: db_record + +- name: Extract db settings (post-apply) + ansible.builtin.set_fact: + db_settings_map: >- + {{ db_record.json.Settings + | default( db_record.json.Results[0].Settings | default({}) ) }} + +- name: Assert key applied (128) + ansible.builtin.assert: + that: + - "'Indexing.MapBatchSize' in db_settings_map" + - "db_settings_map['Indexing.MapBatchSize'] | string == '128'" + +- name: Re-run settings (idempotency) + ravendb.ravendb.database: + url: "{{ base_url }}" + database_name: "{{ db_main }}" + replication_factor: 1 + state: present + database_settings: + Indexing.MapBatchSize: "128" + register: db_settings_again + +- name: Assert idempotent (no changes) 
+ ansible.builtin.assert: + that: + - "not db_settings_again.changed" + - "'no changes' in db_settings_again.msg | lower" + +- name: Modify setting to new value + ravendb.ravendb.database: + url: "{{ base_url }}" + database_name: "{{ db_main }}" + replication_factor: 1 + state: present + database_settings: + Indexing.MapBatchSize: "256" + register: db_settings_modified + +- name: Assert modified + ansible.builtin.assert: + that: + - db_settings_modified.changed + - "'applied settings' in db_settings_modified.msg | lower" + +- name: Verify DB settings changed + ansible.builtin.uri: + url: "{{ base_url }}/admin/databases?name={{ db_main }}" + method: GET + validate_certs: no + register: db_record + +- name: Extract DB settings (post-modify) + ansible.builtin.set_fact: + db_settings_map: >- + {{ db_record.json.Settings + | default( db_record.json.Results[0].Settings | default({}) ) }} + +- name: Assert expected value (256) + ansible.builtin.assert: + that: + - "'Indexing.MapBatchSize' in db_settings_map" + - "db_settings_map['Indexing.MapBatchSize'] | string == '256'" diff --git a/roles/ravendb_node/molecule/plugins-unsecured/tasks/db/30_delete.yml b/roles/ravendb_node/molecule/plugins-unsecured/tasks/db/30_delete.yml new file mode 100644 index 0000000..72ab1b5 --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured/tasks/db/30_delete.yml @@ -0,0 +1,52 @@ +--- +- name: Delete DB (check mode) + ravendb.ravendb.database: + url: "{{ base_url }}" + database_name: "{{ db_main }}" + state: absent + check_mode: yes + register: db_delete_check + +- name: Assert DB would be deleted + ansible.builtin.assert: + that: + - db_delete_check.changed + - "'would be deleted' in db_delete_check.msg | lower" + +- name: Delete DB + ravendb.ravendb.database: + url: "{{ base_url }}" + database_name: "{{ db_main }}" + state: absent + register: db_deleted + +- name: Assert DB deleted + ansible.builtin.assert: + that: + - db_deleted.changed + - "'deleted successfully' in 
db_deleted.msg | lower" + +- name: Verify DB absent via API + ansible.builtin.uri: + url: "{{ base_url }}/databases" + method: GET + validate_certs: no + register: dbs_after_delete + +- name: Assert DB absent + ansible.builtin.assert: + that: + - "db_main not in (dbs_after_delete.json.Databases | map(attribute='Name') | list | default([]))" + +- name: Delete DB again (idempotent) + ravendb.ravendb.database: + url: "{{ base_url }}" + database_name: "{{ db_main }}" + state: absent + register: db_deleted_again + +- name: Assert DB already absent + ansible.builtin.assert: + that: + - "not db_deleted_again.changed" + - "'does not exist' in db_deleted_again.msg | lower" diff --git a/roles/ravendb_node/molecule/plugins-unsecured/tasks/index.yml b/roles/ravendb_node/molecule/plugins-unsecured/tasks/index.yml new file mode 100644 index 0000000..43b9553 --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured/tasks/index.yml @@ -0,0 +1,30 @@ +--- +- name: IDX | Create (+ check mode & idempotency) + include_tasks: + file: index/10_create.yml + apply: + tags: [idx, idx:create] + +- name: IDX | Update (map, multi-map, deployment mode) + include_tasks: + file: index/20_update.yml + apply: + tags: [idx, idx:update] + +- name: IDX | Config (+ edge cases) + include_tasks: + file: index/30_config_and_edges.yml + apply: + tags: [idx, idx:config] + +- name: IDX | Modes (+ reset) + include_tasks: + file: index/40_modes_and_reset.yml + apply: + tags: [idx, idx:modes] + +- name: IDX | Delete (+ idempotency) + include_tasks: + file: index/50_delete.yml + apply: + tags: [idx, idx:delete] diff --git a/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/10_create.yml b/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/10_create.yml new file mode 100644 index 0000000..7ac5c45 --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/10_create.yml @@ -0,0 +1,97 @@ +--- +- name: Create database for index tests + ravendb.ravendb.database: + url: "{{ 
base_url }}" + database_name: "{{ db_ix }}" + replication_factor: 1 + state: present + register: ix_db_created + +- name: Assert DB created/present + ansible.builtin.assert: + that: + - "'created successfully' in ix_db_created.msg | lower" + +- name: Fetch ALL indexes (initial) + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes?start=0" + method: GET + validate_certs: no + register: ix_all_initial + changed_when: false + +- name: Normalize list (initial) + ansible.builtin.set_fact: + ix_list_initial: "{{ ix_all_initial.json.Results | default(ix_all_initial.json) | default([]) }}" + +- name: Pick index (initial) + ansible.builtin.set_fact: + ix_pick_initial: "{{ (ix_list_initial | selectattr('Name','equalto', idx_name) | list | first) | default({}) }}" + +- name: Assert index not listed + ansible.builtin.assert: + that: + - "(ix_list_initial | selectattr('Name','equalto', idx_name) | list | length) == 0" + +- name: Create index (check mode) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + index_definition: "{{ idx_def_initial }}" + state: present + check_mode: yes + register: ix_check_create + +- name: Assert would be created + ansible.builtin.assert: + that: + - ix_check_create.changed + - "'would be created' in ix_check_create.msg | lower" + +- name: Create index + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + index_definition: "{{ idx_def_initial }}" + state: present + register: ix_created + +- name: Assert created + ansible.builtin.assert: + that: + - ix_created.changed + - "'created successfully' in ix_created.msg | lower" + +- name: Fetch ALL indexes (after create) + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes?start=0" + method: GET + validate_certs: no + register: ix_all_after_create + changed_when: false + +- name: Normalize list (after create) + ansible.builtin.set_fact: + 
ix_list_after_create: "{{ ix_all_after_create.json.Results | default(ix_all_after_create.json) | default([]) }}" + +- name: Assert index listed + ansible.builtin.assert: + that: + - "idx_name in (ix_list_after_create | map(attribute='Name') | list)" + +- name: Re-run create index (idempotency) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + index_definition: "{{ idx_def_initial }}" + state: present + register: ix_again + +- name: Assert idempotent + ansible.builtin.assert: + that: + - "not ix_again.changed" + - "'already exists' in ix_again.msg | lower" diff --git a/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/20_update.yml b/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/20_update.yml new file mode 100644 index 0000000..2b35f30 --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/20_update.yml @@ -0,0 +1,287 @@ +--- +- name: Update index definition (map changed) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + index_definition: "{{ idx_def_updated_map }}" + state: present + register: ix_updated_map + +- name: Assert changed + ansible.builtin.assert: + that: + - ix_updated_map.changed + - "'created successfully' in ix_updated_map.msg | lower" + +- name: Fetch ALL indexes (after updating map definition) + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes?start=0" + method: GET + validate_certs: no + register: ix_all_after_map + changed_when: false + +- name: Normalize list (after updating map definition) + ansible.builtin.set_fact: + ix_list_after_modifying_map: "{{ ix_all_after_map.json.Results | default(ix_all_after_map.json) | default([]) }}" + +- name: Pick our index (after updating map definition) + ansible.builtin.set_fact: + ix_after_map_pick: "{{ (ix_list_after_modifying_map | selectattr('Name','equalto', idx_name) | list | first) }}" + +- name: Extract map 
strings (after updating map definition) + ansible.builtin.set_fact: + ix_maps_after_map: "{{ ix_after_map_pick.Maps | default([]) }}" + +- name: Assert map contains 'count = 13' + ansible.builtin.assert: + that: + - ix_maps_after_map | length > 0 + - "(ix_maps_after_map | join('\n')) is search('count\\s*=\\s*13')" + +- name: Convert to multi-map + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + index_definition: "{{ idx_def_multimap }}" + state: present + register: ix_updated_multimap + +- name: Assert changed + ansible.builtin.assert: + that: + - ix_updated_multimap.changed + +- name: Fetch ALL indexes (after multi-map) + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes?start=0" + method: GET + validate_certs: no + register: ix_all_after_multimap + changed_when: false + +- name: Normalize list (after multi-map) + ansible.builtin.set_fact: + ix_list_after_multimap: "{{ ix_all_after_multimap.json.Results | default(ix_all_after_multimap.json) | default([]) }}" + +- name: Pick our index (after multi-map) + ansible.builtin.set_fact: + ix_def_after_multimap: "{{ (ix_list_after_multimap | selectattr('Name','equalto', idx_name) | list | first) | default({}) }}" + +- name: Extract maps (after multi-map) + ansible.builtin.set_fact: + ix_maps_after_multimap: "{{ ix_def_after_multimap.Maps | default([]) | map('string') | list }}" + +- name: Assert both map clauses present + ansible.builtin.assert: + that: + - ix_maps_after_multimap | length == 2 + - "ix_maps_after_multimap | select('search','from c in docs\\.Users') | list | length == 1" + - "ix_maps_after_multimap | select('search','from o in docs\\.Orders') | list | length == 1" + +- name: Map clause + ansible.builtin.set_fact: + _map_clause: "from c in docs.Users select new { c.name }" + +- name: Define deployment mode variants + set_fact: + idx_def_dm_rolling: + map: ["{{ _map_clause }}"] + deployment_mode: rolling + 
idx_def_dm_parallel: + map: ["{{ _map_clause }}"] + deployment_mode: parallel + +- name: Apply index with deployment_mode=rolling + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + index_definition: "{{ idx_def_dm_rolling }}" + state: present + register: ix_dm_roll + +- name: Assert changed on rolling apply (create or update) + ansible.builtin.assert: + that: + - ix_dm_roll.changed | bool + - "'created successfully' in ix_dm_roll.msg | lower" + +- name: Fetch ALL indexes (DM=rolling) + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes?start=0" + method: GET + validate_certs: no + register: ix_dm_roll_api + changed_when: false + +- name: Normalize list (DM=rolling) + ansible.builtin.set_fact: + ix_list_dm_roll: "{{ ix_dm_roll_api.json.Results | default(ix_dm_roll_api.json) | default([]) }}" + +- name: Pick definition (DM=rolling) + ansible.builtin.set_fact: + ix_def_dm_roll: "{{ (ix_list_dm_roll | selectattr('Name','equalto', idx_name) | list | first) | default({}) }}" + +- name: Extract deployment_mode (DM=rolling) + ansible.builtin.set_fact: + ix_dm_roll_value: "{{ (ix_def_dm_roll.DeploymentMode | default(ix_def_dm_roll.deploymentMode) | default(ix_def_dm_roll.deployment_mode) | string) | lower }}" + +- name: Assert DM=rolling + ansible.builtin.assert: + that: + - ix_dm_roll_value == 'rolling' + +- name: Re-apply deployment_mode=rolling (idempotent) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + index_definition: "{{ idx_def_dm_rolling }}" + state: present + register: ix_dm_roll_idem + +- name: Assert unchanged on re-apply rolling + ansible.builtin.assert: + that: + - not ix_dm_roll_idem.changed | bool + +- name: Switch deployment_mode -> parallel + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + index_definition: "{{ idx_def_dm_parallel }}" + state: 
present + register: ix_dm_par + +- name: Assert changed on parallel switch + ansible.builtin.assert: + that: + - ix_dm_par.changed | bool + +- name: Fetch ALL indexes (DM=parallel) + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes?start=0" + method: GET + validate_certs: no + register: ix_dm_par_api + changed_when: false + +- name: Normalize list (DM=parallel) + ansible.builtin.set_fact: + ix_list_dm_par: "{{ ix_dm_par_api.json.Results | default(ix_dm_par_api.json) | default([]) }}" + +- name: Pick definition (DM=parallel) + ansible.builtin.set_fact: + ix_def_dm_par: "{{ (ix_list_dm_par | selectattr('Name','equalto', idx_name) | list | first) | default({}) }}" + +- name: Extract deployment_mode (DM=parallel) + ansible.builtin.set_fact: + ix_dm_par_value: "{{ (ix_def_dm_par.DeploymentMode | default(ix_def_dm_par.deploymentMode) | default(ix_def_dm_par.deployment_mode) | string) | lower }}" + +- name: Assert DM=parallel + ansible.builtin.assert: + that: + - ix_dm_par_value == 'parallel' + +- name: Apply index without deployment_mode (should preserve parallel) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + state: present + index_definition: + map: ["{{ _map_clause }}"] + register: ix_dm_omit + +- name: Assert no change on omit + ansible.builtin.assert: + that: + - not ix_dm_omit.changed | bool + +- name: Fetch ALL indexes (after omit) + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes?start=0" + method: GET + validate_certs: no + register: ix_dm_omit_api + changed_when: false + +- name: Normalize list (after omit) + ansible.builtin.set_fact: + ix_list_dm_omit: "{{ ix_dm_omit_api.json.Results | default(ix_dm_omit_api.json) | default([]) }}" + +- name: Pick definition (after omit) + ansible.builtin.set_fact: + ix_def_dm_omit: "{{ (ix_list_dm_omit | selectattr('Name','equalto', idx_name) | list | first) | default({}) }}" + +- name: Extract deployment_mode 
(after omit) + ansible.builtin.set_fact: + ix_dm_omit_value: "{{ (ix_def_dm_omit.DeploymentMode | default(ix_def_dm_omit.deploymentMode) | default(ix_def_dm_omit.deployment_mode) | string) | lower }}" + +- name: Assert still parallel (after omit) + ansible.builtin.assert: + that: + - ix_dm_omit_value == 'parallel' + +- name: Would switch to rolling (check mode) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + index_definition: "{{ idx_def_dm_rolling }}" + state: present + check_mode: yes + register: ix_dm_ck + +- name: Assert check-mode reports change + ansible.builtin.assert: + that: + - ix_dm_ck.changed | bool + +- name: Fetch ALL indexes (post check-mode) + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes?start=0" + method: GET + validate_certs: no + register: ix_dm_ck_api + changed_when: false + +- name: Normalize list (post check-mode) + ansible.builtin.set_fact: + ix_list_dm_ck: "{{ ix_dm_ck_api.json.Results | default(ix_dm_ck_api.json) | default([]) }}" + +- name: Pick definition (post check-mode) + ansible.builtin.set_fact: + ix_def_dm_ck: "{{ (ix_list_dm_ck | selectattr('Name','equalto', idx_name) | list | first) | default({}) }}" + +- name: Extract deployment_mode (post check-mode) + ansible.builtin.set_fact: + ix_dm_ck_value: "{{ (ix_def_dm_ck.DeploymentMode | default(ix_def_dm_ck.deploymentMode) | default(ix_dm_ck.deployment_mode) | string) | lower }}" + +- name: Assert still parallel (post check-mode) + ansible.builtin.assert: + that: + - ix_dm_ck_value == 'parallel' + +- name: Invalid deployment_mode -> expect failure + block: + - name: Apply invalid deployment_mode + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + index_definition: + map: ["{{ _map_clause }}"] + deployment_mode: invalid_value + state: present + register: ix_dm_invalid + failed_when: false + +- name: Assert invalid mode message present 
+ ansible.builtin.assert: + that: + - "'unknown deployment_mode' in (ix_dm_invalid.msg | lower)" diff --git a/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/30_config_and_edges.yml b/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/30_config_and_edges.yml new file mode 100644 index 0000000..816676f --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/30_config_and_edges.yml @@ -0,0 +1,148 @@ +--- +- name: Index configuration (check mode) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + index_configuration: + Indexing.MapBatchSize: "128" + check_mode: yes + register: ix_cfg_check + +- name: Assert would apply config + ansible.builtin.assert: + that: + - ix_cfg_check.changed + - "'would apply configuration' in ix_cfg_check.msg | lower" + +- name: Fetch ALL indexes (before cfg apply) + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes?start=0" + method: GET + validate_certs: no + register: ix_all_before_cfg + changed_when: false + +- name: Normalize list (before cfg apply) + ansible.builtin.set_fact: + ix_list_before_cfg: "{{ ix_all_before_cfg.json.Results | default(ix_all_before_cfg.json) | default([]) }}" + +- name: Pick our index (before cfg apply) + ansible.builtin.set_fact: + ix_def_before_cfg: "{{ (ix_list_before_cfg | selectattr('Name','equalto', idx_name) | list | first) | default({}) }}" + +- name: Extract configuration (before) + ansible.builtin.set_fact: + ix_cfg_before: "{{ ix_def_before_cfg.Configuration | default(ix_def_before_cfg.configuration | default({})) }}" + +- name: Assert key not visible yet + ansible.builtin.assert: + that: + - "'Indexing.MapBatchSize' not in ix_cfg_before" + +- name: Apply configuration + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + index_configuration: + Indexing.MapBatchSize: "128" + register: ix_cfg_applied + +- name: Assert 
applied + ansible.builtin.assert: + that: + - ix_cfg_applied.changed + - "'applied configuration' in ix_cfg_applied.msg | lower" + +- name: Fetch ALL indexes (after cfg) + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes?start=0" + method: GET + validate_certs: no + register: ix_all_after_cfg + changed_when: false + +- name: Normalize list (after cfg) + ansible.builtin.set_fact: + ix_list_after_cfg: "{{ ix_all_after_cfg.json.Results | default(ix_all_after_cfg.json) | default([]) }}" + +- name: Pick our index (after cfg) + ansible.builtin.set_fact: + ix_def_after_cfg: "{{ (ix_list_after_cfg | selectattr('Name','equalto', idx_name) | list | first) | default({}) }}" + +- name: Extract configuration (after) + ansible.builtin.set_fact: + ix_cfg_after: "{{ ix_def_after_cfg.Configuration | default(ix_def_after_cfg.configuration | default({})) }}" + +- name: Assert cfg rendered + ansible.builtin.assert: + that: + - "'Indexing.MapBatchSize' in ix_cfg_after" + - "ix_cfg_after['Indexing.MapBatchSize'] | string == '128'" + +- name: Re-run config (idempotency) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + index_configuration: + Indexing.MapBatchSize: "128" + register: ix_cfg_again + +- name: Assert idempotent + ansible.builtin.assert: + that: + - "not ix_cfg_again.changed" + +- name: Re-create index for reconcile test + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + index_definition: "{{ idx_def_initial }}" + state: present + register: ix_recreated + +- name: Omit state and apply mode paused + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + mode: paused + register: ix_omit_state_pause + +- name: Assert changed (paused) + ansible.builtin.assert: + that: + - ix_omit_state_pause.changed + - "'paused successfully' in ix_omit_state_pause.msg | lower" + +- name: Omit 
state on missing index with mode + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "NoSuchIndex{{ sfx }}" + mode: paused + register: ix_omit_state_missing + failed_when: false + +- name: Assert failure instructing to create first + ansible.builtin.assert: + that: + - "'does not exist' in ix_omit_state_missing.msg | lower" + - "'state=present' in ix_omit_state_missing.msg | lower" + +- name: Create index without definition (should fail) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "MissingDef{{ sfx }}" + state: present + register: ix_missing_def + failed_when: false + +- name: Assert missing definition error + ansible.builtin.assert: + that: + - "'index_definition is required' in ix_missing_def.msg | lower" diff --git a/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/40_modes_and_reset.yml b/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/40_modes_and_reset.yml new file mode 100644 index 0000000..7ffcc59 --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/40_modes_and_reset.yml @@ -0,0 +1,240 @@ +--- +- name: Disable (check mode) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + mode: disabled + check_mode: yes + register: ix_disable_check + +- name: Assert would be disabled + ansible.builtin.assert: + that: + - ix_disable_check.changed + - "'would be disabled' in ix_disable_check.msg | lower" + +- name: Disable (real) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + mode: disabled + register: ix_disabled + +- name: Assert disabled + ansible.builtin.assert: + that: + - ix_disabled.changed + - "'disabled successfully' in ix_disabled.msg | lower" + +- name: Fetch stats (after disable) + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes/stats" + method: GET + validate_certs: no + 
register: ix_stats_after_disable + changed_when: false + +- name: Normalize stats list (after disable) + ansible.builtin.set_fact: + ix_stats_list_after_disable: "{{ ix_stats_after_disable.json.Results | default(ix_stats_after_disable.json) | default([]) }}" + +- name: Pick stats item (after disable) + ansible.builtin.set_fact: + ix_stats_item_after_disable: "{{ (ix_stats_list_after_disable | selectattr('Name','equalto', idx_name) | list | first) | default({}) }}" + +- name: Assert Disabled in stats + ansible.builtin.assert: + that: + - "ix_stats_item_after_disable.State | default('') == 'Disabled'" + +- name: Disable again (idempotent) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + mode: disabled + register: ix_disabled_again + +- name: Assert already disabled + ansible.builtin.assert: + that: + - "not ix_disabled_again.changed" + - "'already disabled' in ix_disabled_again.msg | lower" + +- name: Enable (real) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + mode: enabled + register: ix_enabled + +- name: Assert enabled + ansible.builtin.assert: + that: + - ix_enabled.changed + - "'enabled successfully' in ix_enabled.msg | lower" + +- name: Fetch stats (after enable) + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes/stats" + method: GET + validate_certs: no + register: ix_stats_after_enable + changed_when: false + +- name: Normalize stats list (after enable) + ansible.builtin.set_fact: + ix_stats_list_after_enable: "{{ ix_stats_after_enable.json.Results | default(ix_stats_after_enable.json) | default([]) }}" + +- name: Pick stats item (after enable) + ansible.builtin.set_fact: + ix_stats_item_after_enable: "{{ (ix_stats_list_after_enable | selectattr('Name','equalto', idx_name) | list | first) | default({}) }}" + +- name: Assert not Disabled + ansible.builtin.assert: + that: + - 
"(ix_stats_item_after_enable.State | default('')) != 'Disabled'" + +- name: Enable again (idempotent) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + mode: enabled + register: ix_enabled_again + +- name: Assert already enabled + ansible.builtin.assert: + that: + - "not ix_enabled_again.changed" + - "'already enabled' in ix_enabled_again.msg | lower" + +- name: Pause (real) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + mode: paused + register: ix_paused + +- name: Assert paused + ansible.builtin.assert: + that: + - ix_paused.changed + - "'paused successfully' in ix_paused.msg | lower" + +- name: Verify Status == Paused + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes/status" + method: GET + validate_certs: no + register: ix_status_after_pause + changed_when: false + +- name: Normalize status list (after pause) + ansible.builtin.set_fact: + ix_status_list: "{{ ix_status_after_pause.json.indexes | default(ix_status_after_pause.json.Indexes | default([])) }}" + +- name: Pick status item (after pause) + ansible.builtin.set_fact: + ix_status_item: "{{ (ix_status_list | selectattr('Name','equalto', idx_name) | list | first) | default({}) }}" + +- name: Assert status Paused + ansible.builtin.assert: + that: + - "(ix_status_item.status | default(ix_status_item.Status)) == 'Paused'" + +- name: Pause again (idempotent) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + mode: paused + register: ix_paused_again + +- name: Assert already paused + ansible.builtin.assert: + that: + - "not ix_paused_again.changed" + - "'already paused' in ix_paused_again.msg | lower" + +- name: Resume (real) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + mode: resumed + register: ix_resumed + +- name: Assert resumed 
+ ansible.builtin.assert: + that: + - ix_resumed.changed + - "'resumed successfully' in ix_resumed.msg | lower" + +- name: Verify Status == Running + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes/status" + method: GET + validate_certs: no + register: ix_status_after_resume + changed_when: false + +- name: Normalize status list (after resume) + ansible.builtin.set_fact: + ix_status_list2: "{{ ix_status_after_resume.json.indexes | default(ix_status_after_resume.json.Indexes | default([])) }}" + +- name: Pick status item (after resume) + ansible.builtin.set_fact: + ix_status_item2: "{{ (ix_status_list2 | selectattr('Name','equalto', idx_name) | list | first) | default({}) }}" + +- name: Assert status Running + ansible.builtin.assert: + that: + - "(ix_status_item2.status | default(ix_status_item2.Status)) == 'Running'" + +- name: Resume again (idempotent) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + mode: resumed + register: ix_resumed_again + +- name: Assert already running + ansible.builtin.assert: + that: + - "not ix_resumed_again.changed" + - "'already running' in ix_resumed_again.msg | lower" + +- name: Reset (check mode) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + mode: reset + check_mode: yes + register: ix_reset_check + +- name: Assert would be reset + ansible.builtin.assert: + that: + - ix_reset_check.changed + - "'would be reset' in ix_reset_check.msg | lower" + +- name: Reset (real) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + mode: reset + register: ix_reset_real + +- name: Assert reset success + ansible.builtin.assert: + that: + - ix_reset_real.changed + - "'reset successfully' in ix_reset_real.msg | lower" diff --git a/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/50_delete.yml 
b/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/50_delete.yml new file mode 100644 index 0000000..bdd49fe --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured/tasks/index/50_delete.yml @@ -0,0 +1,41 @@ +--- +- name: Delete index (real) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + state: absent + register: ix_deleted + +- name: Assert deleted + ansible.builtin.assert: + that: + - ix_deleted.changed + - "'deleted successfully' in ix_deleted.msg | lower" + +- name: Fetch ALL indexes (after delete) + ansible.builtin.uri: + url: "{{ base_url }}/databases/{{ db_ix }}/indexes?start=0" + method: GET + validate_certs: no + register: ix_all_after_delete + changed_when: false + +- name: Assert missing + ansible.builtin.assert: + that: + - "((ix_all_after_delete.json.Results | default(ix_all_after_delete.json) | default([])) | selectattr('Name','equalto', idx_name) | list | length) == 0" + +- name: Delete again (idempotent) + ravendb.ravendb.index: + url: "{{ base_url }}" + database_name: "{{ db_ix }}" + index_name: "{{ idx_name }}" + state: absent + register: ix_deleted_again + +- name: Assert already absent + ansible.builtin.assert: + that: + - "not ix_deleted_again.changed" + - "'already absent' in ix_deleted_again.msg | lower" diff --git a/roles/ravendb_node/molecule/plugins-unsecured/tasks/init.yml b/roles/ravendb_node/molecule/plugins-unsecured/tasks/init.yml new file mode 100644 index 0000000..8132843 --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured/tasks/init.yml @@ -0,0 +1,55 @@ +--- +- name: Set common vars + ansible.builtin.set_fact: + base_url: "http://localhost:8080" + idx_def_initial: + map: + - "from c in docs.Users select new { c.name, count = 5 }" + reduce: > + from result in results + group result by result.name + into g + select new + { + name = g.Key, + count = g.Sum(x => x.count) + } + idx_def_updated_map: + map: + - "from c in docs.Users select 
new { c.name, count = 13 }" + reduce: > + from result in results + group result by result.name + into g + select new + { + name = g.Key, + count = g.Sum(x => x.count) + } + idx_def_multimap: + map: + - "from c in docs.Users select new { Name = c.name, UserCount = 1, OrderCount = 0, TotalCount = 1 }" + - "from o in docs.Orders select new { Name = o.customer, UserCount = 0, OrderCount = 1, TotalCount = 1 }" + reduce: > + from result in results + group result by result.Name + into g + select new + { + Name = g.Key, + UserCount = g.Sum(x => x.UserCount), + OrderCount = g.Sum(x => x.OrderCount), + TotalCount = g.Sum(x => x.TotalCount) + } + +- name: Randomize names + ansible.builtin.set_fact: + sfx: "{{ 1000000 | random }}" + db_check: "db_check_{{ 1000000 | random }}" + db_main: "db_main_{{ 1000000 | random }}" + db_missing: "db_missing_{{ 1000000 | random }}" + db_ix: "db_ix_{{ 1000000 | random }}" + idx_name: "UsersByName{{ 1000000 | random }}" + db_missing_rf: "db_missing_rf" + db_mismatch_rf_members: "db_mismatch_rf_members" + db_invalid_rf: "db_invalid_rf" \ No newline at end of file diff --git a/roles/ravendb_node/molecule/plugins-unsecured/verify.yml b/roles/ravendb_node/molecule/plugins-unsecured/verify.yml new file mode 100644 index 0000000..4752911 --- /dev/null +++ b/roles/ravendb_node/molecule/plugins-unsecured/verify.yml @@ -0,0 +1,30 @@ +--- +- name: Verify + hosts: all + gather_facts: false + vars: + ravendb_venv_path: "/root/.ravendb_ansible" + + tasks: + - name: Install RavenDB Python client prerequisites + include_role: + name: ravendb.ravendb.ravendb_python_client_prerequisites + + - name: Point Ansible at venv interpreter + set_fact: + ansible_python_interpreter: "{{ ravendb_venv_path }}/bin/python" + + - name: Reset SSH connection to pick up interpreter + ansible.builtin.meta: reset_connection + + - name: Include common init + import_tasks: tasks/init.yml + tags: [always] + + - name: Database suite + import_tasks: tasks/db.yml + tags: [db] + + - 
name: Index suite + import_tasks: tasks/index.yml + tags: [index] diff --git a/roles/ravendb_python_client_prerequisites/tasks/main.yml b/roles/ravendb_python_client_prerequisites/tasks/main.yml index 1b304f8..202db02 100644 --- a/roles/ravendb_python_client_prerequisites/tasks/main.yml +++ b/roles/ravendb_python_client_prerequisites/tasks/main.yml @@ -31,13 +31,11 @@ register: venv_integrity - name: Recreate virtual environment if it is incomplete - become: true ansible.builtin.command: cmd: rm -rf "{{ ravendb_venv_path }}" when: not venv_integrity.stat.exists - name: Create a virtual environment for Python - become: true ansible.builtin.command: cmd: python3 -m venv "{{ ravendb_venv_path }}" creates: "{{ ravendb_venv_path }}/bin/python" @@ -47,17 +45,14 @@ ansible_python_interpreter: "{{ ravendb_venv_path }}/bin/python" - name: Install pip explicitly after ensurepip - become: true ansible.builtin.command: cmd: "{{ ravendb_venv_path }}/bin/python -m ensurepip --default-pip" - name: Upgrade pip in the virtual environment - become: true ansible.builtin.command: cmd: "{{ ravendb_venv_path }}/bin/python -m pip install --upgrade pip" - name: Check Python version and pip location in virtual environment - become: true ansible.builtin.command: cmd: "{{ ravendb_venv_path }}/bin/python -m pip --version" changed_when: false @@ -69,10 +64,10 @@ changed_when: false - name: Install RavenDB Python client in virtual environment - become: true - ansible.builtin.command: - cmd: "{{ ravendb_venv_path }}/bin/pip install ravendb" - when: ravendb_installed.rc == 1 + ansible.builtin.pip: + name: ravendb + virtualenv: "{{ ravendb_venv_path }}" + virtualenv_command: python3 -m venv - name: Check if requests is installed in virtual environment command: "{{ ravendb_venv_path }}/bin/pip show requests" @@ -81,7 +76,6 @@ changed_when: false - name: Install requests in virtual environment - become: true ansible.builtin.command: cmd: "{{ ravendb_venv_path }}/bin/pip install requests" when: 
requests_installed.rc == 1 \ No newline at end of file diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 386a8a8..34b1d1d 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -5,8 +5,15 @@ from unittest import TestCase from unittest.mock import patch, Mock -from ansible_collections.ravendb.ravendb.plugins.modules.node import add_node, is_valid_url, is_valid_tag -import requests +from types import SimpleNamespace + +from ansible_collections.ravendb.ravendb.plugins.module_utils.core.validation import ( + is_valid_url, is_valid_tag +) + +from ansible_collections.ravendb.ravendb.plugins.module_utils.reconcilers.node_reconciler import NodeReconciler +from ansible_collections.ravendb.ravendb.plugins.module_utils.dto.node import NodeSpec +from ansible_collections.ravendb.ravendb.plugins.module_utils.core.tls import TLSConfig class TestAddNodeWithRavenDB(TestCase): @@ -14,106 +21,103 @@ class TestAddNodeWithRavenDB(TestCase): def setUp(self): self.leader_url = "http://localhost:8080" + def _ctx(self): + return SimpleNamespace(store=SimpleNamespace(urls=[self.leader_url])) + + def _empty_topology(self): + return {"Topology": {"Members": {}, "Watchers": {}, "Promotables": {}}} + def test_add_node_success(self): with patch("requests.get") as mock_get, patch("requests.put") as mock_put: mock_get.return_value = Mock(status_code=200) - mock_get.return_value.json.return_value = {"Topology": {"Members": {}, "Watchers": {}, "Promotables": {}}} - - mock_response = Mock() - mock_response.raise_for_status = Mock() - mock_put.return_value = mock_response - - result = add_node( - tag="B", - node_type="Member", - url="http://localhost:8081", - leader_url=self.leader_url, - certificate_path=None, - ca_cert_path=None, - check_mode=False, - ) - self.assertTrue(result["changed"]) - self.assertEqual(result["msg"], "Node B added to the cluster as Member.") + mock_get.return_value.json.return_value = self._empty_topology() + + 
mock_put.return_value = Mock(status_code=200) + + spec = NodeSpec(tag="B", node_type="Member", url="http://localhost:8081", leader_url=self.leader_url) + rec = NodeReconciler(self._ctx()) + res = rec.ensure_present(spec, TLSConfig(), check_mode=False) + + self.assertTrue(res.changed) + self.assertEqual(res.msg, "Node 'B' added as Member.") def test_add_node_check_mode(self): with patch("requests.get") as mock_get: mock_get.return_value = Mock(status_code=200) - mock_get.return_value.json.return_value = {"Topology": {"Members": {}, "Watchers": {}, "Promotables": {}}} - - result = add_node( - tag="B", - node_type="Member", - url="http://localhost:8081", - leader_url=self.leader_url, - certificate_path=None, - ca_cert_path=None, - check_mode=True, - ) - self.assertTrue(result["changed"]) - self.assertEqual(result["msg"], "Node B would be added to the cluster as Member.") + mock_get.return_value.json.return_value = self._empty_topology() + + spec = NodeSpec(tag="B", node_type="Member", url="http://localhost:8081", leader_url=self.leader_url) + rec = NodeReconciler(self._ctx()) + res = rec.ensure_present(spec, TLSConfig(), check_mode=True) + + self.assertTrue(res.changed) + self.assertEqual(res.msg, "Node 'B' would be added as Member.") def test_add_watcher_node(self): with patch("requests.get") as mock_get, patch("requests.put") as mock_put: mock_get.return_value = Mock(status_code=200) - mock_get.return_value.json.return_value = {"Topology": {"Members": {}, "Watchers": {}, "Promotables": {}}} - - mock_response = Mock() - mock_response.raise_for_status = Mock() - mock_put.return_value = mock_response - - result = add_node( - tag="D", - node_type="Watcher", - url="http://localhost:8083", - leader_url=self.leader_url, - certificate_path=None, - ca_cert_path=None, - check_mode=False, - ) - self.assertTrue(result["changed"]) - self.assertEqual(result["msg"], "Node D added to the cluster as Watcher.") + mock_get.return_value.json.return_value = self._empty_topology() + + 
mock_put.return_value = Mock(status_code=200) + + spec = NodeSpec(tag="D", node_type="Watcher", url="http://localhost:8083", leader_url=self.leader_url) + rec = NodeReconciler(self._ctx()) + res = rec.ensure_present(spec, TLSConfig(), check_mode=False) + + self.assertTrue(res.changed) + self.assertEqual(res.msg, "Node 'D' added as Watcher.") def test_add_already_added_node(self): with patch("requests.get") as mock_get, patch("requests.put") as mock_put: - mock_get.side_effect = requests.RequestException("ex") - - mock_response = Mock() - mock_response.raise_for_status.side_effect = requests.HTTPError( - "System.InvalidOperationException: Can't add a new node") - mock_put.return_value = mock_response - - result = add_node( - tag="A", - node_type="Member", - url="http://localhost:8081", - leader_url=self.leader_url, - certificate_path=None, - ca_cert_path=None, - check_mode=False, - ) - self.assertFalse(result["changed"]) - self.assertIn("Failed to add node A", result["msg"]) + mock_get.return_value = Mock(status_code=200) + mock_get.return_value.json.return_value = self._empty_topology() + + err_resp = Mock(status_code=400) + err_resp.json.return_value = {"Message": "System.InvalidOperationException: Can't add a new node"} + err_resp.text = "System.InvalidOperationException: Can't add a new node" + mock_put.return_value = err_resp + + spec = NodeSpec(tag="A", node_type="Member", url="http://localhost:8081", leader_url=self.leader_url) + rec = NodeReconciler(self._ctx()) + res = rec.ensure_present(spec, TLSConfig(), check_mode=False) + + self.assertFalse(res.changed) + self.assertIn("Failed to add node 'A'", res.msg) def test_add_node_with_existing_tag_different_url(self): with patch("requests.get") as mock_get, patch("requests.put") as mock_put: - mock_get.side_effect = requests.RequestException("ex") - - mock_response = Mock() - mock_response.raise_for_status.side_effect = requests.HTTPError( - "System.InvalidOperationException: Was requested to modify the topology 
for node...") - mock_put.return_value = mock_response - - result = add_node( - tag="A", - node_type="Member", - url="http://localhost:9090", - leader_url=self.leader_url, - certificate_path=None, - ca_cert_path=None, - check_mode=False, - ) - self.assertFalse(result["changed"]) - self.assertIn("Failed to add node A", result["msg"]) + mock_get.return_value = Mock(status_code=200) + mock_get.return_value.json.return_value = self._empty_topology() + + err_resp = Mock(status_code=409) + err_resp.json.side_effect = Exception("no json") + err_resp.text = "System.InvalidOperationException: Was requested to modify the topology for node..." + mock_put.return_value = err_resp + + spec = NodeSpec(tag="A", node_type="Member", url="http://localhost:9090", leader_url=self.leader_url) + rec = NodeReconciler(self._ctx()) + res = rec.ensure_present(spec, TLSConfig(), check_mode=False) + + self.assertFalse(res.changed) + self.assertIn("Failed to add node 'A'", res.msg) + + def test_node_already_present(self): + with patch("requests.get") as mock_get: + mock_get.return_value = Mock(status_code=200) + mock_get.return_value.json.return_value = { + "Topology": { + "Members": {"B": "http://localhost:8081"}, + "Watchers": {}, + "Promotables": {} + } + } + + spec = NodeSpec(tag="B", node_type="Member", url="http://localhost:8081", leader_url=self.leader_url) + rec = NodeReconciler(self._ctx()) + res = rec.ensure_present(spec, TLSConfig(), check_mode=False) + + self.assertFalse(res.changed) + self.assertEqual(res.msg, "Node 'B' already present as Member at http://localhost:8081.") class TestValidationFunctions(TestCase): diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 0ce9df4..d090fe5 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -1,3 +1,4 @@ +# tests/unit/test_database.py # Copyright (c), RavenDB # GNU General Public License v3.0 or later (see COPYING or # https://www.gnu.org/licenses/gpl-3.0.txt) @@ -5,84 +6,139 @@ import os 
from ravendb_test_driver import RavenTestDriver from unittest import TestCase -from ansible_collections.ravendb.ravendb.plugins.modules.database import ( - handle_present_state, - handle_absent_state, +from unittest.mock import patch + +from ansible_collections.ravendb.ravendb.plugins.module_utils.core.validation import ( is_valid_url, is_valid_database_name, is_valid_replication_factor, - validate_paths, + validate_paths_exist, is_valid_state, - is_valid_database_name, - is_valid_replication_factor, - is_valid_state ) +from ansible_collections.ravendb.ravendb.plugins.module_utils.reconcilers.database_reconciler import ( + DatabaseReconciler, +) +from ansible_collections.ravendb.ravendb.plugins.module_utils.dto.database import ( + DatabaseSpec, + EncryptionSpec, +) +from ansible_collections.ravendb.ravendb.plugins.module_utils.core.client import StoreContext +from ansible_collections.ravendb.ravendb.plugins.module_utils.core.tls import TLSConfig +from ansible_collections.ravendb.ravendb.plugins.module_utils.services import db_settings_service as setsvc class TestDBStateValidator(TestCase): - def setUp(self): super().setUp() self.test_driver = RavenTestDriver() + self.url = "http://localhost:8080" + + def _ctx(self, store): + return StoreContext(store=store) + + def _spec( + self, + name, + *, + repl=1, + encrypted=False, + settings=None, + ): + return DatabaseSpec( + url=self.url, + name=name, + replication_factor=repl, + settings=settings or {}, + encryption=EncryptionSpec(enabled=encrypted), + ) def test_create_database(self): - - store = self.test_driver.get_document_store( - database="test_create_database") + store = self.test_driver.get_document_store(database="test_create_database") + self.addCleanup(store.close) + ctx = self._ctx(store) db_name = "test_db" - check_mode = False - - changed, message = handle_present_state(store, db_name, 1, "http://localhost:8080", None, False, False, None, None, {}, check_mode) + rec = DatabaseReconciler(ctx) + res = 
rec.ensure_present(self._spec(db_name), TLSConfig(), check_mode=False) - self.assertTrue(changed) - self.assertIn("Database '{}' created successfully.".format(db_name), message) + self.assertTrue(res.changed) + self.assertIn("Database '{}' created successfully".format(db_name), res.msg) def test_create_already_created_database(self): - - store = self.test_driver.get_document_store( - database="test_create_already_created_database") - + store = self.test_driver.get_document_store(database="test_create_already_created_database") + self.addCleanup(store.close) + ctx = self._ctx(store) db_name = "test_db1" - check_mode = False - changed, message = handle_present_state(store, db_name, 1, "http://localhost:8080", None, False, False, None, None, {}, check_mode) - changed, message = handle_present_state(store, db_name, 1, "http://localhost:8080", None, False, False, None, None, {}, check_mode) + rec = DatabaseReconciler(ctx) + res1 = rec.ensure_present(self._spec(db_name), TLSConfig(), check_mode=False) + res2 = rec.ensure_present(self._spec(db_name), TLSConfig(), check_mode=False) - self.assertFalse(changed) - self.assertIn("Database '{}' already exists.".format(db_name), message) + self.assertTrue(res1.changed) + self.assertFalse(res2.changed) + self.assertIn("already exists", res2.msg) def test_delete_database(self): - - store = self.test_driver.get_document_store( - database="test_delete_database") - + store = self.test_driver.get_document_store(database="test_delete_database") + self.addCleanup(store.close) + ctx = self._ctx(store) db_name = "test_db2" - replication_factor = 1 - check_mode = False - changed, message = handle_present_state(store, db_name, 1, "http://localhost:8080", None, False, False, None, None, {}, check_mode) - changed, message = handle_absent_state(store, db_name, check_mode) + rec = DatabaseReconciler(ctx) + rec.ensure_present(self._spec(db_name), TLSConfig(), check_mode=False) + res = rec.ensure_absent(db_name, check_mode=False) - 
self.assertTrue(changed) - self.assertIn("Database '{}' deleted successfully.".format(db_name), message) + self.assertTrue(res.changed) + self.assertIn("Database '{}' deleted successfully".format(db_name), res.msg) def test_delete_non_exist_database(self): + store = self.test_driver.get_document_store(database="test_delete_non_exist_database") + self.addCleanup(store.close) + ctx = self._ctx(store) + db_name = "test_db3" - store = self.test_driver.get_document_store( - database="test_delete_non_exist_database") + rec = DatabaseReconciler(ctx) + res = rec.ensure_absent(db_name, check_mode=False) - db_name = "test_db3" - check_mode = False + self.assertFalse(res.changed) + self.assertIn("Database '{}' does not exist".format(db_name), res.msg) - changed, message = handle_absent_state(store, db_name, check_mode) + def test_apply_database_settings(self): + store = self.test_driver.get_document_store(database="test_apply_database_settings") + self.addCleanup(store.close) + ctx = self._ctx(store) + db_name = "test_db_settings" - self.assertFalse(changed) - self.assertIn("Database '{}' does not exist.".format(db_name), message) + rec = DatabaseReconciler(ctx) + rec.ensure_present(self._spec(db_name), TLSConfig(), check_mode=False) + desired_settings = {"Indexing.MapBatchSize": "64"} -class TestValidationFunctions(TestCase): + with patch.object(setsvc, "get_current", return_value={}), patch.object(setsvc, "apply", return_value=None): + res = rec.ensure_present(self._spec(db_name, settings=desired_settings), TLSConfig(), check_mode=False) + + self.assertTrue(res.changed) + self.assertIn("Applied settings (Indexing.MapBatchSize) and reloaded", res.msg) + + def test_apply_database_settings_check_mode(self): + store = self.test_driver.get_document_store(database="test_apply_database_settings_check") + self.addCleanup(store.close) + ctx = self._ctx(store) + db_name = "test_db_settings_check" + + rec = DatabaseReconciler(ctx) + rec.ensure_present(self._spec(db_name), TLSConfig(), 
check_mode=False) + desired_settings = {"Indexing.MapBatchSize": "64"} + + with patch.object(setsvc, "get_current", return_value={}): + res = rec.ensure_present(self._spec(db_name, settings=desired_settings), TLSConfig(), check_mode=True) + + self.assertTrue(res.changed) + self.assertIn("Would apply settings (Indexing.MapBatchSize) and reload", res.msg) + + +class TestValidationFunctions(TestCase): def test_valid_url(self): self.assertTrue(is_valid_url("https://example.com")) self.assertTrue(is_valid_url("http://localhost:8080")) @@ -108,11 +164,11 @@ def test_valid_certificate_paths(self): with open("test_ca.pem", "w") as f: f.write("dummy CA content") + self.assertEqual(validate_paths_exist("test_cert.pem", "test_ca.pem"), (True, None)) self.assertEqual( - validate_paths( - "test_cert.pem", "test_ca.pem"), (True, None)) - self.assertEqual(validate_paths("non_existing.pem"), - (False, "Path does not exist: non_existing.pem")) + validate_paths_exist("non_existing.pem"), + (False, "Path does not exist: non_existing.pem"), + ) os.remove("test_cert.pem") os.remove("test_ca.pem") diff --git a/tests/unit/test_index.py b/tests/unit/test_index.py index 092d8ba..a63be58 100644 --- a/tests/unit/test_index.py +++ b/tests/unit/test_index.py @@ -6,32 +6,44 @@ import sys from ravendb_test_driver import RavenTestDriver from unittest import TestCase -from ansible_collections.ravendb.ravendb.plugins.modules.index import ( - reconcile_state, + +from ansible_collections.ravendb.ravendb.plugins.module_utils.core.validation import ( is_valid_url, is_valid_name, is_valid_dict, - validate_paths, + validate_paths_exist, is_valid_state, is_valid_mode, - is_valid_bool + is_valid_bool, + validate_state_optional +) +from ansible_collections.ravendb.ravendb.plugins.module_utils.reconcilers.index_reconciler import ( + IndexReconciler, +) +from ansible_collections.ravendb.ravendb.plugins.module_utils.dto.index import ( + IndexSpec, + IndexDefinitionSpec, ) +from 
ansible_collections.ravendb.ravendb.plugins.module_utils.core.client import StoreContext from ravendb.documents.operations.indexes import GetIndexesOperation INDEX_DEFINITION = { "map": [ - "from c in docs.Users select new { Name = c.name, UserCount = 1, OrderCount = 0, TotalCount = 1 }"] + "from c in docs.Users select new { Name = c.name, UserCount = 1, OrderCount = 0, TotalCount = 1 }" + ] } UPDATED_INDEX_DEFINITION = { "map": [ - "from c in docs.Users select new { Name = c.name, UserCount = 1, OrderCount = 0, TotalCount = 3 }"] + "from c in docs.Users select new { Name = c.name, UserCount = 1, OrderCount = 0, TotalCount = 3 }" + ] } MAP_REDUCE_INDEX_DEFINITION = { "map": [ - "from c in docs.Users select new { Name = c.name, UserCount = 1, OrderCount = 0, TotalCount = 1 }"], + "from c in docs.Users select new { Name = c.name, UserCount = 1, OrderCount = 0, TotalCount = 1 }" + ], "reduce": """ from result in results group result by result.Name @@ -43,95 +55,77 @@ OrderCount = g.Sum(x => x.OrderCount), TotalCount = g.Sum(x => x.TotalCount) } - """ - + """, } MULTI_MAP_INDEX_DEFINITION = { - "map": ["from c in docs.Users select new { Name = c.name, UserCount = 1, OrderCount = 0, TotalCount = 1 }", - "from o in docs.Orders select new { Name = o.customer, UserCount = 0, OrderCount = 1, TotalCount = 1 }" - ] + "map": [ + "from c in docs.Users select new { Name = c.name, UserCount = 1, OrderCount = 0, TotalCount = 1 }", + "from o in docs.Orders select new { Name = o.customer, UserCount = 0, OrderCount = 1, TotalCount = 1 }", + ] } class TestReconcileState(TestCase): - index_name = "test/index" def setUp(self): super().setUp() self.test_driver = RavenTestDriver() + def _ctx(self, store): + return StoreContext(store=store) + + def _spec(self, db, name, *, definition=None, mode=None, cluster_wide=False, configuration=None): + return IndexSpec( + db_name=db, + name=name, + definition=(IndexDefinitionSpec.from_dict(definition) if definition else None), + mode=mode, + 
cluster_wide=cluster_wide, + configuration=configuration or {}, + ) + def test_create_index(self): - store = self.test_driver.get_document_store( - database="test_create_index") - - params = { - "database_name": store.database, - "index_name": "test_index", - "index_definition": INDEX_DEFINITION, - "state": "present", - "cluster_wide": False, - } - - status, changed, message = reconcile_state( - store, params, check_mode=False) - self.assertEqual(status, "ok") - self.assertTrue(changed) - self.assertIn("Index 'test_index' created successfully.", message) + store = self.test_driver.get_document_store(database="test_create_index") + self.addCleanup(store.close) + ctx = self._ctx(store) + rec = IndexReconciler(ctx, store.database) + + status = rec.ensure_present(self._spec(store.database, "test_index", definition=INDEX_DEFINITION), check_mode=False) + self.assertEqual(status.changed, True) + self.assertIn("Index 'test_index' created successfully.", status.msg) def test_create_already_exists_index(self): - store = self.test_driver.get_document_store( - database="test_create_already_exists_index") + store = self.test_driver.get_document_store(database="test_create_already_exists_index") + self.addCleanup(store.close) + ctx = self._ctx(store) + rec = IndexReconciler(ctx, store.database) - params = { - "database_name": store.database, - "index_name": "myindex", - "index_definition": INDEX_DEFINITION, - "state": "present", - "cluster_wide": False, - } + res1 = rec.ensure_present(self._spec(store.database, "myindex", definition=INDEX_DEFINITION), check_mode=False) + self.assertTrue(res1.changed) + self.assertIn("Index 'myindex' created successfully.", res1.msg) - result, changed, message = reconcile_state(store, params, check_mode=False) - self.assertTrue(changed) - self.assertIn("Index 'myindex' created successfully.", message) + res2 = rec.ensure_present(self._spec(store.database, "myindex", definition=INDEX_DEFINITION), check_mode=False) + self.assertFalse(res2.changed) 
+ self.assertIn("Index 'myindex' already exists.", res2.msg) - # self.test_driver.wait_for_user_to_continue_the_test(store) + def test_update_existing_index_with_modified_map(self): + store = self.test_driver.get_document_store(database="test_update_existing_index_with_modified_map") + self.addCleanup(store.close) + ctx = self._ctx(store) + rec = IndexReconciler(ctx, store.database) - result, changed, message = reconcile_state(store, params, check_mode=False) - self.assertFalse(changed) - self.assertIn( - "Index 'myindex' already exists and matches definition.", - message) + r1 = rec.ensure_present(self._spec(store.database, "test/index", definition=INDEX_DEFINITION), check_mode=False) + self.assertTrue(r1.changed) + self.assertIn("Index 'test/index' created successfully.", r1.msg) - def test_update_existing_index_with_modified_map(self): - store = self.test_driver.get_document_store( - database="test_update_existing_index_with_modified_map") - - params = { - "database_name": store.database, - "index_name": "test/index", - "index_definition": INDEX_DEFINITION, - "state": "present", - "cluster_wide": False, - } - - status, changed, message = reconcile_state( - store, params, check_mode=False) - self.assertEqual(status, "ok") - self.assertTrue(changed) - self.assertIn("Index 'test/index' created successfully.", message) - - params["index_definition"] = UPDATED_INDEX_DEFINITION - status, changed, message = reconcile_state( - store, params, check_mode=False) - self.assertEqual(status, "ok") - self.assertTrue(changed) - self.assertIn("Index 'test/index' created successfully.", message) + r2 = rec.ensure_present(self._spec(store.database, "test/index", definition=UPDATED_INDEX_DEFINITION), check_mode=False) + self.assertTrue(r2.changed) + self.assertIn("Index 'test/index' created successfully.", r2.msg) database_maintenance = store.maintenance.for_database(store.database) - existing_indexes = database_maintenance.send( - GetIndexesOperation(0, sys.maxsize)) + 
existing_indexes = database_maintenance.send(GetIndexesOperation(0, sys.maxsize)) index = existing_indexes[0] existing_maps = list(map(str.strip, index.maps)) if index.maps else [] @@ -140,128 +134,83 @@ def test_update_existing_index_with_modified_map(self): self.assertEqual(existing_maps[0], expected_map_definition[0]) def test_update_existing_map_index_into_multi_map_index(self): - store = self.test_driver.get_document_store( - database="test_update_existing_map_index_into_multi_map_index") - - params = { - "database_name": store.database, - "index_name": "test/index", - "index_definition": INDEX_DEFINITION, - "state": "present", - "cluster_wide": False, - } - - status, changed, message = reconcile_state( - store, params, check_mode=False) - self.assertEqual(status, "ok") - self.assertTrue(changed) - self.assertIn("Index 'test/index' created successfully.", message) - - params["index_definition"] = MULTI_MAP_INDEX_DEFINITION - status, changed, message = reconcile_state( - store, params, check_mode=False) - self.assertEqual(status, "ok") - self.assertTrue(changed) - self.assertIn("Index 'test/index' created successfully.", message) + store = self.test_driver.get_document_store(database="test_update_existing_map_index_into_multi_map_index") + self.addCleanup(store.close) + ctx = self._ctx(store) + rec = IndexReconciler(ctx, store.database) + + r1 = rec.ensure_present(self._spec(store.database, "test/index", definition=INDEX_DEFINITION), check_mode=False) + self.assertTrue(r1.changed) + self.assertIn("Index 'test/index' created successfully.", r1.msg) + + r2 = rec.ensure_present(self._spec(store.database, "test/index", definition=MULTI_MAP_INDEX_DEFINITION), check_mode=False) + self.assertTrue(r2.changed) + self.assertIn("Index 'test/index' created successfully.", r2.msg) database_maintenance = store.maintenance.for_database(store.database) - existing_indexes = database_maintenance.send( - GetIndexesOperation(0, sys.maxsize)) + existing_indexes = 
database_maintenance.send(GetIndexesOperation(0, sys.maxsize)) index = existing_indexes[0] - existing_maps = sorted( - list(map(str.strip, index.maps)) if index.maps else []) + existing_maps = sorted(list(map(str.strip, index.maps)) if index.maps else []) expected_map_definition = sorted(MULTI_MAP_INDEX_DEFINITION["map"]) self.assertEqual(existing_maps[0], expected_map_definition[0]) self.assertEqual(existing_maps[1], expected_map_definition[1]) def test_update_existing_map_index_into_map_reduce_index(self): - store = self.test_driver.get_document_store( - database="test_update_existing_map_index_into_map_reduce_index") - - params = { - "database_name": store.database, - "index_name": "test/index", - "index_definition": INDEX_DEFINITION, - "state": "present", - "cluster_wide": False, - } - - status, changed, message = reconcile_state( - store, params, check_mode=False) - self.assertEqual(status, "ok") - self.assertTrue(changed) - self.assertIn("Index 'test/index' created successfully.", message) - - params["index_definition"] = MAP_REDUCE_INDEX_DEFINITION - status, changed, message = reconcile_state( - store, params, check_mode=False) - self.assertEqual(status, "ok") - self.assertTrue(changed) - self.assertIn("Index 'test/index' created successfully.", message) + store = self.test_driver.get_document_store(database="test_update_existing_map_index_into_map_reduce_index") + self.addCleanup(store.close) + ctx = self._ctx(store) + rec = IndexReconciler(ctx, store.database) + + r1 = rec.ensure_present(self._spec(store.database, "test/index", definition=INDEX_DEFINITION), check_mode=False) + self.assertTrue(r1.changed) + self.assertIn("Index 'test/index' created successfully.", r1.msg) + + r2 = rec.ensure_present(self._spec(store.database, "test/index", definition=MAP_REDUCE_INDEX_DEFINITION), check_mode=False) + self.assertTrue(r2.changed) + self.assertIn("Index 'test/index' created successfully.", r2.msg) database_maintenance = 
store.maintenance.for_database(store.database) - existing_indexes = database_maintenance.send( - GetIndexesOperation(0, sys.maxsize)) + existing_indexes = database_maintenance.send(GetIndexesOperation(0, sys.maxsize)) index = existing_indexes[0] existing_maps = list(map(str.strip, index.maps)) if index.maps else [] - existing_reduce = getattr(index, 'reduce', None) + existing_reduce = getattr(index, "reduce", None) expected_map_definition = MAP_REDUCE_INDEX_DEFINITION["map"] - expected_redcue_definition = MAP_REDUCE_INDEX_DEFINITION["reduce"] + expected_reduce_definition = MAP_REDUCE_INDEX_DEFINITION["reduce"] self.assertEqual(existing_maps[0], expected_map_definition[0]) - self.assertEqual(existing_reduce, expected_redcue_definition) + self.assertEqual(existing_reduce, expected_reduce_definition) def test_delete_index(self): - store = self.test_driver.get_document_store( - database="test_delete_index") - - params = { - "database_name": store.database, - "index_name": self.index_name, - "index_definition": INDEX_DEFINITION, - "state": "present", - "cluster_wide": False, - } - - status, changed, message = reconcile_state( - store, params, check_mode=False) - self.assertEqual(status, "ok") - self.assertTrue(changed) - self.assertIn("Index 'test/index' created successfully.", message) - - params["state"] = "absent" - status, changed, message = reconcile_state( - store, params, check_mode=False) - self.assertEqual(status, "ok") - self.assertTrue(changed) - self.assertIn("Index 'test/index' deleted successfully.", message) + store = self.test_driver.get_document_store(database="test_delete_index") + self.addCleanup(store.close) + ctx = self._ctx(store) + rec = IndexReconciler(ctx, store.database) + + r1 = rec.ensure_present(self._spec(store.database, self.index_name, definition=INDEX_DEFINITION), check_mode=False) + self.assertTrue(r1.changed) + self.assertIn("Index 'test/index' created successfully.", r1.msg) + + r2 = rec.ensure_absent(self.index_name, 
check_mode=False) + self.assertTrue(r2.changed) + self.assertIn("Index 'test/index' deleted successfully.", r2.msg) def test_delete_nonexistent_index(self): - store = self.test_driver.get_document_store( - database="test_delete_nonexistent_index") + store = self.test_driver.get_document_store(database="test_delete_nonexistent_index") + self.addCleanup(store.close) - params = { - "database_name": store.database, - "index_name": self.index_name, - "index_definition": INDEX_DEFINITION, - "state": "present", - "cluster_wide": False, - } + ctx = self._ctx(store) + rec = IndexReconciler(ctx, store.database) - params["state"] = "absent" - status, changed, message = reconcile_state( - store, params, check_mode=False) - self.assertEqual(status, "ok") - self.assertFalse(changed) - self.assertIn("Index 'test/index' is already absent.", message) + r = rec.ensure_absent(self.index_name, check_mode=False) + self.assertFalse(r.changed) + self.assertIn("Index 'test/index' is already absent.", r.msg) class TestValidationFunctions(TestCase): - def test_valid_url(self): self.assertTrue(is_valid_url("https://example.com")) self.assertTrue(is_valid_url("http://localhost:8080")) @@ -292,11 +241,8 @@ def test_valid_certificate_paths(self): with open("test_ca.pem", "w") as f: f.write("dummy CA content") - self.assertEqual( - validate_paths( - "test_cert.pem", "test_ca.pem"), (True, None)) - self.assertEqual(validate_paths("non_existing.pem"), - (False, "Path does not exist: non_existing.pem")) + self.assertEqual(validate_paths_exist("test_cert.pem", "test_ca.pem"), (True, None)) + self.assertEqual(validate_paths_exist("non_existing.pem"), (False, "Path does not exist: non_existing.pem")) os.remove("test_cert.pem") os.remove("test_ca.pem") @@ -304,8 +250,9 @@ def test_valid_certificate_paths(self): def test_valid_state(self): self.assertTrue(is_valid_state("present")) self.assertTrue(is_valid_state("absent")) - self.assertTrue(is_valid_state(None)) 
self.assertFalse(is_valid_state("running")) + ok, x = validate_state_optional(None) + self.assertTrue(ok) def test_valid_mode(self): self.assertTrue(is_valid_mode("resumed")) diff --git a/tests/unit/test_index_modes.py b/tests/unit/test_index_modes.py index 7a3c519..8738d17 100644 --- a/tests/unit/test_index_modes.py +++ b/tests/unit/test_index_modes.py @@ -1,22 +1,17 @@ +# tests/unit/test_index_modes.py # Copyright (c), RavenDB # GNU General Public License v3.0 or later (see COPYING or # https://www.gnu.org/licenses/gpl-3.0.txt) -from ansible_collections.ravendb.ravendb.plugins.modules.index import ( - enable_index, - disable_index, - resume_index, - pause_index, - reset_index, - create_dynamic_index) from ravendb_test_driver import RavenTestDriver from unittest import TestCase -from ravendb.documents.indexes.definitions import IndexRunningStatus -from ravendb.documents.operations.indexes import GetIndexingStatusOperation +from ansible_collections.ravendb.ravendb.plugins.module_utils.core.client import StoreContext +from ansible_collections.ravendb.ravendb.plugins.module_utils.reconcilers.index_reconciler import IndexReconciler +from ansible_collections.ravendb.ravendb.plugins.module_utils.dto.index import IndexSpec, IndexDefinitionSpec -class TestIndexModes(TestCase): +class TestIndexModes(TestCase): index_name = "TestIndex" index_definition = { "map": [ @@ -31,135 +26,98 @@ class TestIndexModes(TestCase): name = g.Key, count = g.Sum(x => x.count) } - """ + """ } def setUp(self): super().setUp() self.test_driver = RavenTestDriver() - def create_and_execute_index(self, store): - DynamicIndexClass = create_dynamic_index( - self.index_name, self.index_definition) - index = DynamicIndexClass() - index.execute(store, store.database) + def _ctx(self, store): + return StoreContext(store=store) + + def _rec(self, store): + return IndexReconciler(self._ctx(store), store.database) + + def _create_index(self, store): + spec = IndexSpec( + db_name=store.database, + 
name=self.index_name, + definition=IndexDefinitionSpec.from_dict(self.index_definition), + mode=None, + cluster_wide=False, + configuration={} + ) + res = self._rec(store).ensure_present(spec, check_mode=False) + self.assertFalse(res.failed) def test_disable_index(self): - cls = self.__class__ - store = self.test_driver.get_document_store( - database="test_disable_index") - self.create_and_execute_index(store) - - status, changed, message = disable_index( - store, cls.index_name, cluster_wide=False, check_mode=False) - self.assertEqual(status, "ok") - self.assertTrue(changed) - self.assertIn( - "Index '{}' disbaled successfully".format(cls.index_name), - message) + store = self.test_driver.get_document_store(database="test_disable_index") + self.addCleanup(store.close) + self._create_index(store) + + spec = IndexSpec(db_name=store.database, name=self.index_name, mode="disabled", cluster_wide=False) + res = self._rec(store).ensure_present(spec, check_mode=False) + self.assertTrue(res.changed) + self.assertIn("disabled successfully", res.msg.lower()) def test_enable_index(self): - cls = self.__class__ - store = self.test_driver.get_document_store( - database="test_enable_index") - self.create_and_execute_index(store) - - status, changed, message = disable_index( - store, cls.index_name, cluster_wide=False, check_mode=False) - status, changed, message = enable_index( - store, cls.index_name, cluster_wide=False, check_mode=False) - self.assertEqual(status, "ok") - self.assertTrue(changed) - self.assertIn( - "Index '{}' enabled successfully".format(cls.index_name), - message) - - def test_pause_index(self): - cls = self.__class__ - store = self.test_driver.get_document_store( - database="test_pause_index") - self.create_and_execute_index(store) - - resume_index(store, cls.index_name, check_mode=False) - status, changed, message = pause_index( - store, cls.index_name, check_mode=False) - self.assertEqual(status, "ok") - self.assertTrue(changed) - self.assertIn("Index 
'{}' paused successfully".format(cls.index_name), message) - - indexing_status = store.maintenance.send(GetIndexingStatusOperation()) - paused_index = [ - x for x in indexing_status.indexes if x.name == cls.index_name][0] - self.assertEqual(paused_index.status, IndexRunningStatus.PAUSED) + store = self.test_driver.get_document_store(database="test_enable_index") + self.addCleanup(store.close) + self._create_index(store) + + spec = IndexSpec(db_name=store.database, name=self.index_name, mode="disabled", cluster_wide=False) + res = self._rec(store).ensure_present(spec, check_mode=False) + self.assertTrue(res.changed) + + spec = IndexSpec(db_name=store.database, name=self.index_name, mode="enabled", cluster_wide=False) + res = self._rec(store).ensure_present(spec, check_mode=False) + self.assertTrue(res.changed) + self.assertIn("enabled successfully", res.msg.lower()) def test_pause_already_paused_index(self): - cls = self.__class__ - store = self.test_driver.get_document_store( - database="test_pause_already_paused_index") - self.create_and_execute_index(store) - - pause_index(store, cls.index_name, check_mode=False) - status, changed, message = pause_index( - store, cls.index_name, check_mode=False) - self.assertEqual(status, "ok") - self.assertFalse(changed) - self.assertIn("Index '{}' is already paused".format(cls.index_name), message) - - indexing_status = store.maintenance.send(GetIndexingStatusOperation()) - paused_index = [ - x for x in indexing_status.indexes if x.name == cls.index_name][0] - self.assertEqual(paused_index.status, IndexRunningStatus.PAUSED) + store = self.test_driver.get_document_store(database="test_pause_already_paused_index") + self.addCleanup(store.close) + self._create_index(store) + + spec = IndexSpec(db_name=store.database, name=self.index_name, mode="paused") + res = self._rec(store).ensure_present(spec, check_mode=False) + self.assertTrue(res.changed) + self.assertIn("paused successfully", res.msg.lower()) + + res = 
self._rec(store).ensure_present(spec, check_mode=False) + self.assertFalse(res.changed) + self.assertIn("already paused", res.msg.lower()) def test_resume_index(self): - cls = self.__class__ - store = self.test_driver.get_document_store( - database="test_resume_index") - self.create_and_execute_index(store) - - pause_index(store, cls.index_name, check_mode=False) - status, changed, message = resume_index( - store, cls.index_name, check_mode=False) - self.assertEqual(status, "ok") - self.assertTrue(changed) - self.assertIn( - "Index '{}' resumed successfully".format(cls.index_name), - message) - - indexing_status = store.maintenance.send(GetIndexingStatusOperation()) - resumed_index = [ - x for x in indexing_status.indexes if x.name == cls.index_name][0] - self.assertEqual(resumed_index.status, IndexRunningStatus.RUNNING) + store = self.test_driver.get_document_store(database="test_resume_index") + self.addCleanup(store.close) + self._create_index(store) + + pause_spec = IndexSpec(db_name=store.database, name=self.index_name, mode="paused") + self._rec(store).ensure_present(pause_spec, check_mode=False) + + spec = IndexSpec(db_name=store.database, name=self.index_name, mode="resumed") + res = self._rec(store).ensure_present(spec, check_mode=False) + self.assertTrue(res.changed) + self.assertIn("resumed successfully", res.msg.lower()) def test_resume_already_resumed_index(self): - cls = self.__class__ - store = self.test_driver.get_document_store( - database="test_resume_already_resumed_index") - self.create_and_execute_index(store) - - status, changed, message = resume_index( - store, cls.index_name, check_mode=False) - - status, changed, message = resume_index( - store, cls.index_name, check_mode=False) - self.assertEqual(status, "ok") - self.assertFalse(changed) - self.assertIn( - "Index '{}' is already resumed and executing".format(cls.index_name), - message) - - indexing_status = store.maintenance.send(GetIndexingStatusOperation()) - resumed_index = [ - x for x 
in indexing_status.indexes if x.name == cls.index_name][0] - self.assertEqual(resumed_index.status, IndexRunningStatus.RUNNING) + store = self.test_driver.get_document_store(database="test_resume_already_resumed_index") + self.addCleanup(store.close) + self._create_index(store) + + spec = IndexSpec(db_name=store.database, name=self.index_name, mode="resumed") + res = self._rec(store).ensure_present(spec, check_mode=False) + self.assertFalse(res.changed) + self.assertIn("already running", res.msg.lower()) def test_reset_index(self): - cls = self.__class__ - store = self.test_driver.get_document_store( - database="test_reset_index") - self.create_and_execute_index(store) - - status, changed, message = reset_index( - store, cls.index_name, check_mode=False) - self.assertEqual(status, "ok") - self.assertTrue(changed) - self.assertIn("Index '{}' reset successfully".format(cls.index_name), message) + store = self.test_driver.get_document_store(database="test_reset_index") + self.addCleanup(store.close) + self._create_index(store) + + spec = IndexSpec(db_name=store.database, name=self.index_name, mode="reset") + res = self._rec(store).ensure_present(spec, check_mode=False) + self.assertTrue(res.changed) + self.assertIn("reset successfully", res.msg.lower())