diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_resources.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_resources.py new file mode 100644 index 00000000..96ae4ed7 --- /dev/null +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_resources.py @@ -0,0 +1,339 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Jeet Ram (@jeeram) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Resources endpoint models. + +This module contains endpoint definitions for resource management operations +in the ND Manage API. +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import logging +from typing import Optional + +log = logging.getLogger(__name__) + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( + CompositeQueryParams, + EndpointQueryParams, + LuceneQueryParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + ConfigDict, + Field, +) + +# Common config for basic validation +COMMON_CONFIG = ConfigDict(validate_assignment=True) + + +class ResourcesQueryParams(EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for resources endpoint. 
+ + ## Parameters + + - switch_id: Serial Number or Id of the switch/leaf (optional) + - pool_name: Name of the Pool (optional) + - tenant_name: Name of the tenant (optional, used for POST) + + ## Usage + + ```python + params = ResourcesQueryParams(switch_id="leaf-101", pool_name="networkVlan") + query_string = params.to_query_string() + # Returns: "switchId=leaf-101&poolName=networkVlan" + ``` + """ + + switch_id: Optional[str] = Field( + default=None, min_length=1, description="Serial Number or Id of the switch/leaf" + ) + pool_name: Optional[str] = Field( + default=None, min_length=1, description="Name of the Pool" + ) + tenant_name: Optional[str] = Field( + default=None, min_length=1, description="Name of the tenant" + ) + + +# ============================================================================= +# RESOURCES ENDPOINTS +# ============================================================================= + + +class EpManageFabricResourcesGet(BaseModel): + """ + # Summary + + ND Manage Fabrics Resources GET Endpoint + + ## Description + + Endpoint to retrieve all resources for the given fabric. + Supports both endpoint-specific parameters (switch_id, pool_name) and + Lucene-style filtering (filter, max, offset, sort). 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/resources + - /api/v1/manage/fabrics/{fabricName}/resources?switchId=leaf-101 + - /api/v1/manage/fabrics/{fabricName}/resources?poolName=networkVlan + - /api/v1/manage/fabrics/{fabricName}/resources?filter=isPreAllocated:true + - /api/v1/manage/fabrics/{fabricName}/resources?max=10&offset=0&sort=poolName:asc + + ## Verb + + - GET + + ## Usage + + ```python + # Get all resources in a fabric + request = EpManageFabricResourcesGet() + request.fabric_name = "fabric1" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/fabric1/resources + + # Get resources filtered by switch + request = EpManageFabricResourcesGet() + request.fabric_name = "fabric1" + request.endpoint_params.switch_id = "leaf-101" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/fabric1/resources?switchId=leaf-101 + + # Get resources with pagination + request = EpManageFabricResourcesGet() + request.fabric_name = "fabric1" + request.endpoint_params.pool_name = "networkVlan" + request.lucene_params.max = 10 + request.lucene_params.offset = 0 + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/fabric1/resources?poolName=networkVlan&max=10&offset=0 + ``` + """ + + model_config = COMMON_CONFIG + + fabric_name: str = Field( + min_length=1, max_length=64, description="Name of the fabric" + ) + endpoint_params: ResourcesQueryParams = Field( + default_factory=ResourcesQueryParams, + description="Endpoint-specific query parameters", + ) + lucene_params: LuceneQueryParams = Field( + default_factory=LuceneQueryParams, + description="Lucene-style filtering query parameters", + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + log.debug( + "Building path for EpManageFabricResourcesGet: fabric_name=%s, switch_id=%s, pool_name=%s", + self.fabric_name, + self.endpoint_params.switch_id, + self.endpoint_params.pool_name, + ) + base_path = BasePath.path("fabrics", self.fabric_name, "resources") + + # Build composite query string + composite = CompositeQueryParams() + composite.add(self.endpoint_params) + composite.add(self.lucene_params) + + query_string = composite.to_query_string() + if query_string: + return f"{base_path}?{query_string}" + return base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + log.debug( + "Returning HTTP verb for EpManageFabricResourcesGet: verb=%s", + HttpVerbEnum.GET, + ) + return HttpVerbEnum.GET + + +class EpManageFabricResourcesPost(BaseModel): + """ + # Summary + + ND Manage Fabrics Resources POST Endpoint + + ## Description + + Endpoint to allocate an ID or IP/Subnet resource from the specified pool. + If a specific resource value is provided in the request, that exact value + will be allocated. Otherwise, the next available resource will be + automatically allocated. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/resources + - /api/v1/manage/fabrics/{fabricName}/resources?tenantName=tenant1 + + ## Verb + + - POST + + ## Usage + + ```python + # Allocate resource + request = EpManageFabricResourcesPost() + request.fabric_name = "fabric1" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/fabric1/resources + + # Allocate resource with tenant + request = EpManageFabricResourcesPost() + request.fabric_name = "fabric1" + request.endpoint_params.tenant_name = "tenant1" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/fabric1/resources?tenantName=tenant1 + ``` + """ + + model_config = COMMON_CONFIG + + fabric_name: str = Field( + min_length=1, max_length=64, description="Name of the fabric" + ) + endpoint_params: ResourcesQueryParams = Field( + default_factory=ResourcesQueryParams, + description="Endpoint-specific query parameters", + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + log.debug( + "Building path for EpManageFabricResourcesPost: fabric_name=%s, tenant_name=%s", + self.fabric_name, + self.endpoint_params.tenant_name, + ) + base_path = BasePath.path("fabrics", self.fabric_name, "resources") + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base_path}?{query_string}" + return base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + log.debug( + "Returning HTTP verb for EpManageFabricResourcesPost: verb=%s", + HttpVerbEnum.POST, + ) + return HttpVerbEnum.POST + + +# ============================================================================= +# RESOURCES ACTIONS ENDPOINTS +# ============================================================================= + + +class EpManageFabricResourcesActionsRemovePost(BaseModel): + """ + # Summary + + ND Manage Fabrics Resources Actions Remove POST Endpoint + + ## Description + + Endpoint to release allocated resource IDs from the fabric, returning them + to the available resource pool. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/resources/actions/remove + + ## Verb + + - POST + + ## Usage + + ```python + # Release resource IDs + request = EpManageFabricResourcesActionsRemovePost() + request.fabric_name = "fabric1" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/fabric1/resources/actions/remove + ``` + """ + + model_config = COMMON_CONFIG + + fabric_name: str = Field( + min_length=1, max_length=64, description="Name of the fabric" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path. 
+ + ## Returns + + - Complete endpoint path string + """ + log.debug( + "Building path for EpManageFabricResourcesActionsRemovePost: fabric_name=%s", + self.fabric_name, + ) + return BasePath.path( + "fabrics", self.fabric_name, "resources", "actions", "remove" + ) + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + log.debug( + "Returning HTTP verb for EpManageFabricResourcesActionsRemovePost: verb=%s", + HttpVerbEnum.POST, + ) + return HttpVerbEnum.POST diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py new file mode 100644 index 00000000..4860a0b5 --- /dev/null +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Jeet Ram (@jeeram) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Switches endpoint models. + +This module contains endpoint definitions for switch query operations +in the ND Manage API. +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import logging + +log = logging.getLogger(__name__) + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + ConfigDict, + Field, +) + +# Common config for basic validation +COMMON_CONFIG = ConfigDict(validate_assignment=True) + + +class EpManageFabricSwitchesGet(BaseModel): + """ + # Summary + + ND Manage Fabrics Switches GET Endpoint + + ## Description + + Endpoint to retrieve all switches for the given fabric. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switches + - /api/v1/manage/fabrics/{fabricName}/switches?max=10000 + + ## Verb + + - GET + + ## Usage + + ```python + ep = EpManageFabricSwitchesGet(fabric_name="fabric1") + path = ep.path + verb = ep.verb + # Path will be: /api/v1/manage/fabrics/fabric1/switches?max=10000 + + ep = EpManageFabricSwitchesGet(fabric_name="fabric1", max=500) + path = ep.path + # Path will be: /api/v1/manage/fabrics/fabric1/switches?max=500 + ``` + """ + + model_config = COMMON_CONFIG + + fabric_name: str = Field( + min_length=1, max_length=64, description="Name of the fabric" + ) + max: int = Field( + default=10000, ge=1, description="Maximum number of switches to return" + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with max query parameter. + + ## Returns + + - Complete endpoint path string including max query parameter + """ + log.debug( + "Building path for EpManageFabricSwitchesGet: fabric_name=%s, max=%s", + self.fabric_name, + self.max, + ) + base_path = BasePath.path("fabrics", self.fabric_name, "switches") + return f"{base_path}?max={self.max}" + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + log.debug( + "Returning HTTP verb for EpManageFabricSwitchesGet: verb=%s", + HttpVerbEnum.GET, + ) + return HttpVerbEnum.GET diff --git a/plugins/module_utils/manage_resource_manager/__init__.py b/plugins/module_utils/manage_resource_manager/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/plugins/module_utils/manage_resource_manager/nd_manage_resource_manager_resources.py b/plugins/module_utils/manage_resource_manager/nd_manage_resource_manager_resources.py new file mode 100644 index 00000000..9ed8a17b --- /dev/null +++ b/plugins/module_utils/manage_resource_manager/nd_manage_resource_manager_resources.py @@ -0,0 +1,2495 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Cisco and/or its affiliates. 
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import copy +import ipaddress +import logging +from typing import Any, Dict, List, Optional, Tuple, Union + +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ValidationError +from ansible_collections.cisco.nd.plugins.module_utils.nd_v2 import NDModule +from ansible_collections.cisco.nd.plugins.module_utils.rest.results import Results +from ansible_collections.cisco.nd.plugins.module_utils.nd_output import NDOutput +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_resource_manager.resource_manager_config_model import ( + ResourceManagerConfigModel, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_resource_manager.resource_manager_response_model import ResourceManagerResponse +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_resource_manager.remove_resource_by_id_request_model import ( + RemoveResourcesByIdsRequest, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_resource_manager.remove_resource_by_id_response_model import ( + RemoveResourcesByIdsResponse, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_resource_manager.resource_manager_request_model import ( + ResourceManagerBatchRequest, + ResourceManagerRequest, + FabricScope, + DeviceScope, + DeviceInterfaceScope, + DevicePairScope, + LinkScope, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_resource_manager.resource_manager_response_model import ( + ResourcesManagerBatchResponse, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_resources import ( + EpManageFabricResourcesGet, + EpManageFabricResourcesPost, + EpManageFabricResourcesActionsRemovePost, +) +from 
ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switches import ( + EpManageFabricSwitchesGet, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_resource_manager.switchs_response_model import ( + GetAllSwitchesResponse, + SwitchRecord, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.exceptions import NDModuleError +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_resource_manager.constants import ( + API_SCOPE_TYPE_TO_PLAYBOOK, + POOL_SCOPE_MAP, +) + + +# ========================================================================= +# Validation & Diff +# ========================================================================= + +class ResourceManagerDiffEngine: + """Provide stateless validation and diff computation helpers.""" + + @staticmethod + def _normalize_entity_key(entity_name: str) -> str: + """Normalize entity_name for order-insensitive comparison. + + Args: + entity_name: Raw entity name string. + + Returns: + Tilde-separated string with parts sorted alphabetically. + """ + return "~".join(sorted(entity_name.split("~"))) + + @staticmethod + def _extract_scope_switch_key_val(scope_details, switch_key, src_switch_key) -> Optional[str]: + """Extract a switch identifier from a scope_details model using the correct attribute name. + + Selects between ``switch_key`` (for single-switch scopes: device, device_interface) + and ``src_switch_key`` (for dual-switch scopes: device_pair, link). Returns None + for fabric-scoped resources which carry no switch identity. + + Args: + scope_details: A scope model instance (FabricScope, DeviceScope, + DeviceInterfaceScope, DevicePairScope, LinkScope) or None. + switch_key: Attribute name to read for single-switch scopes + (e.g. ``'switch_id'`` or ``'switch_ip'``). + src_switch_key: Attribute name to read for dual-switch scopes + (e.g. ``'src_switch_id'`` or ``'src_switch_ip'``). 
+ + Returns: + The switch identifier string, or None if the scope is fabric-level + or ``scope_details`` is None. + """ + if scope_details is None: + return None + if isinstance(scope_details, FabricScope): + return None + if isinstance(scope_details, (DeviceScope, DeviceInterfaceScope)): + return getattr(scope_details, switch_key, None) + if isinstance(scope_details, (DevicePairScope, LinkScope)): + return getattr(scope_details, src_switch_key, None) + # Fallback: try common attribute names + return getattr(scope_details, switch_key, None) or getattr(scope_details, src_switch_key, None) + + @staticmethod + def _extract_scope_type(scope_details) -> Optional[str]: + """Extract and map the playbook-style scope_type from a scope_details model. + + Args: + scope_details: A scope model instance. + + Returns: + Playbook-style scope_type string (e.g. 'device_interface'), or None. + """ + if scope_details is None: + return None + raw = getattr(scope_details, "scope_type", None) + return API_SCOPE_TYPE_TO_PLAYBOOK.get(raw, raw) if raw else None + + @staticmethod + def _compare_resource_values(have: Optional[str], want: Optional[str]) -> bool: + """Compare resource values with IPv4/IPv6 network awareness. + + Args: + have: Existing resource value from the API. + want: Proposed resource value from the playbook. + + Returns: + True if the values are functionally equivalent, False otherwise. 
+ """ + if have is None and want is None: + return True + if have is None or want is None: + return False + + have = str(have).strip() + want = str(want).strip() + + def _classify(val): + if "/" in val: + try: + return "network", ipaddress.ip_network(val, strict=False) + except ValueError: + pass + try: + return "address", ipaddress.ip_address(val) + except ValueError: + pass + return "raw", val + + th, vh = _classify(have) + tw, vw = _classify(want) + + if th == tw == "address": + return vh.exploded == vw.exploded + if th == tw == "network": + return vh == vw + return have == want + + @staticmethod + def _make_resource_key( + entity_name: Optional[str], + pool_name: Optional[str], + scope_type: Optional[str], + switch_ip: Optional[str], + ) -> Tuple: + """Build a normalized deduplication key for a resource entry. + + Args: + entity_name: Resource entity name (will be tilde-normalized). + pool_name: Pool name. + scope_type: Playbook-style scope type. + switch_ip: Switch IP, or None for fabric-scoped resources. + + Returns: + Tuple used as a dict key for matching proposed vs existing. + """ + norm_entity = ( + ResourceManagerDiffEngine._normalize_entity_key(entity_name) + if entity_name else None + ) + # device_pair and link encode both endpoints in entity_name; + # normalize switch to None so existing_index and proposed lookups align. + norm_switch = None if scope_type in ("device_pair", "link") else switch_ip + return (norm_entity, pool_name, scope_type, norm_switch) + + @staticmethod + def validate_configs( + config: Union[Dict[str, Any], List[Dict[str, Any]]], + state: str, + nd: NDModule, + log: logging.Logger, + ) -> List[ResourceManagerConfigModel]: + """Validate raw module config and return typed resource configurations. + + Args: + config: Raw config dict or list of dicts from module parameters. + state: Requested module state. + nd: ND module wrapper used for failure handling. + log: Logger instance. 
+ + Returns: + List of validated ``ResourceManagerConfigModel`` objects. + """ + log.debug("ENTER: validate_configs()") + + configs_list = config if isinstance(config, list) else [config] + log.debug("Normalized to %s configuration(s)", len(configs_list)) + + validated_configs: List[ResourceManagerConfigModel] = [] + for idx, cfg in enumerate(configs_list): + try: + validated = ResourceManagerConfigModel.model_validate( + cfg, context={"state": state} + ) + validated_configs.append(validated) + except ValidationError as e: + error_detail = e.errors() if hasattr(e, "errors") else str(e) + error_msg = ( + f"Configuration validation failed for " + f"config index {idx}: {error_detail}" + ) + log.error(error_msg) + if hasattr(nd, "module"): + nd.module.fail_json(msg=error_msg) + else: + raise ValueError(error_msg) from e + except Exception as e: + error_msg = ( + f"Configuration validation failed for " + f"config index {idx}: {str(e)}" + ) + log.error(error_msg) + if hasattr(nd, "module"): + nd.module.fail_json(msg=error_msg) + else: + raise ValueError(error_msg) from e + + if not validated_configs: + log.warning("No valid configurations found in input") + return validated_configs + + # Duplicate check: (entity_name, pool_name, scope_type, frozenset(switch)) + seen_keys: set = set() + duplicate_keys: set = set() + log.debug( + "validate_configs: starting duplicate check on %s validated config(s)", + len(validated_configs), + ) + for cfg_dup_idx, cfg in enumerate(validated_configs): + key = ( + cfg.entity_name, + cfg.pool_name, + cfg.scope_type, + frozenset(cfg.switch or []), + ) + log.debug( + "validate_configs: duplicate-check [%s] — entity_name='%s', pool_name='%s', scope_type='%s', switch=%s, key_seen_before=%s", + cfg_dup_idx, + cfg.entity_name, + cfg.pool_name, + cfg.scope_type, + list(cfg.switch or []), + key in seen_keys, + ) + if key in seen_keys: + log.warning( + "validate_configs: [%s] duplicate key detected — entity_name='%s', pool_name='%s', 
scope_type='%s'", + cfg_dup_idx, + cfg.entity_name, + cfg.pool_name, + cfg.scope_type, + ) + duplicate_keys.add(key) + else: + log.debug( + "validate_configs: [%s] key is unique so far — entity_name='%s'", + cfg_dup_idx, + cfg.entity_name, + ) + seen_keys.add(key) + + if duplicate_keys: + error_msg = ( + f"Duplicate config entries found: " + f"{[str(k) for k in duplicate_keys]}. " + f"Each resource must appear only once." + ) + log.error(error_msg) + if hasattr(nd, "module"): + nd.module.fail_json(msg=error_msg) + else: + raise ValueError(error_msg) + + log.info( + "Successfully validated %s configuration(s)", + len(validated_configs), + ) + log.debug("EXIT: validate_configs() -> %s configs", len(validated_configs)) + return validated_configs + + @staticmethod + def compute_changes( + proposed: List[ResourceManagerConfigModel], + existing: List[ResourceManagerResponse], + log: logging.Logger, + ) -> Dict[str, List]: + """Compare proposed and existing resources and categorize changes. + + Uses ``ResourceManagerResponse`` fields (``entity_name``, ``pool_name``, + ``scope_details``, ``resource_value``) to build a matching index and + classify each proposed entry. + + Args: + proposed: Validated ``ResourceManagerConfigModel`` objects + representing desired state. + existing: ``ResourceManagerResponse`` models from the ND API + representing current state. + log: Logger instance. 
+ + Returns: + Dict mapping change buckets to item lists: + - ``to_add``: ``(ResourceManagerConfigModel, switch_ip)`` tuples + - ``to_update``: ``(ResourceManagerConfigModel, switch_ip)`` tuples + - ``to_delete``: ``ResourceManagerResponse`` items + - ``idempotent``: ``(ResourceManagerConfigModel, switch_ip)`` tuples + """ + log.debug("ENTER: compute_changes()") + log.debug( + "Comparing %s proposed vs %s existing resources", + len(proposed), + len(existing), + ) + log.debug( + "Comparing proposed : %s vs existing : %s existing resources", + proposed, + existing, + ) + + # Build index of existing resources keyed by + # (normalized_entity, pool_name, playbook_scope_type, switch_id) + existing_index: Dict[Tuple, ResourceManagerResponse] = {} + for res in existing: + entity = res.entity_name + pool = res.pool_name + scope_type = ResourceManagerDiffEngine._extract_scope_type(res.scope_details) + switch_id = ResourceManagerDiffEngine._extract_scope_switch_key_val(res.scope_details, switch_key="switch_id", src_switch_key="src_switch_id") + key = ResourceManagerDiffEngine._make_resource_key(entity, pool, scope_type, switch_id) + existing_index[key] = res + log.debug( + "Existing index entry: entity=%s, pool=%s, scope_type=%s, switch_id=%s", + entity, + pool, + scope_type, + switch_id, + ) + + log.debug("Built existing index with %s entries", len(existing_index)) + + changes: Dict[str, List] = { + "to_add": [], + "to_update": [], + "to_delete": [], + "idempotent": [], + "debugs": [], + } + + # Build a secondary index keyed by normalised entity_name only. + # Used to detect partial matches (same entity, different pool/scope/switch) + # and populate the debugs bucket to mirror ND's mismatch logging. 
+ entity_only_index: Dict[str, List[ResourceManagerResponse]] = {} + for res in existing: + norm = ResourceManagerDiffEngine._normalize_entity_key(res.entity_name or "") + entity_only_index.setdefault(norm, []).append(res) + log.debug( + "entity_only_index: added entity='%s' under norm_key='%s' (total under key: %s)", + res.entity_name, + norm, + len(entity_only_index[norm]), + ) + + log.debug("Built entity_only_index with %s unique normalised key(s)", len(entity_only_index)) + + # Track which existing keys matched at least one proposed entry + matched_existing_keys: set = set() + + # Categorise proposed resources + for cfg in proposed: + scope_type = cfg.scope_type + pool_name = cfg.pool_name + entity_name = cfg.entity_name + resource_value = cfg.resource + + log.debug( + "Processing proposed cfg: entity=%s, pool=%s, scope=%s, resource=%s, switch=%s", + entity_name, + pool_name, + scope_type, + resource_value, + cfg.switch, + ) + + # device_pair and link encode both endpoints in entity_name; one lookup covers the pair. 
+ if scope_type in ("device_pair", "link"): + switches = [None] + log.debug( + "scope_type='%s' is multi-endpoint — using single switch=None lookup for entity='%s'", + scope_type, + entity_name, + ) + else: + switches = cfg.switch if (scope_type != "fabric" and cfg.switch) else [None] + log.debug( + "scope_type='%s' — resolved switches=%s for entity='%s'", + scope_type, + switches, + entity_name, + ) + + for sw in switches: + key = ResourceManagerDiffEngine._make_resource_key( + entity_name, pool_name, scope_type, sw + ) + log.debug( + "Lookup key=%s for entity='%s', pool='%s', scope='%s', switch=%s", + key, + entity_name, + pool_name, + scope_type, + sw, + ) + existing_res = existing_index.get(key) + + if existing_res is None: + log.info( + "Resource (entity=%s, pool=%s, scope=%s, switch=%s) not found in existing — marking to_add", + entity_name, + pool_name, + scope_type, + sw, + ) + changes["to_add"].append((cfg, sw, None)) + + # GAP-7: Partial-match detection — same entity_name, different + # pool_name / scope_type / switch_ip. Mirrors ND's + # nd_rm_get_mismatched_values() / changed_dict["debugs"] logic. 
+ norm = ResourceManagerDiffEngine._normalize_entity_key(entity_name) + partials = entity_only_index.get(norm, []) + log.debug( + "Partial-match scan for entity='%s' (norm='%s'): %s candidate(s)", + entity_name, + norm, + len(partials), + ) + for partial in partials: + partial_pool = partial.pool_name + partial_scope = ResourceManagerDiffEngine._extract_scope_type( + partial.scope_details + ) + partial_sw = ResourceManagerDiffEngine._extract_scope_switch_key_val( + partial.scope_details, switch_key="switch_ip", src_switch_key="src_switch_ip" + ) + mismatch = { + "have_pool_name": partial_pool, + "want_pool_name": pool_name, + "have_scope_type": partial_scope, + "want_scope_type": scope_type, + "have_switch_ip": partial_sw, + } + log.debug( + "compute_changes: partial match for entity='%s': %s", + entity_name, + mismatch, + ) + changes["debugs"].append( + {"Entity Name": entity_name, "MISMATCHED_VALUES": mismatch} + ) + else: + log.debug( + "Resource (entity=%s, pool=%s, scope=%s, switch=%s) found in existing — resource_id=%s, existing_value='%s'", + entity_name, + pool_name, + scope_type, + sw, + getattr(existing_res, 'resource_id', None), + existing_res.resource_value, + ) + matched_existing_keys.add(key) + existing_value = existing_res.resource_value + + if ResourceManagerDiffEngine._compare_resource_values( + existing_value, resource_value + ): + log.debug( + "Resource (entity=%s, pool=%s, scope=%s, switch=%s) is idempotent (value=%s)", + entity_name, + pool_name, + scope_type, + sw, + existing_value, + ) + changes["idempotent"].append((cfg, sw, existing_res)) + else: + log.info( + "Resource (entity=%s, pool=%s, scope=%s, switch=%s) value differs (existing=%s, desired=%s) — marking to_update", + entity_name, + pool_name, + scope_type, + sw, + existing_value, + resource_value, + ) + changes["to_update"].append((cfg, sw, existing_res)) + + log.debug( + "Proposed scan complete — matched_existing_keys=%s, total existing_index keys=%s", + len(matched_existing_keys), 
+ len(existing_index), + ) + + # Resources in existing but not matched by any proposed entry → to_delete + for key, res in existing_index.items(): + if key not in matched_existing_keys: + log.info( + "Existing resource (entity=%s, pool=%s) not in proposed — marking to_delete", + res.entity_name, + res.pool_name, + ) + changes["to_delete"].append(res) + else: + log.debug( + "Existing resource (entity=%s, pool=%s, key=%s) was matched by a proposed entry — skipping to_delete", + res.entity_name, + res.pool_name, + key, + ) + + log.info( + "Compute changes summary: to_add=%s, to_update=%s, to_delete=%s, idempotent=%s, debugs=%s", + len(changes['to_add']), + len(changes['to_update']), + len(changes['to_delete']), + len(changes['idempotent']), + len(changes['debugs']), + ) + log.debug("EXIT: compute_changes()") + return changes + + @staticmethod + def validate_resource_api_fields( + nd: NDModule, + resource_cfg: ResourceManagerConfigModel, + api_resource: ResourceManagerResponse, + log: logging.Logger, + context: str, + ) -> None: + """Validate user-supplied resource fields against the ND API response. + + Only fields that are non-None in ``resource_cfg`` are validated. + Fields omitted by the user are silently accepted from the API response. + Uses ``ResourceManagerResponse`` model attributes directly for + field access (``entity_name``, ``pool_name``, ``resource_value``, + ``scope_details``). + + Args: + nd: ND module wrapper used for failure handling. + resource_cfg: Validated resource config from the playbook. + api_resource: Matching ``ResourceManagerResponse`` from the ND API. + log: Logger instance. + context: Label used in error messages (e.g. ``"Resource"``). + + Returns: + None. 
+ """ + mismatches: List[str] = [] + + # entity_name: tilde-order-insensitive comparison + if resource_cfg.entity_name is not None: + cfg_norm = ResourceManagerDiffEngine._normalize_entity_key( + resource_cfg.entity_name + ) + api_norm = ( + ResourceManagerDiffEngine._normalize_entity_key(api_resource.entity_name) + if api_resource.entity_name else None + ) + log.debug( + "validate_resource_api_fields: checking entity_name — cfg_norm='%s', api_norm='%s'", + cfg_norm, + api_norm, + ) + if cfg_norm != api_norm: + log.debug( + "validate_resource_api_fields: entity_name MISMATCH — provided='%s', API='%s'", + resource_cfg.entity_name, + api_resource.entity_name, + ) + mismatches.append( + f"entity_name: provided '{resource_cfg.entity_name}', " + f"API reports '{api_resource.entity_name}'" + ) + else: + log.debug( + "validate_resource_api_fields: entity_name OK — '%s' matches API", + resource_cfg.entity_name, + ) + else: + log.debug( + "validate_resource_api_fields: entity_name not provided in cfg — skipping check (api_entity_name='%s')", + api_resource.entity_name, + ) + + # pool_name: exact match + if resource_cfg.pool_name is not None: + log.debug( + "validate_resource_api_fields: checking pool_name — cfg='%s', api='%s'", + resource_cfg.pool_name, + api_resource.pool_name, + ) + if resource_cfg.pool_name != api_resource.pool_name: + log.debug( + "validate_resource_api_fields: pool_name MISMATCH — provided='%s', API='%s'", + resource_cfg.pool_name, + api_resource.pool_name, + ) + mismatches.append( + f"pool_name: provided '{resource_cfg.pool_name}', " + f"API reports '{api_resource.pool_name}'" + ) + else: + log.debug( + "validate_resource_api_fields: pool_name OK — '%s' matches API", + resource_cfg.pool_name, + ) + else: + log.debug( + "validate_resource_api_fields: pool_name not provided in cfg — skipping check (api_pool_name='%s')", + api_resource.pool_name, + ) + + # resource vs resource_value: IPv4/v6-aware comparison + if resource_cfg.resource is not None: + 
log.debug( + "validate_resource_api_fields: checking resource value — cfg='%s', api='%s'", + resource_cfg.resource, + api_resource.resource_value, + ) + if not ResourceManagerDiffEngine._compare_resource_values( + api_resource.resource_value, resource_cfg.resource + ): + log.debug( + "validate_resource_api_fields: resource value MISMATCH — provided='%s', API='%s'", + resource_cfg.resource, + api_resource.resource_value, + ) + mismatches.append( + f"resource: provided '{resource_cfg.resource}', " + f"API reports '{api_resource.resource_value}'" + ) + else: + log.debug( + "validate_resource_api_fields: resource value OK — '%s' matches API '%s'", + resource_cfg.resource, + api_resource.resource_value, + ) + else: + log.debug( + "validate_resource_api_fields: resource not provided in cfg — skipping check (api_resource_value='%s')", + api_resource.resource_value, + ) + + if mismatches: + nd.module.fail_json( + msg=( + f"{context} field mismatch for entity '{resource_cfg.entity_name}'. " + f"The following provided values do not match the API data:\n" + + "\n".join(f" - {m}" for m in mismatches) + ) + ) + + log.debug( + "validate_resource_api_fields: all provided fields match API for entity='%s', pool='%s'", + resource_cfg.entity_name, + resource_cfg.pool_name, + ) + + +class NDResourceManagerModule: + """ + Manage resources in Cisco Nexus Dashboard via the ND Manage v1 API. + + Uses pydantic models for input validation and smart endpoints for path/verb generation. + Preserves the same business logic as nd_manage_resource_manager.py. + """ + + def __init__( + self, + nd: NDModule, + results: Results, + logger: Optional[logging.Logger] = None, + ): + """Initialise the module, resolve fabric/state from ND params, and pre-fetch all resources. + + Queries the ND Manage API for all existing resources in ``fabric`` at construction + time and caches the result in ``self._all_resources``. 
The cached list is used as + the ``existing`` baseline for diff computation in both merged and deleted states, + avoiding repeated GET requests during the same module run. + + Args: + nd: Initialised ``NDModule`` wrapper that holds the Ansible module params + and the underlying ``RestSend`` HTTP client. + results: ``Results`` instance used to accumulate API call results and + build the final module output. + logger: Optional external logger. If not provided a module-level logger + (``logging.getLogger(__name__)``) is used. + """ + self.nd = nd + self.results = results + self.log = logger if logger is not None else logging.getLogger(__name__) + + self.fabric = nd.params["fabric"] + self.state = nd.params["state"] + self.config = nd.params.get("config") or [] + + # ND-compatible tracking dicts + self.changed_dict = [{"merged": [], "deleted": [], "gathered": [], "debugs": []}] + self.api_responses = [] + + # Cached GET results — resources + self._all_resources = [] + self._resources_fetched = False + + # Cached GET results — switches + self._all_switches: List[SwitchRecord] = [] + self._switches_fetched = False + self._switch_ip_to_id: Dict[str, str] = {} + + # Get All resources for the given fabric and cache them for matching during merged/deleted operations + self._get_all_resources() + + # Get all switches and build IP→switchId map; translate config switch lists + self._get_all_switches() + self._build_switch_ip_to_id_map() + self.config = self._resolve_switch_ids_in_config(self.config) + + # Resource collections — existing/previous snapshot at init, proposed populated in manage_state + self.existing: List[ResourceManagerResponse] = list(self._all_resources) + self.previous: List[ResourceManagerResponse] = list(self._all_resources) + self.proposed: List[ResourceManagerConfigModel] = [] + + # NDOutput for building consistent Ansible output across all states + self.output: NDOutput = NDOutput(output_level=nd.params.get("output_level", "normal")) + + self.log.info( + 
"NDResourceManagerModule initialized: fabric=%s, state=%s, config_count=%s", + self.fabric, + self.state, + len(self.config), + ) + + # ------------------------------------------------------------------ + # Input validation + # ------------------------------------------------------------------ + + def _validate_resource_params(self, item): + """Validate that the combination of pool_type, pool_name, and scope_type is allowed. + + Maps pool_type to an internal check_key (the pool_name for ID pools, 'IP_POOL' for + IP pools, 'SUBNET' for subnet pools), then looks up the allowed scope_type list in + ``POOL_SCOPE_MAP``. Fails fast with an informative message if the + combination is not permitted by the ND Manage API. + + Args: + item: A single config dict from the playbook ``config`` list, expected to + contain ``pool_type``, ``pool_name``, and ``scope_type`` keys. + + Returns: + Tuple ``(True, '')`` when validation passes. + Tuple ``(False, error_message)`` when an invalid combination is detected. 
+ """ + pool_type = item.get("pool_type") + pool_name = item.get("pool_name") + scope_type = item.get("scope_type") + + self.log.debug( + "Validating resource params: pool_type=%s, pool_name=%s, scope_type=%s", + pool_type, + pool_name, + scope_type, + ) + + if pool_type == "ID": + self.log.debug("pool_type is 'ID', using pool_name as check_key: %s", pool_name) + check_key = pool_name + elif pool_type == "IP": + self.log.debug("pool_type is 'IP', using check_key='IP_POOL'") + check_key = "IP_POOL" + elif pool_type == "SUBNET": + self.log.debug("pool_type is 'SUBNET', using check_key='SUBNET'") + check_key = "SUBNET" + else: + msg = ( + "Given pool type = '{0}' is invalid," + " Allowed pool types = ['ID', 'IP', 'SUBNET']".format(pool_type) + ) + self.log.warning("Validation failed: %s", msg) + return False, msg + + allowed_scopes = POOL_SCOPE_MAP.get(check_key) + if allowed_scopes is None: + msg = "Given pool name '{0}' is not valid".format(pool_name) + self.log.warning("Validation failed: %s", msg) + return False, msg + + if scope_type not in allowed_scopes: + msg = ( + "Given scope type '{0}' is not valid for pool name = '{1}'," + " Allowed scope_types = {2}".format( + scope_type, pool_name, allowed_scopes + ) + ) + self.log.warning("Validation failed: %s", msg) + return False, msg + + self.log.debug( + "Validation passed: pool_name=%s, scope_type=%s, allowed_scopes=%s", + pool_name, + scope_type, + allowed_scopes, + ) + return True, "" + + def _validate_input(self): + """Validate all playbook config items against the requirements of the current state. + + For ``merged`` and ``deleted`` states, ensures that ``config`` is provided and that + every item carries the four mandatory fields (``entity_name``, ``pool_type``, + ``pool_name``, ``scope_type``). 
Also verifies that ``switch`` is present for any + non-fabric scope type, runs pool_type/pool_name/scope_type compatibility checks via + ``_validate_resource_params``, and performs pydantic cross-field validation via + ``ResourceManagerConfigModel.from_config``. + + For ``gathered`` state, mandatory field checks are skipped so that partial filter + criteria (e.g. only ``pool_name`` or only ``switch``) are accepted. + + Calls ``self.nd.module.fail_json`` directly on any validation failure. + """ + self.log.info( + "Validating input: state=%s, config_count=%s", + self.state, + len(self.config), + ) + + if not self.config: + if self.state in ("merged", "deleted", "overridden"): + self.log.error( + "'config' is mandatory for state '%s' but was not provided", + self.state, + ) + self.nd.module.fail_json( + msg="'config' element is mandatory for state '{0}'".format( + self.state + ) + ) + return + + for item in self.config: + self.log.debug( + "Validating config item: entity_name=%s, pool_name=%s, scope_type=%s, pool_type=%s", + item.get('entity_name'), + item.get('pool_name'), + item.get('scope_type'), + item.get('pool_type'), + ) + if self.state != "gathered": + # Mandatory parameter checks + for field in ("scope_type", "pool_type", "pool_name", "entity_name"): + if item.get(field) is None: + self.log.error( + "Mandatory parameter '%s' is missing in config item: %s", + field, + item, + ) + self.nd.module.fail_json( + msg="Mandatory parameter '{0}' missing".format(field) + ) + else: + self.log.debug("Mandatory parameter '%s' present: %s", field, item.get(field)) + + # Switch required for non-fabric scopes + if item.get("scope_type") != "fabric" and not item.get("switch"): + self.log.error( + "'switch' is required for scope_type='%s' but is missing in config item: %s", + item.get('scope_type'), + item, + ) + self.nd.module.fail_json(msg="switch : Required parameter not found") + elif item.get("scope_type") != "fabric": + self.log.debug( + "'switch' provided for 
scope_type='%s': %s", + item.get('scope_type'), + item.get('switch'), + ) + + # Validate pool_name / scope_type combination (only when pool_type is provided) + if item.get("pool_type") is not None: + self.log.debug( + "Running pool_type/pool_name/scope_type compatibility check for: pool_type=%s, pool_name=%s, scope_type=%s", + item.get('pool_type'), + item.get('pool_name'), + item.get('scope_type'), + ) + rc, mesg = self._validate_resource_params(item) + if not rc: + self.log.error("Pool/scope compatibility check failed: %s", mesg) + self.nd.module.fail_json(msg=mesg) + else: + self.log.debug("Pool/scope compatibility check passed") + + # Pydantic cross-field validation for merged/deleted + if self.state != "gathered": + try: + ResourceManagerConfigModel.from_config(item) + self.log.debug( + "Pydantic validation passed for entity_name=%s", + item.get('entity_name'), + ) + except Exception as exc: + self.log.error( + "Pydantic validation failed for entity_name=%s: %s", + item.get('entity_name'), + exc, + ) + self.nd.module.fail_json( + msg="Invalid parameters in playbook: {0}".format(str(exc)) + ) + + # ------------------------------------------------------------------ + # ND API interaction helpers + # ------------------------------------------------------------------ + + def _get_all_resources(self): + """Fetch all existing resources for the fabric from the ND Manage API and cache them. + + Issues a single GET request to the fabric resources endpoint. The response is + normalised to a flat list of ``ResourceManagerResponse`` model instances (or raw + dicts when model parsing fails) and stored in ``self._all_resources``. Subsequent + calls return immediately without hitting the API again (``self._resources_fetched`` + flag). + + A 404 response is treated as an empty fabric (no resources allocated yet) rather + than an error. Any other ``NDModuleError`` is re-raised to the caller. 
+ """ + if self._resources_fetched: + self.log.debug( + "Resources already cached for fabric=%s: %s resource(s)", + self.fabric, + len(self._all_resources), + ) + return + + self.log.info("Fetching all resources for fabric=%s", self.fabric) + + ep = EpManageFabricResourcesGet(fabric_name=self.fabric) + try: + data = self.nd.request(ep.path, ep.verb) + except NDModuleError as exc: + if exc.status == 404: + # Fabric has no resources yet — that is valid + self.log.info( + "No resources found (404) for fabric=%s, treating as empty", + self.fabric, + ) + self._resources_fetched = True + return + raise + + # The ND API may return a list directly or {"resources": [...], "meta": {...}} + if isinstance(data, list): + self.log.debug("API returned a list with %s item(s)", len(data)) + raw_list = data + elif isinstance(data, dict) and "resources" in data: + self.log.debug( + "API returned dict with 'resources' key, %s resource(s)", + len(data['resources']), + ) + raw_list = data["resources"] + elif isinstance(data, dict) and data: + self.log.debug("API returned a non-empty dict without 'resources' key, wrapping in list") + raw_list = [data] + else: + self.log.debug("API returned empty or unexpected data, treating as empty list") + raw_list = [] + + for raw in raw_list: + try: + resource_model = ResourceManagerResponse.from_response(raw) + self.log.debug( + "Parsed resource: entity_name=%s, pool_name=%s", + getattr(resource_model, 'entity_name', None), + getattr(resource_model, 'pool_name', None), + ) + self._all_resources.append(resource_model) + except Exception as exc: + # If parsing fails, keep the raw dict so we can still match on it + self.log.warning( + "Failed to parse resource into ResourceManagerResponse (keeping raw): %s | raw=%s", + exc, + raw, + ) + self._all_resources.append(raw) + + self._resources_fetched = True + self.log.info( + "Fetched %s resource(s) for fabric=%s", + len(self._all_resources), + self.fabric, + ) + + def _get_all_switches(self): + """Fetch 
all switches for the fabric from the ND Manage API and cache them. + + Issues a single GET request to the fabric switches endpoint using + ``EpManageFabricSwitchesGet``. The response is parsed into a + ``GetAllSwitchesResponse`` model and the individual ``SwitchRecord`` items are + stored in ``self._all_switches``. Subsequent calls return immediately without + hitting the API again (``self._switches_fetched`` flag). + + A 404 response is treated as an empty fabric (no switches found) rather than an + error. Any other ``NDModuleError`` is re-raised to the caller. + """ + if self._switches_fetched: + self.log.debug( + "_get_all_switches: Switches already cached for fabric=%s: %s switch(es)", + self.fabric, + len(self._all_switches), + ) + return + + self.log.info("_get_all_switches: Fetching all switches for fabric=%s", self.fabric) + + ep = EpManageFabricSwitchesGet(fabric_name=self.fabric) + self.log.debug( + "_get_all_switches: querying path='%s' for fabric='%s'", + ep.path, + self.fabric, + ) + + try: + data = self.nd.request(ep.path, ep.verb) + except NDModuleError as exc: + if exc.status == 404: + self.log.info( + "_get_all_switches: No switches found (404) for fabric=%s, treating as empty", + self.fabric, + ) + self._switches_fetched = True + return + raise + + self.log.debug( + "_get_all_switches: received response type=%s", + type(data).__name__, + ) + + parsed = GetAllSwitchesResponse.from_response(data) + self._all_switches = parsed.switches + + self._switches_fetched = True + total = parsed.meta.counts.total if (parsed.meta and parsed.meta.counts) else len(self._all_switches) + self.log.info( + "_get_all_switches: Fetched %s switch(es) for fabric=%s (API total=%s)", + len(self._all_switches), + self.fabric, + total, + ) + + def _build_switch_ip_to_id_map(self): + """Build the ``fabricManagementIp → switchId`` lookup map from cached switch records. 
+
+        Iterates ``self._all_switches`` (populated by ``_get_all_switches``) and populates
+        ``self._switch_ip_to_id``. Records that are missing either ``fabric_management_ip``
+        or ``switch_id`` are skipped with a debug log entry.
+        """
+        self.log.debug(
+            "_build_switch_ip_to_id_map: building map from %s cached switch record(s)",
+            len(self._all_switches),
+        )
+
+        for idx, sw in enumerate(self._all_switches):
+            switch_id = sw.switch_id
+            switch_ip = sw.fabric_management_ip
+            self.log.debug(
+                "_build_switch_ip_to_id_map: [%s] switchId='%s', fabricManagementIp='%s'",
+                idx,
+                switch_id,
+                switch_ip,
+            )
+            if switch_id and switch_ip:
+                # Keys are stripped so playbook IPs with stray whitespace still match.
+                self._switch_ip_to_id[str(switch_ip).strip()] = switch_id
+                self.log.debug(
+                    "_build_switch_ip_to_id_map: [%s] mapped ip='%s' -> switchId='%s' (map_size=%s)",
+                    idx,
+                    switch_ip,
+                    switch_id,
+                    len(self._switch_ip_to_id),
+                )
+            else:
+                self.log.debug(
+                    "_build_switch_ip_to_id_map: [%s] skipping — missing switch_id='%s' or fabric_management_ip='%s'",
+                    idx,
+                    switch_id,
+                    switch_ip,
+                )
+
+        self.log.info(
+            "_build_switch_ip_to_id_map: map complete — %s entry/entries",
+            len(self._switch_ip_to_id),
+        )
+
+    def _resolve_switch_ids_in_config(self, config):
+        """Translate management IPs in config ``switch`` lists to switchId values.
+
+        Returns a deep copy of ``config`` with each entry's ``switch`` list translated
+        from management IP strings (e.g. ``'192.168.10.150'``) to the corresponding
+        ``switchId`` values (e.g. ``'9H1Q6YOL08G'``) using ``self._switch_ip_to_id``.
+
+        IPs that are not found in the map are passed through unchanged so the caller can
+        decide how to handle unresolved entries (the ND API will reject them with an
+        appropriate error).
+
+        Args:
+            config: Raw config list from ``nd.params["config"]``. Not mutated.
+
+        Returns:
+            A deep copy of ``config`` with switch IPs replaced by switchId values.
+ """ + self.log.debug( + "_resolve_switch_ids_in_config: translating %s config item(s) using map of %s entry/entries", + len(config or []), + len(self._switch_ip_to_id), + ) + + config_copy = copy.deepcopy(config or []) + + for idx, item in enumerate(config_copy): + raw_switch_list = item.get("switch") or [] + entity_name = item.get("entity_name") + scope_type = item.get("scope_type") + + self.log.debug( + "_resolve_switch_ids_in_config: [%s] entity='%s', scope_type='%s', raw_switch_list=%s", + idx, + entity_name, + scope_type, + raw_switch_list, + ) + + if not raw_switch_list: + self.log.debug( + "_resolve_switch_ids_in_config: [%s] entity='%s' — no switch list present, skipping translation", + idx, + entity_name, + ) + continue + + resolved = [] + for sw_ip in raw_switch_list: + sw_key = str(sw_ip).strip() + sw_id = self._switch_ip_to_id.get(sw_key, sw_key) + if sw_id != sw_key: + self.log.debug( + "_resolve_switch_ids_in_config: [%s] entity='%s' switch '%s' -> resolved switchId='%s'", + idx, + entity_name, + sw_ip, + sw_id, + ) + else: + self.log.debug( + "_resolve_switch_ids_in_config: [%s] entity='%s' switch '%s' not found in map — passing through unchanged", + idx, + entity_name, + sw_ip, + ) + resolved.append(sw_id) + + item["switch"] = resolved + self.log.debug( + "_resolve_switch_ids_in_config: [%s] entity='%s' final switch list: %s -> %s", + idx, + entity_name, + raw_switch_list, + resolved, + ) + + self.log.debug( + "_resolve_switch_ids_in_config: completed, returning %s translated config item(s)", + len(config_copy), + ) + return config_copy + + # ------------------------------------------------------------------ + # Resource attribute accessors (handle both ResourceManagerResponse and raw dict) + # ------------------------------------------------------------------ + + def _attr(self, resource, model_attr, dict_key): + """Return a field value from a resource that may be a model instance or a raw dict. 
+
+        Tries to read ``model_attr`` from the resource via ``getattr`` first (for typed
+        ``ResourceManagerResponse`` instances), then falls back to ``resource.get(dict_key)``
+        for raw dict responses returned when model parsing failed at fetch time.
+
+        Args:
+            resource: A ``ResourceManagerResponse`` model instance or a plain dict.
+            model_attr: Attribute name to access on a model instance (snake_case).
+            dict_key: Key to access on a raw dict (camelCase, e.g. ``'entityName'``).
+
+        Returns:
+            The field value, or None if neither path resolves.
+        """
+        # hasattr is False for plain dicts (snake_case attrs are not dict keys),
+        # so the model branch cannot shadow the dict fallback.
+        if hasattr(resource, model_attr):
+            value = getattr(resource, model_attr)
+            self.log.debug("_attr: resolved '%s' from model: %s", model_attr, value)
+            return value
+        if isinstance(resource, dict):
+            value = resource.get(dict_key)
+            self.log.debug("_attr: resolved '%s' from dict: %s", dict_key, value)
+            return value
+        self.log.debug("_attr: could not resolve '%s'/'%s' from resource type %s", model_attr, dict_key, type(resource))
+        return None
+
+    def _get_entity_name(self, resource):
+        """Return the entity_name field from a resource model or raw dict."""
+        return self._attr(resource, "entity_name", "entityName")
+
+    def _get_pool_name(self, resource):
+        """Return the pool_name field from a resource model or raw dict."""
+        return self._attr(resource, "pool_name", "poolName")
+
+    def _get_resource_id(self, resource):
+        """Return the resource_id field from a resource model or raw dict."""
+        return self._attr(resource, "resource_id", "resourceId")
+
+    def _get_resource_value(self, resource):
+        """Return the resource_value field from a resource model or raw dict."""
+        return self._attr(resource, "resource_value", "resourceValue")
+
+    def _get_scope_type(self, resource):
+        """Return the playbook-style scope_type string for a resource.
+
+        Reads the raw ND API ``scopeType`` value from either the model's
+        ``scope_details.scope_type`` attribute or the ``scopeDetails.scopeType`` key of a
+        raw dict, then maps it from the API camelCase format (e.g. ``'deviceInterface'``)
+        to the playbook format (e.g. ``'device_interface'``) using
+        ``API_SCOPE_TYPE_TO_PLAYBOOK``.
+
+        Args:
+            resource: A ``ResourceManagerResponse`` model instance or a plain dict.
+
+        Returns:
+            Playbook-style scope_type string, or None if the resource type is unrecognised.
+        """
+        if hasattr(resource, "scope_details") and resource.scope_details:
+            raw = getattr(resource.scope_details, "scope_type", None)
+            self.log.debug("_get_scope_type: from model scope_details, raw=%s", raw)
+        elif isinstance(resource, dict):
+            sd = resource.get("scopeDetails") or {}
+            raw = sd.get("scopeType")
+            self.log.debug("_get_scope_type: from dict scopeDetails, raw=%s", raw)
+        else:
+            self.log.debug("_get_scope_type: unrecognised resource type %s, returning None", type(resource))
+            return None
+        # Unknown API scope values pass through unmapped rather than becoming None.
+        mapped = API_SCOPE_TYPE_TO_PLAYBOOK.get(raw, raw) if raw else None
+        self.log.debug("_get_scope_type: mapped API scope '%s' -> playbook scope '%s'", raw, mapped)
+        return mapped
+
+    def _get_switch_ip(self, resource):
+        """Return the primary switch IP/ID from scopeDetails (src switch for device_pair/link).
+
+        Delegates to ResourceManagerDiffEngine._extract_scope_switch_key_val for model
+        instances so that all scope types are handled uniformly:
+        - fabric → None
+        - device / device_interface → switch_ip
+        - device_pair / link → src_switch_ip
+        """
+        if hasattr(resource, "scope_details") and resource.scope_details:
+            value = ResourceManagerDiffEngine._extract_scope_switch_key_val(resource.scope_details, switch_key="switch_ip", src_switch_key="src_switch_ip")
+            self.log.debug("_get_switch_ip: from model scope_details, switch_ip=%s", value)
+            return value
+        if isinstance(resource, dict):
+            sd = resource.get("scopeDetails") or {}
+            # device/deviceInterface use "switchIp"; device_pair/link use "srcSwitchIp"
+            value = sd.get("switchIp") or sd.get("srcSwitchIp")
+            self.log.debug("_get_switch_ip: from dict scopeDetails, switch_ip=%s", value)
+            return value
+        self.log.debug("_get_switch_ip: unrecognised resource type %s, returning None", type(resource))
+        return None
+
+    def _to_dict(self, resource):
+        """Convert a resource to a plain dict suitable for API response output.
+
+        Calls ``resource.to_payload()`` for ``ResourceManagerResponse`` model instances
+        (which serialises to the ND API camelCase wire format). Returns raw dicts
+        unchanged, since they are already in the correct format.
+
+        Args:
+            resource: A ``ResourceManagerResponse`` model instance or a plain dict.
+
+        Returns:
+            A plain dict representation of the resource.
+ """ + if hasattr(resource, "to_payload"): + result = resource.to_payload() + self.log.debug("_to_dict: converted ResourceManagerResponse to dict via to_payload(): %s", result) + return result + self.log.debug("_to_dict: resource is already a raw dict, returning as-is") + return resource + + # ------------------------------------------------------------------ + # Matching helpers + # ------------------------------------------------------------------ + + def _entity_names_match(self, e1, e2): + """Compare two entity names in a tilde-order-insensitive way. + + Splits each name on ``'~'``, sorts the resulting parts alphabetically, and + compares the sorted lists. This ensures that a device_pair entity such as + ``'SER1~SER2~label'`` matches ``'SER2~SER1~label'`` regardless of the order + in which the serial numbers appear in the playbook vs the ND API response. + + Args: + e1: First entity name string. + e2: Second entity name string. + + Returns: + True if both names are non-None and their sorted tilde-parts are equal, + False otherwise. + """ + if e1 is None or e2 is None: + self.log.debug( + "_entity_names_match: one or both entity names are None (e1=%s, e2=%s), returning False", + e1, + e2, + ) + return False + result = sorted(e1.split("~")) == sorted(e2.split("~")) + self.log.debug( + "_entity_names_match: e1='%s', e2='%s', sorted_e1=%s, sorted_e2=%s, match=%s", + e1, + e2, + sorted(e1.split('~')), + sorted(e2.split('~')), + result, + ) + return result + + # ------------------------------------------------------------------ + # API payload builders + # ------------------------------------------------------------------ + + def _build_scope_details(self, scope_type, switch_ip=None, entity_name=None): + """Build the scopeDetails Pydantic model for the ND Manage API. + + ``switch_ip`` is the translated switchId (serial number) of the source switch + from the playbook ``switch`` list. 
The entity_name encodes the full topology + (src and dst) as tilde-separated fields — the server uses it to resolve + additional context, so we only need to supply srcSwitchId for multi-switch + scopes (device_pair, link) and let the server derive dst from entityName. + + - fabric: FabricScope(fabricName) + - device: DeviceScope(switchId) + - device_interface: DeviceInterfaceScope(switchId, interfaceName) + - device_pair: DevicePairScope(srcSwitchId) — dst derived by server from entityName + - link: LinkScope(srcSwitchId, srcInterfaceName) — dst derived by server from entityName + """ + self.log.debug( + "_build_scope_details: scope_type=%s, switch_ip=%s, entity_name=%s, fabric=%s", + scope_type, + switch_ip, + entity_name, + self.fabric, + ) + + if scope_type == "fabric": + self.log.debug( + "_build_scope_details: fabric scope -> fabricName=%s", + self.fabric, + ) + result = FabricScope(fabric_name=self.fabric) + + elif scope_type == "device": + self.log.debug( + "_build_scope_details: device scope -> switchId=%s", + switch_ip, + ) + result = DeviceScope(switch_id=switch_ip) + + elif scope_type == "device_interface": + # entity_name format: ~ + # switch_ip is already the translated switchId (serial number) + parts = (entity_name or "").split("~", 1) + if_name = parts[1] if len(parts) > 1 else None + self.log.debug( + "_build_scope_details: device_interface scope -> switchId=%s, interfaceName=%s (interfaceName parsed from entity_name='%s')", + switch_ip, + if_name, + entity_name, + ) + if not if_name: + self.log.warning( + "_build_scope_details: device_interface scope: could not parse interfaceName from entity_name='%s'", + entity_name, + ) + result = DeviceInterfaceScope(switch_id=switch_ip, interface_name=if_name) + + elif scope_type == "device_pair": + # entity_name format: ~[~