diff --git a/dcnm-ut b/dcnm-ut deleted file mode 100644 index e69de29bb..000000000 diff --git a/plugins/module_utils/network/dcnm/dcnm.py b/plugins/module_utils/network/dcnm/dcnm.py index e308fd275..716289e50 100644 --- a/plugins/module_utils/network/dcnm/dcnm.py +++ b/plugins/module_utils/network/dcnm/dcnm.py @@ -28,7 +28,6 @@ # Any third party module must be imported as shown. If not ansible sanity tests will fail try: import requests - HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False @@ -57,7 +56,8 @@ def validate_ip_address_format(type, item, invalid_params): - if (type == "ipv4_subnet") or (type == "ipv4"): + if ((type == "ipv4_subnet") or (type == "ipv4") or + (type == "ipV4AddressWithSubnet") or (type == "ipV4Address")): addr_type = "IPv4" addr_family = socket.AF_INET mask_len = 32 @@ -68,7 +68,7 @@ def validate_ip_address_format(type, item, invalid_params): if item.strip() != "": address = item.split("/")[0] - if "subnet" in type: + if "subnet" in type.lower(): if "/" in item: subnet = item.split("/")[1] if not subnet or int(subnet) > mask_len: @@ -116,13 +116,13 @@ def validate_list_of_dicts(param_list, spec, module=None): item = spec[param].get("default") else: type = spec[param].get("type") - if type == "str": + if type == "str" or type == "string" or type == "string[]": item = v.check_type_str(item) if spec[param].get("length_max"): - if 1 <= len(item) <= spec[param].get("length_max"): + if 1 <= len(item) <= int(spec[param].get("length_max")): pass elif param == "vrf_name" and ( - len(item) <= spec[param].get("length_max") + len(item) <= int(spec[param].get("length_max")) ): pass else: @@ -132,13 +132,13 @@ def validate_list_of_dicts(param_list, spec, module=None): param, item, spec[param].get("length_max") ) ) - elif type == "int": + elif type == "int" or type == "integer" or type == "long": item = v.check_type_int(item) min_value = 1 if spec[param].get("range_min") is not None: - min_value = spec[param].get("range_min") + min_value = int(spec[param].get("range_min")) if spec[param].get("range_max"): - if min_value <= item <= spec[param].get("range_max"): + if min_value <= item <= int(spec[param].get("range_max")): pass else: invalid_params.append( @@ -147,11 +147,13 @@ def validate_list_of_dicts(param_list, spec, module=None): param, item, spec[param].get("range_max") ) ) - elif type == "bool": + elif type == "bool" or type == "boolean": item = v.check_type_bool(item) + if type == "boolean": + item = str(item).lower() elif type == "list": item = v.check_type_list(item) - elif type == "dict": + elif type == "dict" or type == "structureArray": item = v.check_type_dict(item) elif ( (type == "ipv4_subnet") @@ -458,9 +460,6 @@ def dcnm_version_supported(module): # For these examples 11 or 12 would be returned raw_version = data["version"] - if raw_version == "DEVEL": - raw_version = "11.5(1)" - regex = r"^(\d+)\.\d+" mo = re.search(regex, raw_version) if mo: @@ -780,3 +779,225 @@ def dcnm_post_request(path, hdrs, verify_flag, upload_files): json_resp["REQUEST_PATH"] = path json_resp.pop("message") return json_resp + + +def build_arg_spec(module, path): + """ + Builds the argument specification for the module based on the response received from the DCNM template API. + + Args: + module: The Ansible module object. + path: The API path for template. + + Returns: + arg_spec: The argument specification dictionary for the module. 
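+
+    Example (illustrative only; the actual parameter names, types and bounds
+    depend on the template data returned by NDFC):
+
+        {
+            "mtu": {
+                "type": "integer",
+                "required": False,
+                "default": "",
+                "range_min": "68",
+                "range_max": "9216",
+            }
+        }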
+ + """ + + resp = dcnm_send(module, "GET", path) + arg_spec = {} + + if ( + resp + and resp["RETURN_CODE"] == 200 + and resp["MESSAGE"] == "OK" + and resp["DATA"] + ): + params = resp["DATA"]["parameters"] + for i in params: + name = None + type = "string" + default = None + required = False + range_min = None + range_max = None + length_max = None + length_min = None + arg = {} + hidden = False + reqcode = False + isshow = None + for key in i.keys(): + if key == "name": + name = i[key] + if key == "parameterType": + type = i[key] + # if key == "defaultValue": + # default = i[key] + if key == "optional": + required = not i[key] + if key == "annotations": + k = i[key] + for anonkey in k.keys(): + if anonkey == "IsHidden" or anonkey == "IsInternal" or anonkey == "ReadOnly": + if k[anonkey]: + hidden = True + break + # if anonkey == "Section" and + # (k[anonkey] == "\"Hidden\"" or k[anonkey] == "\"Attach/Hidden\""): + if anonkey == "Section" and k[anonkey] == "\"Hidden\"": + hidden = True + break + if anonkey == "Section" and k[anonkey] == "\"Attach/Hidden\"": + reqcode = True + if anonkey == "IsShow": + isshow = k[anonkey] + if hidden: + break + if key == "metaProperties": + j = i[key] + for metakey in j.keys(): + if metakey == "min": + range_min = j[metakey] + if metakey == "max": + range_max = j[metakey] + if metakey == "defaultValue": + default = j[metakey] + if metakey == "minLength": + length_min = j[metakey] + if metakey == "maxLength": + length_max = j[metakey] + + if not hidden: + if reqcode: + required = False + vars()[name] = dict(type=type, required=required) + if default: + vars()[name].update({"default": default}) + else: + vars()[name].update({"default": ""}) + if range_min: + vars()[name].update({"range_min": range_min}) + if range_max: + vars()[name].update({"range_max": range_max}) + if length_min: + vars()[name].update({"length_min": length_min}) + if length_max: + vars()[name].update({"length_max": length_max}) + if isshow: + vars()[name].update({"is_show": isshow}) + # vars()[name] = dict(type=type, default=default, required=required, + # range_min=range_min, range_max=range_max, + # length_min=length_min, length_max=length_max) + arg = {name: vars()[name]} + arg_spec.update(arg) + return arg_spec + else: + return [] + + +def resolve_dependency(spec, template): + + for param in spec: + if spec[param].get("is_show"): + value = json.loads(spec[param].get("is_show")) + dep = value.split("==") + if template.get(dep[0]) != dep[1]: + del template[param] + del spec[param]["is_show"] + + +def get_diff(have, want): + """ + Compare two dictionaries or lists and return the differences. + + Args: + have (dict or list): The existing dictionary or list. + want (dict or list): The desired dictionary or list. + + Returns: + tuple: A tuple containing three elements: + - diff_create (list or dict): The elements in `want` but not in `have`. + - diff_create_update (list or dict): The elements in `want` that need to be updated in `have`. + - diff_not_w_in_h (list or dict): The elements in `have` but not in `want`. 
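+
+    Example (illustrative; "d_key" names the key(s) used to match entries):
+
+        have = [{"networkName": "net1", "vlan": 100}]
+        want = [{"d_key": "networkName", "networkName": "net1", "vlan": 200},
+                {"d_key": "networkName", "networkName": "net2", "vlan": 300}]
+
+        get_diff(have, want) would return:
+            diff_create        -> the "net2" entry (present only in want)
+            diff_create_update -> the "net1" entry (vlan differs from have)
+            diff_not_w_in_h    -> [] (the single have entry was matched)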
+ """ + key_list = [] + + if isinstance(have, list): + diff_create = [] + diff_not_w_in_h = have.copy() + diff_create_update = [] + + for wa in want: + keys = wa.get("d_key") + if keys: + key_list = keys.split(",") + found = False + for ha in have: + # update_param = False + match = False + if key_list: + for key in key_list: + if wa[key] == ha[key]: + match = True + continue + else: + match = False + else: + match = True + if match: + diff_not_w_in_h.remove(ha) + found = True + wa_keys = list(wa.keys()) + needs_update = False + for wkey in wa_keys: + if wkey == "d_key": + continue + if not ha.get(wkey) and not wa.get(wkey): + continue + if str(ha[wkey]) != str(wa[wkey]): + if isinstance(ha[wkey], dict): + nest_create, nest_create_update, nest_diff_not_w_in_h = get_diff(ha[wkey], wa[wkey]) + if nest_create or nest_create_update: + needs_update = True + else: + needs_update = True + if needs_update: + diff_create_update.append(wa) + break + if not found: + diff_create.append(wa) + + return diff_create, diff_create_update, diff_not_w_in_h + else: + diff_create = {} + diff_not_w_in_h = have.copy() + diff_create_update = {} + keys = want.get("d_key") + if keys: + key_list = keys.split(",") + found = False + if key_list: + match = False + for key in key_list: + if want[key] == have[key]: + match = True + continue + else: + match = False + else: + match = True + + if match: + diff_not_w_in_h = {} + found = True + wa_keys = list(want.keys()) + needs_update = False + for wkey in wa_keys: + if wkey == "d_key": + continue + if not have.get(wkey) and not want.get(wkey): + continue + if str(have[wkey]) != str(want[wkey]): + if isinstance(have[wkey], dict): + nest_create, nest_create_update, nest_diff_not_w_in_h = get_diff(have[wkey], want[wkey]) + if nest_create or nest_create_update: + needs_update = True + else: + needs_update = True + if needs_update: + diff_create_update.update(want) + if not found: + diff_create.update(want) + + return diff_create, diff_create_update, diff_not_w_in_h diff --git a/plugins/modules/dcnm_networkv2.py b/plugins/modules/dcnm_networkv2.py new file mode 100644 index 000000000..7ee3eea08 --- /dev/null +++ b/plugins/modules/dcnm_networkv2.py @@ -0,0 +1,2119 @@ +#!/usr/bin/python +# +# Copyright (c) 2024 Cisco and/or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +__author__ = "Praveen Ramoorthy" + +DOCUMENTATION = """ +--- +module: dcnm_networkv2 +short_description: Add and remove custom Networks from a NDFC managed VXLAN fabric. +version_added: "4.0.0" +description: + - "Add and remove custom Networks from a NDFC managed VXLAN fabric." + - "In Multisite fabrics, Networks can be created only on Multisite fabric" +author: Praveen Ramoorthy(@praveenramoorthy) +options: + fabric: + description: + - Name of the target fabric for network operations + type: str + required: yes + state: + description: + - The state of NDFC after module completion. 
+ type: str + choices: + - merged + - replaced + - overridden + - deleted + - query + default: merged + config: + description: + - List of details of networks being managed. Not required for state deleted + type: list + elements: dict + suboptions: + net_name: + description: + - Name of the network being managed + type: str + required: true + vrf_name: + description: + - Name of the VRF to which the network belongs to + - This field is required for L3 Networks. + - VRF name should be specified as "NA" for L2 networks + type: str + required: true + net_id: + description: + - ID of the network being managed + - If not specified in the playbook, NDFC will auto-select an available net_id + type: int + required: false + net_template: + description: + - Name of the config template to be used + type: str + default: 'Default_Network_Universal' + net_extension_template: + description: + - Name of the extension config template to be used + type: str + default: 'Default_Network_Extension_Universal' + network_template_config: + description: + - To specifiy the network specific values for the network being managed using + the config params and values specified in the network template/extension template. + type: dict + suboptions: + attach: + description: + - List of network attachment details + type: list + elements: dict + suboptions: + fabric: + description: + - Fabric name where the switch to attach is present + type: str + ipAddress: + description: + - IP address of the switch to be attached to network + type: str + required: true + attached: + description: + - To specify if the switch should be attached/detached to/from network + type: bool + default: true + vlan: + description: + - VLAN ID for the attachment. + type: int + default: -1 + switchPorts: + description: + - List of switch ports to be attached to network + type: list + elements: str + default: [] + torPorts: + description: + - List of TOR ports to be attached to network + type: list + elements: dict + suboptions: + switch: + description: + - Name of the TOR switch to which the network is attached + type: str + ports: + description: + - List of TOR ports to be attached to network + type: list + elements: str + default: [] + deploy: + description: + - To specify if the attachment to network is to be deployed + type: bool + default: true +""" + +EXAMPLES = """ +# This module supports the following states: +# +# Merged: +# Networks defined in the playbook will be merged into the target fabric. +# - If the network does not exist it will be added. +# - If the network exists but properties managed by the playbook are different +# they will be updated if possible. +# - Networks that are not specified in the playbook will be untouched. +# +# Replaced: +# Networks defined in the playbook will be replaced in the target fabric. +# - If the Networks does not exist it will be added. +# - If the Networks exists but properties managed by the playbook are different +# they will be updated if possible. +# - Properties that can be managed by the module but are not specified +# in the playbook will be deleted or defaulted if possible. +# - Networks that are not specified in the playbook will be untouched. +# +# Overridden: +# Networks defined in the playbook will be overridden in the target fabric. +# - If the Networks does not exist it will be added. +# - If the Networks exists but properties managed by the playbook are different +# they will be updated if possible. 
+# - Properties that can be managed by the module but are not specified +# in the playbook will be deleted or defaulted if possible. +# - Networks that are not specified in the playbook will be deleted. +# +# Deleted: +# Networks defined in the playbook will be deleted. +# If no Networks are provided in the playbook, all Networks present on that DCNM fabric will be deleted. +# +# Query: +# Returns the current DCNM state for the Networks listed in the playbook. + + +# Example for creating a network using a Default_Network_Universal/Extension templates +# and Custom_Network_Universal/Extension templates +- name: Merge networks + cisco.dcnm.dcnm_networkv2: + fabric: vxlan-fabric + state: merged + config: + - net_name: "net1" + vrf_name: "vrf1" + net_id: 100 + vlan_id: 100 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + # network_template_config block uses the config params and + # values specified in template specifed net_template + # User should know the config params and values specified in the template. + network_template_config: + gatewayIpAddress: "2.1.1.1/24" + intfDescription: "test_interface" + mtu: 1500 + secondaryGW1: "3.1.1.1" + loopbackId: 10 + attach: + - ipAddress: "1.1.1.1" + attached: true + vlan: 100 + switchPorts: ["Ethernet1/1","Ethernett1/2"] + torPorts: + - switch: Tor1 + ports: ["Ethernet1/13","Ethernet1/14"] + deploy: true + - net_name: "net2" + vrf_name: "vrf2" + net_id: 200 + vlan_id: 200 + net_template: Custom_Network_Universal + net_extension_template: Custom_Network_Extension_Universal + network_template_config: + gatewayIpAddress: "4.10.1.1/24" + intfDescription: "testnet_interface" + mtu: 1500 + secondaryGW1: "30.11.1.1" + loopbackId: 10 + attach: + - ipAddress: "2.2.2.2" + attached: true + vlan: 200 + switchPorts: ["Ethernet1/10","Ethernett1/12"] + torPorts: + - switch: Tor4 + ports: ["Ethernet1/3","Ethernet1/4"] + deploy: true + +- name: Replace networks + cisco.dcnm.dcnm_networkv2: + fabric: vxlan-fabric + state: replaced + config: + - net_name: "net1" + vrf_name: "vrf1" + net_id: 100 + vlan_id: 100 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + # network_template_config block uses the config params and + # values specified in template specifed net_template + # User should know the config params and values specified in the template. 
+ network_template_config: + gatewayIpAddress: "2.1.1.1/24" + intfDescription: "test_interface" + mtu: 1500 + secondaryGW1: "3.1.1.1" + loopbackId: 10 + attach: + - ipAddress: "1.1.1.1" + attached: true + vlan: 100 + # switchPorts: ["Ethernet1/1","Ethernett1/2"] + # Replace the exiting switchPorts with the new switchPorts in attach + switchPorts: ["Ethernet1/21", "Ethernet1/22", "Ethernet1/23"] + torPorts: + - switch: Tor1 + ports: ["Ethernet1/13","Ethernet1/14"] + deploy: true + # This network will not be touched if already present + - net_name: "net2" + vrf_name: "vrf2" + net_id: 200 + vlan_id: 200 + net_template: Custom_Network_Universal + net_extension_template: Custom_Network_Extension_Universal + network_template_config: + gatewayIpAddress: "4.10.1.1/24" + intfDescription: "testnet_interface" + mtu: 1500 + secondaryGW1: "30.11.1.1" + loopbackId: 10 + attach: + - ipAddress: "2.2.2.2" + attached: true + vlan: 200 + switchPorts: ["Ethernet1/10","Ethernett1/12"] + torPorts: + - switch: Tor4 + ports: ["Ethernet1/3","Ethernet1/4"] + deploy: true + +- name: Override networks + cisco.dcnm.dcnm_networkv2: + fabric: vxlan-fabric + state: overridden + config: + - net_name: "net1" + vrf_name: "vrf1" + net_id: 100 + vlan_id: 100 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + # network_template_config block uses the config params and + # values specified in template specifed net_template + # User should know the config params and values specified in the template. + network_template_config: + gatewayIpAddress: "2.1.1.1/24" + intfDescription: "test_interface" + mtu: 1500 + secondaryGW1: "3.1.1.1" + loopbackId: 10 + attach: + # - ipAddress: "1.1.1.1" + # attached: true + # vlan: 100 + # switchPorts: ["Ethernet1/1","Ethernett1/2"] + # torPorts: + # - switch: Tor1 + # ports: ["Ethernet1/13","Ethernet1/14"] + # deploy: true + # Existing network attachment above will be deleted and new attachment below will be added + - ipAddress: "2.2.2.2" + attached: true + vlan: 200 + switchPorts: ["Ethernet1/10","Ethernett1/12"] + torPorts: + - switch: Tor4 + ports: ["Ethernet1/3","Ethernet1/4"] + deploy: true + # Any existing networks in NDFC, not mentioned in the playbook will be deleted + +- name: Delete selected networks + cisco.dcnm.dcnm_network: + fabric: vxlan-fabric + state: deleted + config: + - net_name: ansible-net13 + - net_name: ansible-net12 + +- name: Delete all the networkss + cisco.dcnm.dcnm_networkv2: + fabric: vxlan-fabric + state: deleted + +- name: Query Networks + cisco.dcnm.dcnm_networkv2: + fabric: vxlan-fabric + state: query + config: + - net_name: ansible-net13 + - net_name: ansible-net12 + +""" + +import json +import time +import copy +import re +import datetime +import inspect +from ansible_collections.cisco.dcnm.plugins.module_utils.network.dcnm.dcnm import ( + get_fabric_inventory_details, + dcnm_send, + validate_list_of_dicts, + dcnm_get_ip_addr_info, + get_ip_sn_dict, + get_fabric_details, + get_ip_sn_fabric_dict, + dcnm_version_supported, + dcnm_get_url, + build_arg_spec, + get_diff, + resolve_dependency +) +from ansible.module_utils.basic import AnsibleModule + + +class DcnmNetworkv2: + + dcnm_network_paths = { + 12: { + "GET_NET": "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/fabrics/{}/networks", + "GET_NET_ATTACH": "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/fabrics/{}/networks/attachments?network-names={}", + "GET_NET_NAME": 
"/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/fabrics/{}/networks/{}", + "TEMPLATE_WITH_NAME": "/appcenter/cisco/ndfc/api/v1/configtemplate/rest/config/templates/{}", + "BULK_NET": "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/bulk-create/networks" + + }, + } + + def __init__(self, module): + self.module = module + self.params = module.params + self.fabric = module.params["fabric"] + self.config = copy.deepcopy(module.params.get("config")) + self.check_mode = False + self.have_create = [] + self.want_create = [] + self.diff_create = [] + self.diff_create_update = [] + self.diff_not_w_in_h = [] + # This variable is created specifically to hold all the create payloads which are missing a + # networkId. These payloads are sent to NDFC out of band (basically in the get_diff_merge()) + # We lose diffs for these without this variable. The content stored here will be helpful for + # cases like "check_mode" and to print diffs[] in the output of each task. + self.have_attach = [] + self.want_attach = [] + self.diff_attach = [] + self.diff_attach_update = [] + self.diff_attach_not_w_in_h = [] + self.validated = [] + # diff_detach is to list all attachments of a network being deleted, especially for state: OVERRIDDEN + # The diff_detach and delete operations have to happen before create+attach+deploy for networks being created. + # This is specifically to address cases where VLAN from a network which is being deleted is used for another + # network. Without this additional logic, the create+attach+deploy go out first and complain the VLAN is already + # in use. + self.diff_detach = [] + self.have_deploy = {} + self.want_deploy = {} + self.diff_deploy = {} + self.diff_undeploy = {} + self.diff_delete = {} + self.diff_input_format = [] + self.dyn_arg_spec = {} + self.query = [] + + self.result = dict(changed=False, diff=[], response=[], warnings=[]) + + self.WAIT_TIME_FOR_DELETE_LOOP = 5 # in seconds + + def log(self, msg): + with open('netv2.log', 'a') as of: + callerframerecord = inspect.stack()[1] + frame = callerframerecord[0] + info = inspect.getframeinfo(frame) + d = datetime.datetime.now().replace(microsecond=0).isoformat() + of.write("---- %s ---- %s@%s ---- %s \n" % (d, info.lineno, info.function, msg)) + + def get_diff_delete(self): + """ + Retrieves the network configurations that need to be deleted. 
+ + Returns: + None + + Raises: + None + """ + + diff_detach = [] + diff_undeploy = {} + diff_delete = {} + + if self.config: + for want_c in self.want_create: + if not next( + ( + have_c + for have_c in self.have_create + if have_c["networkName"] == want_c["networkName"] + ), + None, + ): + continue + diff_delete.update({want_c["networkName"]: "DEPLOYED"}) + + have_a = next( + ( + attach + for attach in self.have_attach + if attach["networkName"] == want_c["networkName"] + ), + None, + ) + + if not have_a: + continue + + to_del = [] + atch_h = have_a["lanAttachList"] + for a_h in atch_h: + if a_h["deployment"]: + a_h.update({"deployment": False}) + a_h.update({"torPorts": ""}) + a_h.update({"switchPorts": ""}) + a_h.update({"detachSwitchPorts": ""}) + to_del.append(a_h) + if diff_undeploy.get(a_h["serialNumber"]): + diff_undeploy[a_h["serialNumber"]].append(a_h["networkName"]) + else: + diff_undeploy[a_h["serialNumber"]] = [a_h["networkName"]] + if to_del: + have_a.update({"lanAttachList": to_del}) + diff_detach.append(have_a) + else: + for have_c in self.have_create: + diff_delete.update({have_c["networkName"]: "DEPLOYED"}) + + for have_a in self.have_attach: + to_del = [] + atch_h = have_a["lanAttachList"] + for a_h in atch_h: + if a_h["deployment"]: + a_h.update({"deployment": False}) + a_h.update({"torPorts": ""}) + a_h.update({"switchPorts": ""}) + a_h.update({"detachSwitchPorts": ""}) + to_del.append(a_h) + if diff_undeploy.get(a_h["serialNumber"]): + diff_undeploy[a_h["serialNumber"]].append(a_h["networkName"]) + else: + diff_undeploy[a_h["serialNumber"]] = [a_h["networkName"]] + if to_del: + have_a.update({"lanAttachList": to_del}) + diff_detach.append(have_a) + + self.diff_detach = diff_detach + self.diff_undeploy = diff_undeploy + self.diff_delete = diff_delete + + def get_diff_override(self): + """ + Retrieves the differences and overrides for network attachments. + + This method compares the network and attachments present in the playbook with the ones + currently configured on NDFC. It identifies the networkd and attachments that need to be created, + attached, detached, deployed, undeployed, or deleted. It also updates the attachments + accordingly and generates warning messages if necessary. + + Returns: + str: Warning message generated during the process. + + """ + + diff_delete = {} + + warn_msg = self.get_diff_replace() + + diff_create = self.diff_create + diff_attach = self.diff_attach + diff_detach = self.diff_detach + diff_deploy = self.diff_deploy + diff_undeploy = self.diff_undeploy + + for have_a in self.have_attach: + # This block will take care of deleting all the networks that are only present on NDFC but not on playbook + # The "if not found" block will go through all attachments under those networks and update them so that + # they will be detached and also the network name will be added to delete payload. 
+ found = next( + ( + net + for net in self.want_create + if net["networkName"] == have_a["networkName"] + ), + None, + ) + + to_del = [] + if not found: + atch_h = have_a["lanAttachList"] + for a_h in atch_h: + if a_h["deployment"]: + a_h.update({"deployment": False}) + a_h.update({"torPorts": ""}) + a_h.update({"switchPorts": ""}) + a_h.update({"detachSwitchPorts": ""}) + to_del.append(a_h) + if diff_undeploy.get(a_h["serialNumber"]): + diff_undeploy[a_h["serialNumber"]].append(a_h["networkName"]) + else: + diff_undeploy[a_h["serialNumber"]] = [a_h["networkName"]] + + if to_del: + have_a.update({"lanAttachList": to_del}) + diff_detach.append(have_a) + + # The following is added just to help in deletion, we need to wait for detach transaction to complete + # before attempting to delete the network. + diff_delete.update({have_a["networkName"]: "DEPLOYED"}) + + self.diff_create = diff_create + self.diff_attach = diff_attach + self.diff_deploy = diff_deploy + self.diff_undeploy = diff_undeploy + self.diff_delete = diff_delete + self.diff_detach = diff_detach + return warn_msg + + def get_diff_replace(self): + """ + Retrieves the differences for replacing network and attachments. + + This method compares the existing network and attachments with the desired network and attachments + and identifies the differences that need to be made in order to replace the attachments. + + Returns: + str: A warning message indicating any potential issues or conflicts. + + """ + + warn_msg = self.get_diff_merge(replace=True) + diff_create = self.diff_create + diff_attach = self.diff_attach + diff_deploy = self.diff_deploy + + for have_a in self.have_attach: + r_net_list = [] + h_in_w = False + for want_a in self.want_attach: + # This block will take care of deleting any attachments that are present only on NDFC + # but, not on the playbook. In this case, the playbook will have a network and few attaches under it, + # but, the attaches may be different to what the NDFC has for the same network. + if have_a["networkName"] == want_a["networkName"]: + h_in_w = True + atch_h = have_a["lanAttachList"] + atch_w = want_a.get("lanAttachList") + + for a_h in atch_h: + if not a_h["deployment"]: + continue + a_match = False + + if atch_w: + for a_w in atch_w: + if a_h["serialNumber"] == a_w["serialNumber"]: + # Have is already in diff, no need to continue looking for it. + a_match = True + break + if not a_match: + a_h.update({"deployment": False}) + a_h.update({"torPorts": ""}) + a_h.update({"switchPorts": ""}) + a_h.update({"detachSwitchPorts": ""}) + r_net_list.append(a_h) + if diff_deploy.get(a_h["serialNumber"]): + diff_deploy[a_h["serialNumber"]].append(a_h["networkName"]) + else: + diff_deploy[a_h["serialNumber"]] = [a_h["networkName"]] + break + + if not h_in_w: + # This block will take care of deleting all the attachments which are in NDFC but + # are not mentioned in the playbook. The playbook just has the network, but, does not have any attach + # under it. 
+ found = next( + ( + net + for net in self.want_create + if net["networkName"] == have_a["networkName"] + ), + None, + ) + if found: + atch_h = have_a["lanAttachList"] + for a_h in atch_h: + if not a_h["deployment"]: + continue + a_h.update({"deployment": False}) + a_h.update({"torPorts": ""}) + a_h.update({"switchPorts": ""}) + a_h.update({"detachSwitchPorts": ""}) + r_net_list.append(a_h) + if diff_deploy.get(a_h["serialNumber"]): + diff_deploy[a_h["serialNumber"]].append(a_h["networkName"]) + else: + diff_deploy[a_h["serialNumber"]] = [a_h["networkName"]] + + if r_net_list: + in_diff = False + for d_attach in self.diff_attach: + if have_a["networkName"] == d_attach["networkName"]: + in_diff = True + d_attach["lanAttachList"].extend(r_net_list) + break + + if not in_diff: + r_net_dict = { + "networkName": have_a["networkName"], + "lanAttachList": r_net_list, + } + diff_attach.append(r_net_dict) + + self.diff_create = diff_create + self.diff_attach = diff_attach + self.diff_deploy = diff_deploy + return warn_msg + + def get_deploy_diff(self, diff_deploy): + """ + Get the difference between the desired deployment and the current deployment. + + Args: + diff_deploy (dict): A dictionary representing the diff for deployment. + + Returns: + dict: A dictionary representing the difference between the desired deployment and the current deployment. + """ + + for w_deploy in self.want_deploy: + if diff_deploy.get(w_deploy): + for net in self.want_deploy[w_deploy]: + if net in diff_deploy[w_deploy]: + continue + else: + if diff_deploy.get(w_deploy): + diff_deploy[w_deploy].append(net) + else: + diff_deploy[w_deploy] = [net] + elif self.have_deploy.get(w_deploy): + for net in self.want_deploy[w_deploy]: + if net in self.have_deploy[w_deploy]: + continue + else: + if diff_deploy.get(w_deploy): + diff_deploy[w_deploy].append(net) + else: + diff_deploy[w_deploy] = [net] + else: + diff_deploy[w_deploy] = self.want_deploy[w_deploy] + + def compute_deploy_diff(self, w_attach, diff_deploy): + + if self.want_deploy.get(w_attach["serialNumber"]): + if w_attach["networkName"] in self.want_deploy[w_attach["serialNumber"]]: + if diff_deploy.get(w_attach["serialNumber"]): + diff_deploy[w_attach["serialNumber"]].append(w_attach["networkName"]) + else: + diff_deploy[w_attach["serialNumber"]] = [w_attach["networkName"]] + + def get_attach_ports(self, w_attach, h_attach, replace=False): + """ + Get the attached ports for a given switch attachment. + + Args: + w_attach (dict): The switch attachment details from the desired configuration. + h_attach (dict): The switch attachment details from the existing configuration. + replace (bool, optional): Whether to replace the switch ports or not. Defaults to False. + + Returns: + None + + """ + + h_sw_ports = h_attach["switchPorts"] + w_sw_ports = w_attach["switchPorts"] + + if sorted(h_sw_ports) != sorted(w_sw_ports): + atch_sw_ports = list( + set(w_sw_ports) - set(h_sw_ports) + ) + w_attach.update( + { + "switchPorts": ",".join(atch_sw_ports) + if atch_sw_ports + else "" + } + ) + + if replace: + dtach_sw_ports = list( + set(h_sw_ports) - set(w_sw_ports) + ) + w_attach.update( + { + "detachSwitchPorts": ",".join(dtach_sw_ports) + if dtach_sw_ports + else "" + } + ) + else: + w_attach.update( + { + "switchPorts": ",".join(w_sw_ports) + if w_sw_ports + else "" + } + ) + + def get_attach_torports(self, w_attach, h_attach, replace=False): + """ + Get the attached TOR ports based on the given parameters. 
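+
+        When TOR ports are attached, w_attach["torPorts"] is collapsed into the
+        string form sent to NDFC, e.g. "Tor1(Ethernet1/13,Ethernet1/14)".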
+ + Args: + w_attach (dict): The dictionary containing the attachment information for the desired attachment. + h_attach (dict): The dictionary containing the attachment information for the existing attachment. + replace (bool, optional): Flag indicating whether to replace the TOR ports or not. Defaults to False. + + Returns: + None + + """ + + if w_attach.get("torPorts") != "": + for tor_w in w_attach["torPorts"]: + if h_attach.get("torPorts") != "": + for tor_h in h_attach["torPorts"]: + if tor_w["switch"] == tor_h["switch"]: + atch_tor_ports = [] + h_tor_ports = tor_h["ports"] + w_tor_ports = tor_w["ports"] + + if sorted(h_tor_ports) != sorted(w_tor_ports): + atch_tor_ports = list( + set(w_tor_ports) - set(h_tor_ports) + ) + + if replace: + atch_tor_ports = w_tor_ports + else: + atch_tor_ports.extend(h_tor_ports) + + torconfig = tor_w["switch"] + "(" + (",".join(atch_tor_ports)).strip() + ")" + w_attach.update({"torPorts": torconfig}) + else: + torconfig = tor_w["switch"] + "(" + (",".join(tor_w["ports"])).strip() + ")" + w_attach.update({"torPorts": torconfig}) + else: + if replace: + w_attach.update({"torPorts": ""}) + elif h_attach.get("torPorts") != "": + for tor_h in h_attach.get("torPorts"): + torconfig = tor_h["switch"] + "(" + (",".join(tor_h["ports"])).strip() + ")" + w_attach.update({"torPorts": torconfig}) + + def get_diff_merge(self, replace=False): + """ + This method calculates the differences between the `have_create`, `want_create`, + `have_attach`, and `want_attach` attributes of the object. It then updates the + `diff_create`, `diff_create_update`, `diff_attach`, and `diff_deploy` attributes + accordingly. Finally, it returns a warning message if any. + + Parameters: + replace (bool): A flag indicating whether to replace existing networks. + Defaults to False. + + Returns: + warn_msg (str): A warning message, if any. 
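+
+        Note:
+            diff_deploy maps a switch serial number to the list of network names
+            to deploy on that switch, e.g. {"SAL1234ABCD": ["net1", "net2"]}
+            (serial number shown for illustration only).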
+ + """ + + diff_create = [] + diff_create_update = [] + diff_attach = [] + diff_deploy = {} + warn_msg = None + + w_create, w_create_update, diff_not_w_in_h = get_diff(self.have_create, self.want_create) + + if w_create_update: + diff_create_update.extend(w_create_update) + for net in w_create_update: + for attach in self.have_attach: + if net["networkName"] == attach["networkName"]: + for atch in attach.get("lanAttachList"): + if atch["deployment"]: + if diff_deploy.get(atch["serialNumber"]): + diff_deploy[atch["serialNumber"]].append(atch["networkName"]) + else: + diff_deploy[atch["serialNumber"]] = [atch["networkName"]] + + if w_create: + diff_create.extend(w_create) + + for want_a in self.want_attach: + found = False + for have_a in self.have_attach: + if want_a["networkName"] == have_a["networkName"]: + found = True + w_attach, w_attach_update, diff_attach_not_w_in_h = get_diff(have_a["lanAttachList"], want_a["lanAttachList"]) + if w_attach: + base = want_a.copy() + del base["lanAttachList"] + base.update({"lanAttachList": w_attach}) + diff_attach.append(base) + for attach in w_attach: + self.compute_deploy_diff(attach, diff_deploy) + + if w_attach_update: + base = want_a.copy() + del base["lanAttachList"] + for attach in w_attach_update: + for h_attach in have_a.get("lanAttachList"): + if attach["serialNumber"] == h_attach["serialNumber"]: + self.get_attach_ports(attach, h_attach, replace) + if h_attach["torPorts"] or attach["torPorts"]: + self.get_attach_torports(attach, h_attach, replace) + else: + attach.update({"torPorts": ""}) + self.compute_deploy_diff(attach, diff_deploy) + break + base.update({"lanAttachList": w_attach_update}) + diff_attach.append(base) + + if not found and want_a.get("lanAttachList"): + diff_attach.append(want_a) + for attach in want_a.get("lanAttachList"): + attach.update( + { + "switchPorts": ",".join(attach["switchPorts"]) + if attach.get("switchPorts") + else "" + } + ) + if attach.get("torPorts"): + for tor_h in attach.get("torPorts"): + torconfig = tor_h["switch"] + "(" + (",".join(tor_h["ports"])).strip() + ")" + attach.update({"torPorts": torconfig}) + self.compute_deploy_diff(attach, diff_deploy) + + self.get_deploy_diff(diff_deploy) + self.diff_create = diff_create + self.diff_create_update = diff_create_update + self.diff_attach = diff_attach + self.diff_deploy = diff_deploy + return warn_msg + + def get_diff_query(self): + """ + Retrieves the difference query for the network. + + It queries the network and its attachments, and constructs a query object containing the network details + and the attached networks. + + Returns: + list: A list of query objects, each containing the network details and the attached networks. 
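+                Each entry has the form
+                {"Network": {...network object...}, "attach": [...lanAttachList entries...]}.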
+ + Raises: + None + + """ + + method = "GET" + + if self.config: + query = [] + if self.have_create or self.have_attach: + for want_c in self.want_create: + # Query the Network + item = {"Network": {}, "attach": []} + path = self.paths["GET_NET_NAME"].format( + self.fabric, want_c["networkName"] + ) + network = dcnm_send(self.module, method, path) + + if not network["DATA"]: + continue + + net = network["DATA"] + if want_c["networkName"] == net["networkName"]: + item["Network"] = net + item["Network"]["networkTemplateConfig"] = net["networkTemplateConfig"] + + # Query the Attachment for the found Networks + path = self.paths["GET_NET_ATTACH"].format( + self.fabric, want_c["networkName"] + ) + net_attach_objects = dcnm_send(self.module, method, path) + + if not net_attach_objects["DATA"]: + return + + for net_attach in net_attach_objects["DATA"]: + if want_c["networkName"] == net_attach["networkName"]: + if not net_attach.get("lanAttachList"): + continue + attach_list = net_attach["lanAttachList"] + + for attach in attach_list: + # append the attach network details + item["attach"].append(attach) + query.append(item) + + else: + query = [] + path = self.paths["GET_NET"].format(self.fabric) + networks = dcnm_send(self.module, method, path) + + if not networks["DATA"]: + return + + for net in networks["DATA"]: + item = {"Network": {}, "attach": []} + # append the network details + item["Network"] = net + item["Network"]["networkTemplateConfig"] = net["networkTemplateConfig"] + + # fetch the attachment for the network + path = self.paths["GET_NET_ATTACH"].format( + self.fabric, net["networkName"] + ) + net_attach_objects = dcnm_send(self.module, method, path) + + if not net_attach_objects["DATA"]: + return + + for net_attach in net_attach_objects["DATA"]: + if not net_attach.get("lanAttachList"): + continue + attach_list = net_attach["lanAttachList"] + + for attach in attach_list: + # append the attach network details + item["attach"].append(attach) + query.append(item) + + self.query = query + + def get_have(self): + """ + Retrieves information about the networks and their attachments in the current fabric. + + Returns: + None + + Raises: + AnsibleFailJson: If the fabric is not present on NDFC. 
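+
+        The results are stored in self.have_create (network payloads),
+        self.have_attach (per-network lanAttachList entries normalized to the
+        outgoing payload format) and self.have_deploy (serial number mapped to
+        the list of deployed network names).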
+ + """ + + have_create = [] + have_attach = [] + have_deploy = {} + curr_networks = [] + + state = self.params["state"] + + method = "GET" + path = self.paths["GET_NET"].format(self.fabric) + + net_objects = dcnm_send(self.module, method, path) + + missing_fabric, not_ok = self.handle_response(net_objects, "query_dcnm") + + if missing_fabric or not_ok: + msg1 = "Fabric {0} not present on NDFC".format(self.fabric) + msg2 = "Unable to find Networks under fabric: {0}".format(self.fabric) + + self.module.fail_json(msg=msg1 if missing_fabric else msg2) + + for net in net_objects["DATA"]: + json_to_dict = net["networkTemplateConfig"] + net.update({"networkTemplateConfig": json_to_dict}) + del net["displayName"] + del net["serviceNetworkTemplate"] + del net["source"] + del net["tenantName"] + del net["interfaceGroups"] + del net["primaryNetworkId"] + del net["type"] + del net["primaryNetworkName"] + del net["vlanId"] + del net["hierarchicalKey"] + del net["networkStatus"] + + curr_networks.append(net["networkName"]) + have_create.append(net) + + if not curr_networks: + return + + net_attach_objects = dcnm_get_url( + self.module, + self.fabric, + self.paths["GET_NET_ATTACH"], + ",".join(curr_networks), + "networks", + ) + + if not net_attach_objects["DATA"]: + return + + for net_attach in net_attach_objects["DATA"]: + if not net_attach.get("lanAttachList"): + continue + attach_list = net_attach["lanAttachList"] + for attach in attach_list: + deployment = attach["isLanAttached"] + if ( + not bool(deployment) + or attach["lanAttachState"] == "OUT-OF-SYNC" + or attach["lanAttachState"] == "PENDING" + or attach["lanAttachState"] == "FAILED" + ): + deployed = False + else: + deployed = True + + if deployed: + if have_deploy.get(attach["switchSerialNo"]): + have_deploy[attach["switchSerialNo"]].append(attach["networkName"]) + else: + have_deploy[attach["switchSerialNo"]] = [attach["networkName"]] + + sn = attach["switchSerialNo"] + vlan = attach["vlanId"] + hports = attach["portNames"] + attach.update({"torPorts": ""}) + if attach["portNames"] and re.match(r"(\S+\(([Ee]thernet\d+\/\d+,?\s?)+\),?\s?)+", attach["portNames"]): + torlist = [] + sw_ports_list = attach["portNames"] + tor_list = sw_ports_list.split(") ") + for idx, tor in enumerate(tor_list): + if tor: + torports = {} + sw_port = tor.split(")") + eth_list = sw_port[0].split("(") + # idx 0 has the switch ports configured + # idx 1 onwards has the tor ports configured + if idx == 0: + hports = eth_list[1] + ports = sorted(re.split(", |,", hports)) + continue + else: + torports.update({"switch": eth_list[0]}) + torports.update({"ports": sorted(re.split(", |,", eth_list[1]))}) + torlist.append(torports) + torlist = sorted(torlist, key=lambda torlist: torlist["switch"]) + attach.update({"torPorts": torlist}) + elif attach["portNames"]: + ports = sorted(re.split(", |,", hports)) + else: + ports = [] + + # The deletes and updates below are done to update the incoming dictionary format to + # match to what the outgoing payload requirements mandate. + # Ex: 'vlanId' in the attach section of incoming payload needs to be changed to 'vlan' + # on the attach section of outgoing payload. 
+ + if ( + state == "deleted" + and ( + attach["lanAttachState"] == "OUT-OF-SYNC" + or attach["lanAttachState"] == "PENDING" + or attach["lanAttachState"] == "FAILED" + ) + ): + deployment = True + + del attach["vlanId"] + del attach["switchSerialNo"] + del attach["switchName"] + del attach["switchRole"] + del attach["lanAttachState"] + del attach["isLanAttached"] + del attach["fabricName"] + del attach["portNames"] + del attach["switchDbId"] + del attach["networkId"] + del attach["entityName"] + del attach["peerSerialNo"] + + if "displayName" in attach.keys(): + del attach["displayName"] + if "interfaceGroups" in attach.keys(): + del attach["interfaceGroups"] + + attach.update({"fabric": self.fabric}) + attach.update({"vlan": vlan}) + attach.update({"serialNumber": sn}) + attach.update({"deployment": deployment}) + attach.update({"attached": deployment}) + attach.update({"extensionValues": ""}) + attach.update({"instanceValues": ""}) + attach.update({"freeformConfig": ""}) + attach.update({"dot1QVlan": 1}) + attach.update({"detachSwitchPorts": ""}) + attach.update({"switchPorts": ports}) + attach.update({"untagged": False}) + + have_attach = net_attach_objects["DATA"] + + self.have_create = have_create + self.have_attach = have_attach + self.have_deploy = have_deploy + + def update_create_params(self, net): + """ + Update the create parameters for a network. + + Args: + net (dict): The network details. + + Returns: + dict: The updated create parameters for the network. + + """ + + if not net: + return net + + state = self.params["state"] + + n_template = net.get("net_template", "Default_Network_Universal") + ne_template = net.get( + "net_extension_template", "Default_Network_Extension_Universal" + ) + + if state == "deleted" or state == "query": + net_upd = { + "fabric": self.fabric, + "networkName": net["net_name"], + "networkId": net["net_id"], + "networkTemplate": n_template, + "networkExtensionTemplate": ne_template, + } + else: + net_upd = { + "d_key": "networkName", + "fabric": self.fabric, + "vrf": net["vrf_name"], + "networkName": net["net_name"], + "networkId": net["net_id"], + "networkTemplate": n_template, + "networkExtensionTemplate": ne_template, + } + net_upd.update({"networkTemplateConfig": net["network_template_config"]}) + + return net_upd + + def update_attach_params(self, attach, net_name): + """ + Update the attachment parameters based on the provided attachment and network name. + + Args: + attach (dict): The attachment parameters to be updated. + net_name (str): The name of the network. 
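+
+        Note:
+            For state "merged", switchPorts and torPorts already present on the
+            existing attachment are merged (set union) with the ports given in
+            the playbook rather than replaced.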
+ + Returns: + None + + Raises: + None + """ + + hports = [] + htorlist = [] + ports = [] + state = self.params["state"] + + if state == "deleted" or state == "query": + return + + serial = "" + attach["ipAddress"] = dcnm_get_ip_addr_info( + self.module, attach["ipAddress"], None, None + ) + for ip, ser in self.ip_sn.items(): + if ip == attach["ipAddress"]: + serial = ser + + if not serial: + self.module.fail_json( + msg="Fabric: {0} does not have the switch: {1}".format( + self.fabric, attach["ipAddress"] + ) + ) + attach.update({"serialNumber": serial}) + + if attach["attached"]: + attach.update({"deployment": True}) + else: + attach.update({"deployment": False}) + + if not attach.get("fabric"): + attach.update({"fabric": self.fabric}) + + for atch_h in self.have_attach: + if net_name == atch_h["networkName"]: + hv_attach = atch_h["lanAttachList"] + for h_attach in hv_attach: + if attach["serialNumber"] == h_attach["serialNumber"]: + if attach["vlan"] == "-1": + attach.update({"vlan": h_attach["vlan"]}) + if state == "merged" and h_attach["switchPorts"]: + hports = h_attach["switchPorts"] + if state == "merged" and h_attach["torPorts"]: + htorlist = h_attach["torPorts"] + break + + wports = sorted(attach["switchPorts"]) + if wports: + if state == "merged" and hports: + if wports != hports: + wports = list(set(wports) | set(hports)) + ports = sorted(wports) + else: + ports = wports + elif state == "merged" and hports: + ports = hports + + if attach["torPorts"]: + for tor in attach["torPorts"]: + torports = {} + torlist = [] + for htor in htorlist: + if htor["switch"] == tor["switch"]: + if sorted(tor["ports"]) != sorted(htor["ports"]): + wtor_ports = list(set(tor["ports"]) | set(htor["ports"])) + else: + wtor_ports = tor["ports"] + wtor_ports = sorted(wtor_ports) + torports.update({"switch": tor["switch"]}) + torports.update({"ports": wtor_ports}) + torlist.append(torports) + torlist = sorted(torlist, key=lambda torlist: torlist['switch']) + break + if torlist: + attach.update({"torPorts": torlist}) + else: + if state == "merged" and htorlist: + attach.update({"torPorts": htorlist}) + else: + attach.update({"torPorts": ""}) + + if attach["detachSwitchPorts"]: + attach.update({"detachSwitchPorts": attach["detachSwitchPorts"]}) + else: + attach.update({"detachSwitchPorts": ""}) + + if ports: + attach.update({"switchPorts": ports}) + else: + attach.update({"switchPorts": ""}) + + attach.update({"networkName": net_name}) + attach.update({"d_key": "serialNumber"}) + + def get_want(self): + """ + Retrieves the desired configuration for creating, attaching, and deploying networks. + + Returns: + tuple: A tuple containing three lists: + - want_create: A list of dictionaries representing the parameters for creating networks. + - want_attach: A list of dictionaries representing the parameters for attaching networks. + - want_deploy: A dictionary mapping serial numbers to a list of network names to be deployed. 
+ + Raises: + None + """ + + want_create = [] + want_attach = [] + want_deploy = {} + + state = self.params["state"] + + if not self.config: + return + + for net in self.validated: + net_attach = {} + networks = [] + + want_create.append(self.update_create_params(net)) + + if not net.get("attach") or state == "deleted" or state == "query": + continue + + for attach in net["attach"]: + self.update_attach_params(attach, net["net_name"]) + networks.append(attach) + if attach["deploy"]: + if want_deploy.get(attach["serialNumber"]): + want_deploy[attach["serialNumber"]].append(attach["networkName"]) + else: + want_deploy[attach["serialNumber"]] = [attach["networkName"]] + del attach["deploy"] + + net_attach.update({"networkName": net["net_name"]}) + net_attach.update({"lanAttachList": networks}) + want_attach.append(net_attach) + + self.want_create = want_create + self.want_attach = want_attach + self.want_deploy = want_deploy + + def wait_for_del_ready(self): + + method = "GET" + if self.diff_delete: + for net in self.diff_delete: + state = False + path = self.paths["GET_NET_ATTACH"].format(self.fabric, net) + iter = 0 + while not state: + resp = dcnm_send(self.module, method, path) + state = True + iter += 1 + if resp["DATA"]: + attach_list = resp["DATA"][0]["lanAttachList"] + for atch in attach_list: + if ( + atch["lanAttachState"] == "OUT-OF-SYNC" + or atch["lanAttachState"] == "FAILED" + ): + if iter < 10: + self.diff_delete.update({net: "DEPLOYED"}) + state = False + time.sleep(self.WAIT_TIME_FOR_DELETE_LOOP) + else: + self.diff_delete.update({net: "OUT-OF-SYNC"}) + break + if atch["lanAttachState"] != "NA": + self.diff_delete.update({net: "DEPLOYED"}) + state = False + time.sleep(self.WAIT_TIME_FOR_DELETE_LOOP) + break + self.diff_delete.update({net: "NA"}) + + return True + + def update_ms_fabric(self, diff): + if not self.is_ms_fabric: + return + + for list_elem in diff: + for node in list_elem["lanAttachList"]: + node["fabric"] = self.sn_fab[node["serialNumber"]] + + def push_to_remote_update(self, path): + """ + Pushes the Network updates to the NDFC. + + Args: + path (str): RestAPI URL. + + Returns: + None + + Raises: + None + """ + + method = "PUT" + + for net in self.diff_create_update: + update_path = path + "/{0}".format(net["networkName"]) + resp = dcnm_send(self.module, method, update_path, json.dumps(net)) + self.result["response"].append(resp) + fail, self.result["changed"] = self.handle_response(resp, "create", self.result["changed"]) + if fail: + self.failure(resp) + + def push_to_remote_detach(self, path): + """ + Pushes the detach configuration to NDFC. + + Args: + path (str): RestAPI URL. + """ + + method = "POST" + + detach_path = path + "/attachments" + + # Update the fabric name to specific fabric which the switches are part of. + self.update_ms_fabric(self.diff_detach) + + for d_a in self.diff_detach: + for v_a in d_a["lanAttachList"]: + if v_a.get("d_key") is not None: + del v_a["d_key"] + + resp = dcnm_send( + self.module, method, detach_path, json.dumps(self.diff_detach) + ) + self.result["response"].append(resp) + fail, self.result["changed"] = self.handle_response(resp, "attach", self.result["changed"]) + if fail: + self.failure(resp) + + time.sleep(10) + + def push_to_remote_undeploy(self): + """ + Pushes the undeploy NDFC network. 
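+        Sends the per-switch undeploy payload (serial number mapped to a
+        comma-separated list of network names) to the NDFC networks deploy
+        endpoint.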
+ + Args: + None + + Returns: + None + + Raises: + None + """ + + method = "POST" + + deploy_path = "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/networks/deploy" + for value in self.diff_undeploy: + self.diff_undeploy[value] = ",".join(self.diff_undeploy[value]) + + resp = dcnm_send( + self.module, method, deploy_path, json.dumps(self.diff_undeploy) + ) + # Use the self.wait_for_del_ready() function to refresh the state + # of self.diff_delete dict and re-attempt the undeploy action if + # the state of the network is "OUT-OF-SYNC" + # self.wait_for_del_ready() + # for net, state in self.diff_delete.items(): + # if state == "OUT-OF-SYNC": + # resp = dcnm_send( + # self.module, method, deploy_path, json.dumps(self.diff_undeploy) + # ) + + self.result["response"].append(resp) + fail, self.result["changed"] = self.handle_response(resp, "deploy", self.result["changed"]) + if fail: + self.failure(resp) + + def push_to_remote_delete(self, path): + """ + Deletes networks in NDFC. + + Args: + path (str): RestAPI URL. + + Returns: + None + + Raises: + Exception: If the deletion of networks fails. + + """ + + method = "DELETE" + del_failure = "" + resp = "" + + if self.wait_for_del_ready(): + for net, state in self.diff_delete.items(): + if state == "OUT-OF-SYNC": + del_failure += net + "," + continue + delete_path = path + "/" + net + resp = dcnm_send(self.module, method, delete_path) + self.result["response"].append(resp) + fail, self.result["changed"] = self.handle_response(resp, "delete", self.result["changed"]) + if fail: + self.failure(resp) + + if del_failure: + fail_msg = "Deletion of Networks {0} has failed.".format(del_failure[:-1]) + self.failure(fail_msg) + + def push_to_remote_create(self, path): + """ + Pushes the created network templates to the NDFC. + + Args: + path (str): RestAPI URL. + + Returns: + None + + Raises: + None + """ + + method = "POST" + bulk_path = self.paths["BULK_NET"] + resp = dcnm_send(self.module, method, bulk_path, json.dumps(self.diff_create)) + self.result["response"].append(resp) + fail, self.result["changed"] = self.handle_response(resp, "create", self.result["changed"]) + if fail: + self.failure(resp) + + def push_to_remote_attach(self, path): + """ + Pushes the attachments to the NDFC. + + Args: + path (str): RestAPI URL. + + Returns: + None + + Raises: + Exception: If the attachments fail to be pushed. + """ + + method = "POST" + attach_path = path + "/attachments" + + # Update the fabric name to specific fabric which the switches are part of. + self.update_ms_fabric(self.diff_attach) + + for d_a in self.diff_attach: + for v_a in d_a["lanAttachList"]: + if v_a.get("d_key") is not None: + del v_a["d_key"] + if v_a.get("ipAddress") is not None: + del v_a["ipAddress"] + if v_a.get("attached") is not None: + del v_a["attached"] + + for attempt in range(0, 50): + resp = dcnm_send( + self.module, method, attach_path, json.dumps(self.diff_attach) + ) + update_in_progress = False + if resp.get("DATA") and isinstance(resp["DATA"], dict): + for key in resp["DATA"].keys(): + if re.search( + r"Failed.*Please try after some time", str(resp["DATA"][key]) + ): + update_in_progress = True + if update_in_progress: + time.sleep(1) + continue + break + self.result["response"].append(resp) + fail, self.result["changed"] = self.handle_response(resp, "attach", self.result["changed"]) + # If we get here and an update_in_progress is True then + # not all of the attachments were successful which represents a + # failure condition. 
+ if fail or update_in_progress: + self.failure(resp) + + time.sleep(10) + + def push_to_remote_deploy(self): + """ + Pushes the changes to the remote deployment. + + Args: + None + + Returns: + None + + Raises: + None + """ + + method = "POST" + + deploy_path = "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/networks/deploy" + for value in self.diff_deploy: + self.diff_deploy[value] = ",".join(self.diff_deploy[value]) + resp = dcnm_send( + self.module, method, deploy_path, json.dumps(self.diff_deploy) + ) + self.result["response"].append(resp) + fail, self.result["changed"] = self.handle_response(resp, "deploy", self.result["changed"]) + if fail: + self.failure(resp) + + def push_to_remote(self): + """ + Pushes the changes to the remote device. + + Args: + None + """ + + path = self.paths["GET_NET"].format(self.fabric) + + if self.diff_create_update: + self.push_to_remote_update(path) + + # The detach and un-deploy operations are executed before the create, attach, and deploy to particularly + # address cases where a VLAN of a network being deleted is re-used on a new network being created. This is + # needed especially for state: overridden. + + if self.diff_detach: + self.push_to_remote_detach(path) + + if self.diff_undeploy: + self.push_to_remote_undeploy() + + if self.diff_delete: + self.push_to_remote_delete(path) + + if self.diff_create: + self.push_to_remote_create(path) + + if self.diff_attach: + self.push_to_remote_attach(path) + + if self.diff_deploy: + self.push_to_remote_deploy() + + def get_arg_spec(self, net): + """ + Retrieves the argument specification for a given network. + + Args: + net (dict): The network details. + + Returns: + dict: The argument specification for the network. + + """ + + template_name = net.get("net_template", False) + ext_template_name = net.get("net_extension_template", False) + net_uni_dyn_spec = {} + net_ext_dyn_spec = {} + + if self.dyn_arg_spec.get(template_name): + net_uni_dyn_spec = self.dyn_arg_spec[template_name] + else: + path = self.paths["TEMPLATE_WITH_NAME"].format(template_name) + net_uni_dyn_spec = build_arg_spec(self.module, path) + self.dyn_arg_spec.update({template_name: net_uni_dyn_spec}) + + if self.dyn_arg_spec.get(ext_template_name): + net_ext_dyn_spec = self.dyn_arg_spec[ext_template_name] + else: + path = self.paths["TEMPLATE_WITH_NAME"].format(ext_template_name) + net_ext_dyn_spec = build_arg_spec(self.module, path) + self.dyn_arg_spec.update({ext_template_name: net_ext_dyn_spec}) + + net_dyn_spec = {**net_ext_dyn_spec, **net_uni_dyn_spec} + return net_dyn_spec + + def validate_input(self): + """ + Parse the playbook values and validate them against parameter specifications. + + This method validates the input parameters provided in the playbook against the parameter specifications. + It performs validation for different network configurations and attachment configurations. + If any invalid parameters are found, it raises an exception with the details of the invalid parameters. + + Returns: + None + + Raises: + AnsibleFailJson: If any invalid parameters are found in the playbook. 
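+
+        The static spec covers the top-level network options; the spec for
+        network_template_config is built dynamically from the network/extension
+        templates on NDFC via get_arg_spec()/build_arg_spec().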
+ + """ + + net_template = [] + state = self.params["state"] + + net_static_spec = dict( + net_name=dict(required=True, type="str", length_max=64), + net_id=dict(required=True, type="int", range_max=16777214), + vrf_name=dict(required=True, type="str", length_max=32), + net_template=dict(type="str", default="Default_Network_Universal"), + net_extension_template=dict( + type="str", default="Default_Network_Extension_Universal" + ), + network_template_config=dict(type="dict", default={}) + ) + + net_attach_spec = dict( + attached=dict(type="bool", default=True), + detachSwitchPorts=dict(type="list", default=[]), + dot1QVlan=dict(type="int", default="1"), + extensionValues=dict(type="string", default=""), + fabric=dict(type="str", default=""), + freeformConfig=dict(type="string", default=""), + ipAddress=dict(required=True, type="string"), + switchPorts=dict(type="list", default=[]), + torPorts=dict(type="list", default=[], elements="dict"), + untagged=dict(type="bool", default=False), + vlan=dict(type="int", default="-1"), + deploy=dict(type="bool", default=True), + ) + + tor_att_spec = dict( + switch=dict(required=True, type="str"), + ports=dict(required=False, type="list", default=[]), + ) + + if self.config: + msg = None + # Validate net params + valid_net, invalid_params = validate_list_of_dicts( + self.config, net_static_spec + ) + + if invalid_params: + msg = "Invalid parameters in playbook: {0}".format( + "\n".join(invalid_params) + ) + self.module.fail_json(msg=msg) + + if state == "deleted" or state == "query": + self.validated = valid_net + return + + for net in valid_net: + net_template = [] + att_present = False + net_dyn_spec = self.get_arg_spec(net) + if net.get("network_template_config"): + if net["network_template_config"].get("attach"): + valid_att, invalid_att = validate_list_of_dicts( + net["network_template_config"]["attach"], net_attach_spec + ) + invalid_params.extend(invalid_att) + att_present = True + + net_template.append(net["network_template_config"]) + valid_dyn_net, invalid_net = validate_list_of_dicts( + net_template, net_dyn_spec + ) + invalid_params.extend(invalid_net) + resolve_dependency(net_dyn_spec, valid_dyn_net[0]) + net["network_template_config"] = valid_dyn_net[0] + + if att_present: + net["attach"] = valid_att + for attach in net["attach"]: + if attach.get("switchPorts"): + attach["switchPorts"] = [port.capitalize() for port in attach["switchPorts"]] + if attach.get("torPorts"): + valid_tor, invalid_tor = validate_list_of_dicts( + attach["torPorts"], tor_att_spec + ) + invalid_params.extend(invalid_tor) + attach["torPorts"] = valid_tor + for tor in attach["torPorts"]: + if tor.get("ports"): + tor["ports"] = [port.capitalize() for port in tor["ports"]] + else: + attach["torPorts"] = [] + + self.validated.append(net) + + if invalid_params: + msg = "Invalid parameters in playbook: {0}".format( + "\n".join(invalid_params) + ) + self.module.fail_json(msg=msg) + + def format_diff(self): + """ + Formats the difference between network configurations. + + Returns: + list: A list of dictionaries representing the formatted differences. 
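+
+        The internal create/attach payloads are mapped back to the playbook
+        keys (net_name, vrf_name, net_id, net_template, net_extension_template,
+        attach) so the reported diff matches the input format.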
+ """ + + diff = [] + + diff_create = copy.deepcopy(self.diff_create) + diff_create_update = copy.deepcopy(self.diff_create_update) + diff_attach = copy.deepcopy(self.diff_attach) + diff_detach = copy.deepcopy(self.diff_detach) + diff_deploy = copy.deepcopy(self.diff_deploy) + diff_undeploy = copy.deepcopy(self.diff_undeploy) + + diff_create.extend(diff_create_update) + diff_attach.extend(diff_detach) + + for want_d in diff_create: + + found_a = next( + ( + net + for net in diff_attach + if net["networkName"] == want_d["networkName"] + ), + None, + ) + + found_c = want_d + found_c.update({"net_name": found_c["networkName"]}) + found_c.update({"vrf_name": found_c.get("vrf", "NA")}) + found_c.update({"net_id": found_c["networkId"]}) + found_c.update({"net_template": found_c["networkTemplate"]}) + found_c.update( + {"net_extension_template": found_c["networkExtensionTemplate"]} + ) + found_c.update({"attach": []}) + + del found_c["fabric"] + del found_c["networkName"] + del found_c["networkId"] + del found_c["networkTemplate"] + del found_c["networkExtensionTemplate"] + del found_c["vrf"] + del found_c["d_key"] + + if found_a: + attach = found_a.get("lanAttachList") + for atch in attach: + for net in diff_deploy.get(atch["serialNumber"], []): + if net == found_c["net_name"]: + atch.update({"deploy": True}) + break + else: + for net in diff_undeploy.get(atch["serialNumber"], []): + if net == found_c["net_name"]: + atch.update({"deploy": False}) + break + if atch.get("d_key"): + del atch["d_key"] + if atch.get("deployment", None) is not None: + del atch["deployment"] + if atch.get("serialNumber"): + del atch["serialNumber"] + found_c["attach"].extend(attach) + + diff.append(found_c) + + self.diff_input_format = diff + + def handle_response(self, resp, op, change=False): + """ + Handles the response received from the NDFC API. + + Args: + resp (dict): The response received from the NDFC API. + op (str): The operation being performed. + + Returns: + tuple: A tuple containing two boolean values - `fail` and `changed`. + - `fail` (bool): Indicates whether the operation failed or not. + - `changed` (bool): Indicates whether the state of the system was changed. + + """ + + fail = False + changed = True + + res = resp.copy() + + if op == "query_dcnm": + # This if block handles responses to the query APIs against NDFC. + # Basically all GET operations. + # + if res.get("ERROR") == "Not Found" and res["RETURN_CODE"] == 404: + return True, False + if res["RETURN_CODE"] != 200 or res["MESSAGE"] != "OK": + return False, True + return False, False + + # Responses to all other operations POST and PUT are handled here. + if res.get("MESSAGE") != "OK" or res["RETURN_CODE"] != 200: + fail = True + changed = False + return fail, changed + if res.get("ERROR"): + fail = True + changed = False + if op == "attach" and "is in use already" in str(res.values()): + fail = True + changed = False | change + if op == "attach" and "Invalid interfaces" in str(res.values()): + fail = True + changed = True + if op == "deploy" and "No switches PENDING for deployment" in str(res.values()): + changed = False | change + + return fail, changed + + def failure(self, resp): + + self.module.fail_json(msg=resp) + + def dcnm_update_network_information(self, want, have, cfg): + """ + Update the network information based on the provided 'want' and 'have' dictionaries. + + Args: + want (dict): The dictionary containing the desired network template configuration. 
+ have (dict): The dictionary containing the current network template configuration. + cfg (dict): The dictionary containing additional configuration options. + + Returns: + None + + Raises: + None + """ + + dict_want = want["networkTemplateConfig"] + dict_have = have["networkTemplateConfig"] + + for key in dict_want.keys(): + if cfg["network_template_config"].get(key, None) is None: + dict_want[key] = dict_have[key] + + want.update({"networkTemplateConfig": dict_want}) + + def update_want(self): + """ + Updates the 'want_create' list based on certain conditions. + + If the 'want_create' list is empty, the method returns immediately. + For each network in the 'want_create' list, the method checks if there is a matching network in the 'have_create' list. + If a match is found, the method also checks if the network is included in the 'config' list. + If both conditions are met, the method calls 'dcnm_update_network_information' to update the network information. + + Returns: + None + """ + + if self.want_create == []: + return + + for net in self.want_create: + # Get the matching have to copy values if required + match_have = [ + have + for have in self.have_create + if ((net["networkName"] == have["networkName"])) + ] + if match_have == []: + continue + + # Get the network from self.config to check if a particular object is included or not + match_cfg = [ + cfg for cfg in self.config if ((net["networkName"] == cfg["net_name"])) + ] + if match_cfg == []: + continue + + self.dcnm_update_network_information(net, match_have[0], match_cfg[0]) + + def update_module_info(self): + + """ + Routine to update version and fabric details + + Parameters: + None + + Returns: + None + """ + + self.dcnm_version = dcnm_version_supported(self.module) + self.inventory_data = get_fabric_inventory_details( + self.module, self.fabric + ) + self.ip_sn, self.hn_sn = get_ip_sn_dict(self.inventory_data) + self.ip_fab, self.sn_fab = get_ip_sn_fabric_dict(self.inventory_data) + self.fabric_det = get_fabric_details(self.module, self.fabric) + + self.is_ms_fabric = ( + True if self.fabric_det.get("fabricType") == "MFD" else False + ) + + if self.dcnm_version < 12: + self.module.fail_json( + msg="dcnm_networkv2 module is only supported on NDFC. 
It is not support on DCNM" + ) + else: + self.paths = self.dcnm_network_paths[12] + + +def main(): + """main entry point for module execution""" + + element_spec = dict( + fabric=dict(required=True, type="str"), + config=dict(required=False, type="list", elements="dict"), + state=dict( + default="merged", + choices=["merged", "replaced", "deleted", "overridden", "query"], + ), + ) + + module = AnsibleModule(argument_spec=element_spec, supports_check_mode=True) + + dcnm_netv2 = DcnmNetworkv2(module) + + dcnm_netv2.update_module_info() + + if not dcnm_netv2.ip_sn: + module.fail_json( + msg="Fabric {0} missing on DCNM or does not have any switches".format( + dcnm_netv2.fabric + ) + ) + + dcnm_netv2.validate_input() + dcnm_netv2.get_have() + dcnm_netv2.get_want() + + warn_msg = None + + if module.params["state"] == "merged": + dcnm_netv2.update_want() + warn_msg = dcnm_netv2.get_diff_merge() + + if module.params["state"] == "replaced": + warn_msg = dcnm_netv2.get_diff_replace() + + if module.params["state"] == "overridden": + warn_msg = dcnm_netv2.get_diff_override() + + if module.params["state"] == "deleted": + dcnm_netv2.get_diff_delete() + + if module.params["state"] == "query": + dcnm_netv2.get_diff_query() + dcnm_netv2.result["response"] = dcnm_netv2.query + + dcnm_netv2.result["warnings"].append(warn_msg) if warn_msg else [] + + if ( + dcnm_netv2.diff_create + or dcnm_netv2.diff_attach + or dcnm_netv2.diff_deploy + or dcnm_netv2.diff_delete + or dcnm_netv2.diff_create_update + or dcnm_netv2.diff_detach + or dcnm_netv2.diff_undeploy + ): + dcnm_netv2.result["changed"] = True + else: + dcnm_netv2.result["changed"] = False + module.exit_json(**dcnm_netv2.result) + + dcnm_netv2.format_diff() + dcnm_netv2.result["diff"] = dcnm_netv2.diff_input_format + + if module.check_mode: + dcnm_netv2.result["changed"] = False + module.exit_json(**dcnm_netv2.result) + + dcnm_netv2.push_to_remote() + + module.exit_json(**dcnm_netv2.result) + + +if __name__ == "__main__": + main() diff --git a/tests/integration/targets/dcnm_networkv2/defaults/main.yaml b/tests/integration/targets/dcnm_networkv2/defaults/main.yaml new file mode 100644 index 000000000..55a93fc23 --- /dev/null +++ b/tests/integration/targets/dcnm_networkv2/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +testcase: "*" \ No newline at end of file diff --git a/tests/integration/targets/dcnm_networkv2/meta/main.yaml b/tests/integration/targets/dcnm_networkv2/meta/main.yaml new file mode 100644 index 000000000..5514b6a40 --- /dev/null +++ b/tests/integration/targets/dcnm_networkv2/meta/main.yaml @@ -0,0 +1 @@ +dependencies: [] \ No newline at end of file diff --git a/tests/integration/targets/dcnm_networkv2/tasks/dcnm.yaml b/tests/integration/targets/dcnm_networkv2/tasks/dcnm.yaml new file mode 100644 index 000000000..f5225200f --- /dev/null +++ b/tests/integration/targets/dcnm_networkv2/tasks/dcnm.yaml @@ -0,0 +1,30 @@ +--- +- name: collect dcnm test cases + find: + paths: ["{{ role_path }}/tests/dcnm"] + patterns: "{{ testcase }}.yaml" + connection: local + register: dcnm_cases + tags: sanity + +- set_fact: + test_cases: + files: "{{ dcnm_cases.files }}" + tags: sanity + +- name: set test_items + set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" + tags: sanity + +- name: run test cases (connection=httpapi) + include_tasks: "{{ test_case_to_run }}" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run + tags: sanity + +# - name: DELETED - Clean up any existing vrfs +# cisco.dcnm.dcnm_vrf: +# fabric: "{{ 
test_fabric }}" +# state: deleted +# tags: sanity diff --git a/tests/integration/targets/dcnm_networkv2/tasks/main.yaml b/tests/integration/targets/dcnm_networkv2/tasks/main.yaml new file mode 100644 index 000000000..452f202a4 --- /dev/null +++ b/tests/integration/targets/dcnm_networkv2/tasks/main.yaml @@ -0,0 +1,41 @@ +--- +- name: Remove all existing networks to start with a clean state + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + tags: sanity + +- name: Create vrfs required for this test and remove all other vrfs + cisco.dcnm.dcnm_vrf: + fabric: "{{ test_fabric }}" + state: overridden + config: + - vrf_name: ansible-vrf-int1 + vrf_id: 9008011 + vlan_id: 500 + attach: + - ip_address: "{{ ansible_switch1 }}" + - ip_address: "{{ ansible_switch2 }}" + deploy: true + - vrf_name: Tenant-1 + vrf_id: 9008012 + vlan_id: 501 + attach: + - ip_address: "{{ ansible_switch1 }}" + - ip_address: "{{ ansible_switch2 }}" + deploy: true + - vrf_name: Tenant-2 + vrf_id: 9008013 + vlan_id: 502 + attach: + - ip_address: "{{ ansible_switch1 }}" + - ip_address: "{{ ansible_switch2 }}" + deploy: true + tags: sanity + +# - { include: dcnm.yaml, tags: ['dcnm'] } + +- name: Include the NDFCI tasks + ansible.builtin.include_tasks: dcnm.yaml + tags: + - dcnm diff --git a/tests/integration/targets/dcnm_networkv2/tests/dcnm/deleted.yaml b/tests/integration/targets/dcnm_networkv2/tests/dcnm/deleted.yaml new file mode 100644 index 000000000..9720b4c08 --- /dev/null +++ b/tests/integration/targets/dcnm_networkv2/tests/dcnm/deleted.yaml @@ -0,0 +1,622 @@ +############################################## +## SETUP ## +############################################## + +- name: DELETED - Verify if fabric - Fabric1 is deployed. + cisco.dcnm.dcnm_rest: + method: GET + path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + register: result + +- assert: + that: + - 'result.response.DATA != None' + +- name: DELETED - setup - Clean up any existing networks + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + +- name: DELETED - Create, Attach and Deploy Single Network with multiple switch Attach + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1504 + tag: 14345 + mtu: 1500 + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + vlan: 1504 + deploy: true + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + untagged: false + vlan: 1504 + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - 
'(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + +############################################### +### DELETED ## +############################################### + +- name: DELETED - Delete Single Network with deleted state + cisco.dcnm.dcnm_networkv2: &conf + fabric: "{{ test_fabric }}" + state: deleted + config: + - net_name: ansible-net13 + net_id: 7005 + vrf_name: Tenant-1 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1504 + tag: 14345 + mtu: 1500 + register: result + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - 'result.response[0].MESSAGE == "OK"' + - 'result.response[1].MESSAGE == "OK"' + - 'result.response[2].MESSAGE == "OK"' + - 'result.response[2].METHOD == "DELETE"' + - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' + +- name: DELETED - conf - Idempotence + cisco.dcnm.dcnm_networkv2: *conf + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + - 'result.diff|length == 0' + +- name: DELETED - Create, Attach and Deploy Multiple Network with single switch Attach + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1503 + tag: 14345 + mtu: 1500 + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + vlan: 1503 + deploy: true + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7002 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + tag: 14346 + mtu: 1500 + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + untagged: false + vlan: 151 + deploy: true + register: result + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + - "query_result.response[1].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- name: DELETED - Delete Single Network with deleted state and verify other network is still there + cisco.dcnm.dcnm_networkv2: &conf1 + fabric: "{{ test_fabric }}" + state: deleted + config: + - net_name: ansible-net12 + net_id: 7002 + vrf_name: Tenant-2 + 
net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + tag: 14346 + mtu: 1500 + register: result + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - 'result.response[0].MESSAGE == "OK"' + - 'result.response[1].MESSAGE == "OK"' + - 'result.response[2].MESSAGE == "OK"' + - 'result.response[2].METHOD == "DELETE"' + - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' + + +- name: DELETED - conf - Idempotence + cisco.dcnm.dcnm_networkv2: *conf1 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + - 'result.diff|length == 0' + +- name: DELETED - Delete the other Single Network with deleted state and verify no network is present now + cisco.dcnm.dcnm_networkv2: &conf2 + fabric: "{{ test_fabric }}" + state: deleted + config: + - net_name: ansible-net13 + net_id: 7005 + vrf_name: Tenant-1 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1503 + tag: 14345 + mtu: 1500 + register: result + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - 'result.response[0].MESSAGE == "OK"' + - 'result.response[1].MESSAGE == "OK"' + - 'result.response[2].MESSAGE == "OK"' + - 'result.response[2].METHOD == "DELETE"' + - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' + +- name: DELETED - conf - Idempotence + cisco.dcnm.dcnm_networkv2: *conf2 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + - 'result.diff|length == 0' + +- name: DELETED - Create, Attach and Deploy Multiple Network with single switch Attach + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1503 + tag: 14345 + mtu: 1500 + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + vlan: 1503 + deploy: true + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7002 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + tag: 14346 + mtu: 1500 + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + untagged: false + vlan: 151 + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + - "query_result.response[1].parent.networkStatus is search('DEPLOYED')" + 
retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + +- name: DELETED - Delete all the networks + cisco.dcnm.dcnm_networkv2: &conf3 + fabric: "{{ test_fabric }}" + state: deleted + register: result + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - 'result.response[3].RETURN_CODE == 200' + - 'result.response[0].MESSAGE == "OK"' + - 'result.response[1].MESSAGE == "OK"' + - 'result.response[2].MESSAGE == "OK"' + - 'result.response[3].MESSAGE == "OK"' + - 'result.response[3].METHOD == "DELETE"' + - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' + +- name: DELETED - conf3 - Idempotence + cisco.dcnm.dcnm_networkv2: *conf3 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + - 'result.diff|length == 0' + +- name: DELETED - Create L2 only networks along with all dhcp, arp options + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + net_id: 7009 + vrf_name: NA + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 3509 + isLayer2Only: True + suppressArp: True + intfDescription: "test interface" + vlanName: "testvlan" + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + deploy: true + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + torPorts: [] + untagged: false + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + +- name: DELETED - setup - Clean up l2_only existing network + cisco.dcnm.dcnm_networkv2: &conf4 + fabric: "{{ test_fabric }}" + state: deleted + config: + - net_name: ansible-net13 + net_id: 7009 + vrf_name: NA + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 3509 + isLayer2Only: True + suppressArp: True + intfDescription: "test interface" + vlanName: "testvlan" + dhcpServers: 
{"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + register: result + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - 'result.response[0].MESSAGE == "OK"' + - 'result.response[1].MESSAGE == "OK"' + - 'result.response[2].MESSAGE == "OK"' + - 'result.response[2].METHOD == "DELETE"' + - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' + +- name: DELETED - conf4 - Idempotence + cisco.dcnm.dcnm_networkv2: *conf4 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + - 'result.diff|length == 0' + +- name: DELETED - Create a L2 only and L3 networks along with all dhcp, arp options + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + net_id: 7009 + vrf_name: NA + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 3509 + isLayer2Only: True + suppressArp: True + intfDescription: "test interface" + vlanName: "testvlan" + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + deploy: true + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7010 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + tag: 14346 + mtu: 1500 + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + untagged: false + vlan: 151 + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + - "query_result.response[1].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + +- name: DELETED - Delete all the networks + cisco.dcnm.dcnm_networkv2: &conf5 + fabric: "{{ test_fabric }}" + state: deleted + register: result + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - 'result.response[3].RETURN_CODE == 200' + - 'result.response[0].MESSAGE == "OK"' + - 'result.response[1].MESSAGE == "OK"' + - 'result.response[2].MESSAGE == "OK"' + - 'result.response[3].MESSAGE == "OK"' + - 'result.response[3].METHOD == "DELETE"' + - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[1].value == 
"SUCCESS"' + +- name: DELETED - conf5 - Idempotence + cisco.dcnm.dcnm_networkv2: *conf5 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + - 'result.diff|length == 0' + +- name: DELETED - Delete Single Network with no network name + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + config: + - net_name: + net_id: 7005 + vrf_name: Tenant-2 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + register: result + ignore_errors: yes + +- assert: + that: + - 'result.changed == false' + - '"Invalid parameters in playbook: net_name : Required parameter not found" in result.msg' + +- name: DELETED - Delete Single Network with invalid network name which is not configured + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + config: + - net_name: network + net_id: 7005 + vrf_name: Tenant-2 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + register: result + ignore_errors: yes + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + +############################################### +### CLEAN-UP ## +############################################### + +- name: DELETED - setup - remove any networks + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted diff --git a/tests/integration/targets/dcnm_networkv2/tests/dcnm/merged.yaml b/tests/integration/targets/dcnm_networkv2/tests/dcnm/merged.yaml new file mode 100644 index 000000000..bedf305bf --- /dev/null +++ b/tests/integration/targets/dcnm_networkv2/tests/dcnm/merged.yaml @@ -0,0 +1,936 @@ +############################################## +## SETUP ## +############################################## + +- name: MERGED - Verify if fabric - Fabric1 is deployed. 
+ cisco.dcnm.dcnm_rest: + method: GET + path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + register: result + +- assert: + that: + - 'result.response.DATA != None' + +- name: MERGED - setup - Clean up any existing networks + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + +############################################## +## MERGED ## +############################################## + +- name: MERGED - Create New Network without Deploy + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 1432 + vlanId: 1500 + register: result + +- name: Query fabric for creation of Network Object + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].Network.displayName is search('ansible-net13')" + - "query_result.response[0].Network.networkId is search('7005')" + - "query_result.response[0].Network.vrf is search('Tenant-1')" + retries: 5 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + +- name: MERGED - setup - Clean up any existing networks + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + +- name: MERGED - Create New Network with Attach and deploy + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1501 + tag: 14345 + mtu: 1500 + vlanName: "test-vlan" + intfDescription: "test-interface" + attach: + - attached: true + dot1QVlan: 1 + extensionValues: "" + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + untagged: false + vlan: 1501 + deploy: True + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + +- name: MERGED - setup - Clean up any existing networks + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + +- name: MERGED - Create New Network with attach but no deploy + cisco.dcnm.dcnm_networkv2: &conf + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1502 + tag: 14345 + mtu: 1500 + vlanName: "test-vlan" + intfDescription: "test-interface" + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + freeformConfig: "" + instanceValues: "" + ipAddress: "{{ 
ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + vlan: 1502 + deploy: false + register: result + +- name: Query fabric state until networkStatus transitions to PENDING state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('PENDING')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + +- name: MERGED - conf - Idempotence + cisco.dcnm.dcnm_networkv2: *conf + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + +- name: MERGED - setup - Clean up any existing network + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + +- name: MERGED - Create, Attach and Deploy Multiple Network with Single Switch Attach + cisco.dcnm.dcnm_networkv2: &conf1 + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1503 + tag: 14345 + mtu: 1500 + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + vlan: 1503 + deploy: true + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7002 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + tag: 14346 + mtu: 1500 + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + untagged: false + vlan: 151 + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + - "query_result.response[1].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + +- name: MERGED - conf1 - Idempotence + cisco.dcnm.dcnm_networkv2: *conf1 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + +- name: MERGED - setup - Clean up any existing network + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + +- name: MERGED - Create, Attach and Deploy Single Network with Multiple Switch Attach + cisco.dcnm.dcnm_networkv2: &conf2 + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + 
network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1504 + tag: 14345 + mtu: 1500 + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + vlan: 1504 + deploy: true + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + untagged: false + vlan: 1504 + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED|PENDING')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + +- name: MERGED - conf2 - Idempotence + cisco.dcnm.dcnm_networkv2: *conf2 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + +- name: MERGED - setup - Clean up any existing network + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + +- name: MERGED - Create, Attach and Deploy Multiple Network with Multiple Switch Attach - One with and other without VlanId + cisco.dcnm.dcnm_networkv2: &conf3 + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 1500 + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + deploy: true + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + torPorts: [] + untagged: false + deploy: true + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7002 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 1505 + tag: 14345 + mtu: 1500 + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int3 }}", "{{ ansible_sw1_int4 }}"] + vlan: 1506 + untagged: false + deploy: true + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int3 }}", "{{ ansible_sw2_int4 }}"] + vlan: 1507 + untagged: false + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + - 
"query_result.response[1].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[2].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[3].value == "SUCCESS"' + +- name: MERGED - conf3 - Idempotence + cisco.dcnm.dcnm_networkv2: *conf3 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + +- name: MERGED - setup - Clean up any existing network + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + +- name: MERGED - Create, Attach and Deploy Single Network with Multiple Switch Attach - without vrf + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + net_id: 7005 + vrf_name: NA + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 1500 + vlanId: 1508 + isLayer2Only: True + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + deploy: true + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + torPorts: [] + untagged: false + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + +- name: MERGED - setup - Clean up any existing network + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + +- name: MERGED - Create L2 only networks along with all dhcp, arp options + cisco.dcnm.dcnm_networkv2: &conf4 + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + net_id: 7009 + vrf_name: NA + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 1509 + isLayer2Only: True + suppressArp: True + intfDescription: "test interface" + vlanName: "testvlan" + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + deploy: true + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + 
ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + torPorts: [] + untagged: false + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + +- name: MERGED - conf4 - Idempotence + cisco.dcnm.dcnm_networkv2: *conf4 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + +- name: MERGED - setup - Clean up any existing network + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + +- name: MERGED - Create L3 networks along with all dhcp, arp options + cisco.dcnm.dcnm_networkv2: &conf5 + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7009 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 1509 + suppressArp: false + intfDescription: "test interface" + vlanName: "testvlan" + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + deploy: true + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + torPorts: [] + untagged: false + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED|PENDING')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + +- name: MERGED - conf5 - Idempotence + cisco.dcnm.dcnm_networkv2: *conf5 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + +- name: MERGED - setup - Clean up any existing network + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + +- name: MERGED - Create L3 networks along with all dhcp, arp options without attach + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7009 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 
14345 + mtu: 2500 + vlanId: 3504 + suppressArp: false + intfDescription: "test interface" + vlanName: "testvlan" + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + register: result + +- name: Query fabric state until networkName is present + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkName is search('ansible-net13')" + retries: 5 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + +- name: MERGED - attach networks to already created network + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7009 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 3504 + suppressArp: false + intfDescription: "test interface" + vlanName: "testvlan" + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + deploy: true + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + torPorts: [] + untagged: false + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED|PENDING')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' + +- name: MERGED - Query the Network to check for configs + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response[0].parent.networkName == "ansible-net13"' + - 'result.response[0].parent.networkId == 7009' + - 'result.response[0].parent.networkTemplate == "Default_Network_Universal"' + - 'result.response[0].parent.vrf == "Tenant-1"' + - 'result.response[0].parent.networkTemplateConfig.suppressArp == "false"' + - 'result.response[0].parent.networkTemplateConfig.isLayer2Only == "false"' + - 'result.response[0].parent.networkTemplateConfig.intfDescription == "test interface"' + - 'result.response[0].parent.networkTemplateConfig.vlanName == "testvlan"' + - 'result.response[0].parent.networkTemplateConfig.mtu == "2500"' + - 'result.response[0].parent.networkTemplateConfig.vlanId == "3504"' + - 'result.response[0].attach[0].isLanAttached== true' + - 'result.response[0].attach[0].lanAttachState== "DEPLOYED"' + - 'result.response[0].attach[0].networkName== "ansible-net13"' + - '"{{ ansible_switch2 }}" or "{{ ansible_switch1 }}" in result.response[0].attach[0].ipAddress' + - 'result.response[0].attach[1].isLanAttached== true' + - 
'result.response[0].attach[1].lanAttachState== "DEPLOYED"' + - 'result.response[0].attach[1].networkName== "ansible-net13"' + - '"{{ ansible_switch1 }}" or "{{ ansible_switch2 }}" in result.response[0].attach[1].ipAddress' + +- name: MERGED - setup - Clean up any existing network + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: deleted + +- name: MERGED - Create Network with invalid network name + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 3504 + suppressArp: false + intfDescription: "test interface" + vlanName: "testvlan" + register: result + ignore_errors: yes + +- assert: + that: + - 'result.changed == false' + - '"Invalid parameters in playbook: net_name : Required parameter not found" in result.msg' + +- name: MERGED - Create Network with invalid VRF name + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-10000 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 3504 + suppressArp: false + intfDescription: "test interface" + vlanName: "testvlan" + register: result + ignore_errors: yes + +- assert: + that: + - 'result.changed == false' + - '"Invalid VRF" in result.msg.DATA.failureList[0].message' + +- name: MERGED - Create Network with invalid vlan id + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 15000 + suppressArp: false + intfDescription: "test interface" + vlanName: "testvlan" + register: result + ignore_errors: yes + +- assert: + that: + - 'result.changed == false' + - '"Invalid Vlan" in result.msg.DATA.failureList[0].message' + +- name: MERGED - Create Network and deploy in invalid switch + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 3504 + suppressArp: false + intfDescription: "test interface" + vlanName: "testvlan" + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + deploy: true + register: result + ignore_errors: yes + +- assert: + that: + - 'result.changed == false' + - '"Invalid parameters in playbook: ipAddress : Required parameter not found" in result.msg' + +- name: MERGED - Create Network with out of range routing tag + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1000 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: 
Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + mtu: 2500 + vlanId: 3504 + suppressArp: false + intfDescription: "test interface" + vlanName: "testvlan" + tag: 4294967296 + register: result + ignore_errors: yes + +- assert: + that: + - 'result.changed == false' + - '"tag:4294967296 : The item exceeds the allowed range of max 4294967295" in result.msg' + +- name: MERGED - Create L3 Network without a vrf name + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 3504 + suppressArp: false + intfDescription: "test interface" + vlanName: "testvlan" + register: result + ignore_errors: yes + +- assert: + that: + - 'result.changed == false' + - '"Invalid parameters in playbook: vrf_name : Required parameter not found" in result.msg' + +############################################## +## CLEAN-UP ## +############################################## + +- name: MERGED - setup - remove any networks + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted diff --git a/tests/integration/targets/dcnm_networkv2/tests/dcnm/overridden.yaml b/tests/integration/targets/dcnm_networkv2/tests/dcnm/overridden.yaml new file mode 100644 index 000000000..98f3853ec --- /dev/null +++ b/tests/integration/targets/dcnm_networkv2/tests/dcnm/overridden.yaml @@ -0,0 +1,341 @@ +############################################## +## SETUP ## +############################################## + +- name: OVERRIDDEN - Verify if fabric - Fabric1 is deployed. 
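+  # Setup for the OVERRIDDEN test cases: confirm the target fabric exists
+  # before creating the baseline networks that the overridden state later
+  # replaces.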
+ cisco.dcnm.dcnm_rest: + method: GET + path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + register: result + +- assert: + that: + - 'result.response.DATA != None' + +- name: OVERRIDDEN - setup - Clean up any existing networks + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + +- name: OVERRIDDEN - Create, Attach and Deploy Multiple Network with Switch Attach + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1503 + tag: 14345 + mtu: 1500 + mcastGroup: '239.1.1.1' + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + vlan: 1503 + deploy: true + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + untagged: false + vlan: 1504 + deploy: true + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7002 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + tag: 14346 + mtu: 1500 + mcastGroup: '239.10.11.1' + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int3 }}", "{{ ansible_sw2_int4 }}"] + untagged: false + vlan: 151 + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + - "query_result.response[1].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + +############################################## +## OVERRIDDEN ## +############################################## + +- name: OVERRIDDEN - Create, Attach and Deploy Network with Single Switch Attach + cisco.dcnm.dcnm_networkv2: &conf1 + fabric: "{{ test_fabric }}" + state: overridden + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1503 + tag: 14345 + mtu: 150 + mcastGroup: '239.1.1.1' + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int3 }}", "{{ ansible_sw1_int4 }}"] + torPorts: [] + untagged: false + vlan: 1503 + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" 
+ state: query
+ register: query_result
+ until:
+ - "query_result.response[0].parent.networkStatus is search('DEPLOYED')"
+ retries: 30
+ delay: 2
+
+- assert:
+ that:
+ - 'result.changed == true'
+ - 'result.response[0].RETURN_CODE == 200'
+ - 'result.response[1].RETURN_CODE == 200'
+ - 'result.response[2].RETURN_CODE == 200'
+ - 'result.response[3].RETURN_CODE == 200'
+ - 'result.response[4].RETURN_CODE == 200'
+ - 'result.response[5].RETURN_CODE == 200'
+ - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"'
+ - '(result.response[4].DATA|dict2items)[0].value == "SUCCESS"'
+ - '(result.response[4].DATA|dict2items)[1].value == "SUCCESS"'
+ - 'result.response[3].METHOD == "DELETE"'
+
+- name: OVERRIDDEN - conf1 - Idempotence
+ cisco.dcnm.dcnm_networkv2: *conf1
+ register: result
+
+- assert:
+ that:
+ - 'result.changed == false'
+ - 'result.response|length == 0'
+
+- name: OVERRIDDEN - setup - remove any networks
+ cisco.dcnm.dcnm_networkv2:
+ fabric: "{{ test_fabric }}"
+ state: deleted
+
+- name: OVERRIDDEN - Create, Attach and Deploy L2, L3 Network with Switch Attach
+ cisco.dcnm.dcnm_networkv2:
+ fabric: "{{ test_fabric }}"
+ state: merged
+ config:
+ - net_name: ansible-net13
+ vrf_name: NA
+ net_id: 7005
+ net_template: Default_Network_Universal
+ net_extension_template: Default_Network_Extension_Universal
+ network_template_config:
+ gatewayIpAddress: '192.168.30.1/24'
+ tag: 14345
+ mtu: 2500
+ vlanId: 150
+ isLayer2Only: True
+ suppressArp: True
+ intfDescription: "test interface"
+ vlanName: "testvlan"
+ mcastGroup: '239.1.1.1'
+ dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]}
+ attach:
+ - attached: true
+ dot1QVlan: 1
+ fabric: "{{ test_fabric }}"
+ instanceValues: ""
+ ipAddress: "{{ ansible_switch1 }}"
+ switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"]
+ torPorts: []
+ untagged: false
+ deploy: true
+ - attached: true
+ dot1QVlan: 1
+ fabric: "{{ test_fabric }}"
+ instanceValues: ""
+ ipAddress: "{{ ansible_switch2 }}"
+ switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"]
+ torPorts: []
+ untagged: false
+ deploy: true
+ - net_name: ansible-net12
+ vrf_name: Tenant-2
+ net_id: 7002
+ net_template: Default_Network_Universal
+ net_extension_template: Default_Network_Extension_Universal
+ network_template_config:
+ gatewayIpAddress: '192.168.40.1/24'
+ vlanId: 151
+ tag: 14346
+ mtu: 1500
+ mcastGroup: '239.11.10.1'
+ dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]}
+ attach:
+ - attached: true
+ dot1QVlan: 1
+ fabric: "{{ test_fabric }}"
+ instanceValues: ""
+ ipAddress: "{{ ansible_switch2 }}"
+ switchPorts: ["{{ ansible_sw2_int3 }}", "{{ ansible_sw2_int4 }}"]
+ untagged: false
+ vlan: 151
+ deploy: true
+ register: result
+
+- name: Query fabric state until networkStatus transitions to DEPLOYED state
+ cisco.dcnm.dcnm_network:
+ fabric: "{{ test_fabric }}"
+ state: query
+ register: query_result
+ until:
+ - "query_result.response[0].parent.networkStatus is search('DEPLOYED')"
+ - "query_result.response[1].parent.networkStatus is search('DEPLOYED')"
+ retries: 30
+ delay: 2
+
+- assert:
+ that:
+ - 'result.changed == true'
+ - 'result.response[0].RETURN_CODE == 200'
+ - 'result.response[1].RETURN_CODE == 200'
+ - 'result.response[2].RETURN_CODE == 200'
+ - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"'
+ - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"'
+
+- name: OVERRIDDEN - 
Override L2, L3 Networks with a new L2 network + cisco.dcnm.dcnm_networkv2: &conf2 + fabric: "{{ test_fabric }}" + state: overridden + config: + - net_name: ansible-net14 + vrf_name: NA + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 150 + isLayer2Only: True + suppressArp: True + intfDescription: "test interface" + vlanName: "testvlan" + mcastGroup: '239.1.1.1' + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + torPorts: [] + untagged: false + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - 'result.response[3].RETURN_CODE == 200' + - 'result.response[4].RETURN_CODE == 200' + - 'result.response[5].RETURN_CODE == 200' + - 'result.response[6].RETURN_CODE == 200' + - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[2].value == "SUCCESS"' + - '(result.response[5].DATA|dict2items)[0].value == "SUCCESS"' + - 'result.response[2].METHOD == "DELETE"' + - 'result.response[3].METHOD == "DELETE"' + +- name: OVERRIDDEN - conf2 - Idempotence + cisco.dcnm.dcnm_networkv2: *conf2 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + +- name: OVERRIDDEN - Check for networks in fabric + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: query + register: result + +- assert: + that: + - 'result.response|length == 1' + - 'result.response[0].Network.displayName == "ansible-net14"' + +############################################## +## CLEAN-UP ## +############################################## + +- name: OVERRIDDEN - setup - remove any networks + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted diff --git a/tests/integration/targets/dcnm_networkv2/tests/dcnm/query.yaml b/tests/integration/targets/dcnm_networkv2/tests/dcnm/query.yaml new file mode 100644 index 000000000..38f1fae5d --- /dev/null +++ b/tests/integration/targets/dcnm_networkv2/tests/dcnm/query.yaml @@ -0,0 +1,417 @@ +############################################## +## SETUP ## +############################################## + +- name: Verify if fabric - Fabric is deployed. 
+ cisco.dcnm.dcnm_rest: + method: GET + path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + register: result + +- assert: + that: + - 'result.response.DATA != None' + +- name: Setup - Clean up any existing networks + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + register: result + until: + - "result.response | length == 0" + retries: 30 + delay: 2 + +- name: Create, Attach and Deploy Multiple Network with Single Switch Attach + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1501 + tag: 14345 + mtu: 1500 + vlanName: "test-vlan" + intfDescription: "test-interface" + attach: + - attached: true + dot1QVlan: 1 + extensionValues: "" + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + untagged: false + vlan: 1501 + deploy: True + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7002 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + tag: 14346 + mtu: 1500 + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + untagged: false + vlan: 151 + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + - "query_result.response[1].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + +############################################### +### QUERY ## +############################################### + +- name: QUERY - Query the Network + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: query + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1501 + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7002 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response[0].Network.networkName == "ansible-net13"' + - 'result.response[0].Network.networkId | regex_search("700[2|5]", ignorecase=True)' + - 'result.response[0].Network.networkTemplate == "Default_Network_Universal"' + - 'result.response[0].Network.vrf | regex_search("Tenant-[1|2]", ignorecase=True)' + - 
'result.response[0].attach[0].isLanAttached== true' + - 'result.response[0].attach[0].lanAttachState== "DEPLOYED"' + - 'result.response[0].attach[0].networkName== "ansible-net13"' + - 'result.response[0].attach[1].isLanAttached== false' + - 'result.response[0].attach[1].lanAttachState== "NA"' + - 'result.response[0].attach[1].networkName== "ansible-net13"' + - 'result.response[1].Network.networkName == "ansible-net12"' + - 'result.response[1].Network.networkId | regex_search("700[2|5]", ignorecase=True)' + - 'result.response[1].Network.networkTemplate == "Default_Network_Universal"' + - 'result.response[1].Network.vrf | regex_search("Tenant-[1|2]", ignorecase=True)' + - 'result.response[1].attach[0].isLanAttached== true' + - 'result.response[1].attach[0].lanAttachState== "DEPLOYED"' + - 'result.response[1].attach[0].networkName== "ansible-net12"' + - 'result.response[1].attach[1].isLanAttached== false' + - 'result.response[1].attach[1].lanAttachState== "NA"' + - 'result.response[1].attach[1].networkName== "ansible-net12"' + +- name: QUERY - Query the Network without the config element + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: query + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response[0].Network.networkName | regex_search("ansible-net1[2|3]", ignorecase=True)' + - 'result.response[0].Network.networkId | regex_search("700[2|5]", ignorecase=True)' + - 'result.response[0].Network.networkTemplate == "Default_Network_Universal"' + - 'result.response[0].Network.vrf | regex_search("Tenant-[1|2]", ignorecase=True)' + - 'result.response[0].attach[0].isLanAttached== true' + - 'result.response[0].attach[0].lanAttachState== "DEPLOYED"' + - 'result.response[0].attach[0].networkName | regex_search("ansible-net1[2|3]", ignorecase=True)' + - 'result.response[0].attach[1].isLanAttached== false' + - 'result.response[0].attach[1].lanAttachState== "NA"' + - 'result.response[0].attach[1].networkName | regex_search("ansible-net1[2|3]", ignorecase=True)' + - 'result.response[1].Network.networkName | regex_search("ansible-net1[2|3]", ignorecase=True)' + - 'result.response[1].Network.networkId | regex_search("700[2|5]", ignorecase=True)' + - 'result.response[1].Network.networkTemplate == "Default_Network_Universal"' + - 'result.response[1].Network.vrf | regex_search("Tenant-[1|2]", ignorecase=True)' + - 'result.response[1].attach[0].isLanAttached== true' + - 'result.response[1].attach[0].lanAttachState== "DEPLOYED"' + - 'result.response[1].attach[0].networkName | regex_search("ansible-net1[2|3]", ignorecase=True)' + - 'result.response[1].attach[1].isLanAttached== false' + - 'result.response[1].attach[1].lanAttachState== "NA"' + - 'result.response[1].attach[1].networkName | regex_search("ansible-net1[2|3]", ignorecase=True)' + + +- name: Delete all the networks + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + register: result + +- name: Query fabric state until all networks are deleted + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response | length == 0" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - 'result.response[3].RETURN_CODE == 200' + - 'result.response[0].MESSAGE == "OK"' + - 'result.response[1].MESSAGE == "OK"' + - 'result.response[2].MESSAGE == "OK"' + - 
'result.response[3].MESSAGE == "OK"' + - 'result.response[3].METHOD == "DELETE"' + - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' + +- name: QUERY - Query the non available Network + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: query + config: + - net_name: ansible-net13 + vrf_name: Tenant-2 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1501 + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7002 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + +- name: Create a L2 only and L3 networks along with all dhcp, arp options + cisco.dcnm.dcnm_networkv2: &conf3 + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + net_id: 7009 + vrf_name: NA + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 3509 + isLayer2Only: True + suppressArp: True + intfDescription: "test interface" + vlanName: "testvlan" + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + deploy: true + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7010 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + tag: 14346 + mtu: 1500 + vlanName: "testvlan1" + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + untagged: false + vlan: 151 + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + - "query_result.response[1].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + +- name: QUERY - Query the L2 and L3 Network + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: query + config: + - net_name: ansible-net13 + net_id: 7009 + vrf_name: NA + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + 
tag: 14345
+ mtu: 2500
+ vlanId: 3509
+ isLayer2Only: True
+ suppressArp: True
+ intfDescription: "test interface"
+ vlanName: "testvlan"
+ dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]}
+ - net_name: ansible-net12
+ net_id: 7010
+ vrf_name: Tenant-2
+ net_template: Default_Network_Universal
+ net_extension_template: Default_Network_Extension_Universal
+ network_template_config:
+ gatewayIpAddress: '192.168.40.1/24'
+ vlanId: 151
+ tag: 14346
+ mtu: 1500
+ vlanName: "testvlan1"
+ dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]}
+ register: result
+
+- assert:
+ that:
+ - 'result.changed == false'
+ - 'result.response[0].Network.networkName | regex_search("ansible-net1[2|3]", ignorecase=True)'
+ - 'result.response[0].Network.networkId | regex_search("70[0|1][0|9]", ignorecase=True)'
+ - 'result.response[0].Network.networkTemplate == "Default_Network_Universal"'
+ - 'result.response[0].Network.vrf == "NA"'
+ - 'result.response[0].Network.networkTemplateConfig.suppressArp == "true"'
+ - 'result.response[0].Network.networkTemplateConfig.isLayer2Only == "true"'
+ - 'result.response[0].Network.networkTemplateConfig.intfDescription == "test interface"'
+ - 'result.response[0].Network.networkTemplateConfig.vlanName == "testvlan"'
+ - 'result.response[0].attach[0].isLanAttached== true'
+ - 'result.response[0].attach[0].lanAttachState== "DEPLOYED"'
+ - 'result.response[0].attach[0].networkName | regex_search("ansible-net1[2|3]", ignorecase=True)'
+ - 'result.response[0].attach[1].isLanAttached== false'
+ - 'result.response[0].attach[1].lanAttachState== "NA"'
+ - 'result.response[0].attach[1].networkName | regex_search("ansible-net1[2|3]", ignorecase=True)'
+ - 'result.response[1].Network.networkName | regex_search("ansible-net1[2|3]", ignorecase=True)'
+ - 'result.response[1].Network.networkId | regex_search("70[0|1][0|9]", ignorecase=True)'
+ - 'result.response[1].Network.networkTemplate == "Default_Network_Universal"'
+ - 'result.response[1].Network.vrf | regex_search("Tenant-[1|2]", ignorecase=True)'
+ - 'result.response[1].Network.networkTemplateConfig.isLayer2Only == "false"'
+ - 'result.response[1].Network.networkTemplateConfig.vlanName == "testvlan1"'
+ - 'result.response[1].attach[0].isLanAttached== true'
+ - 'result.response[1].attach[0].lanAttachState== "DEPLOYED"'
+ - 'result.response[1].attach[0].networkName | regex_search("ansible-net1[2|3]", ignorecase=True)'
+ - 'result.response[1].attach[1].isLanAttached== false'
+ - 'result.response[1].attach[1].lanAttachState== "NA"'
+ - 'result.response[1].attach[1].networkName | regex_search("ansible-net1[2|3]", ignorecase=True)'
+
+- name: QUERY - Query the L2 and L3 Network without the config element
+ cisco.dcnm.dcnm_networkv2:
+ fabric: "{{ test_fabric }}"
+ state: query
+ register: result
+
+- assert:
+ that:
+ - 'result.changed == false'
+ - 'result.response[0].Network.networkName | regex_search("ansible-net1[2|3]", ignorecase=True)'
+ - 'result.response[0].Network.networkId | regex_search("70[0|1][0|9]", ignorecase=True)'
+ - 'result.response[0].Network.networkTemplate == "Default_Network_Universal"'
+ - 'result.response[0].Network.vrf == "NA"'
+ - 'result.response[0].Network.networkTemplateConfig.suppressArp == "true"'
+ - 'result.response[0].Network.networkTemplateConfig.isLayer2Only == "true"'
+ - 'result.response[0].Network.networkTemplateConfig.intfDescription == "test interface"'
+ - 
'result.response[0].Network.networkTemplateConfig.vlanName == "testvlan"' + - 'result.response[0].attach[0].isLanAttached== true' + - 'result.response[0].attach[0].lanAttachState== "DEPLOYED"' + - 'result.response[0].attach[0].networkName | regex_search("ansible-net1[2|3]", ignorecase=True)' + - 'result.response[0].attach[1].isLanAttached== false' + - 'result.response[0].attach[1].lanAttachState== "NA"' + - 'result.response[0].attach[1].networkName | regex_search("ansible-net1[2|3]", ignorecase=True)' + - 'result.response[1].Network.networkName | regex_search("ansible-net1[2|3]", ignorecase=True)' + - 'result.response[1].Network.networkId | regex_search("70[0|1][0|9]", ignorecase=True)' + - 'result.response[1].Network.networkTemplate == "Default_Network_Universal"' + - 'result.response[1].Network.vrf | regex_search("Tenant-[1|2]", ignorecase=True)' + - 'result.response[1].Network.networkTemplateConfig.isLayer2Only == "false"' + - 'result.response[1].Network.networkTemplateConfig.vlanName == "testvlan1"' + - 'result.response[1].attach[0].isLanAttached== true' + - 'result.response[1].attach[0].lanAttachState== "DEPLOYED"' + - 'result.response[1].attach[0].networkName | regex_search("ansible-net1[2|3]", ignorecase=True)' + - 'result.response[1].attach[1].isLanAttached== false' + - 'result.response[1].attach[1].lanAttachState== "NA"' + - 'result.response[1].attach[1].networkName | regex_search("ansible-net1[2|3]", ignorecase=True)' + +############################################### +### CLEAN-UP ## +############################################### + +- name: QUERY - setup - remove any networks + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted diff --git a/tests/integration/targets/dcnm_networkv2/tests/dcnm/replaced.yaml b/tests/integration/targets/dcnm_networkv2/tests/dcnm/replaced.yaml new file mode 100644 index 000000000..a71132f53 --- /dev/null +++ b/tests/integration/targets/dcnm_networkv2/tests/dcnm/replaced.yaml @@ -0,0 +1,503 @@ +############################################## +## SETUP ## +############################################## + +- name: REPLACED - Verify if fabric - Fabric1 is deployed. 
+ cisco.dcnm.dcnm_rest: + method: GET + path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + register: result + +- assert: + that: + - 'result.response.DATA != None' + +- name: REPLACED - setup - Clean up any existing networks + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted + +- name: REPLACED - Create, Attach and Deploy Multiple Network with Switch Attach + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1503 + tag: 14345 + mtu: 1500 + mcastGroup: '239.1.1.1' + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + vlan: 1503 + deploy: true + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + untagged: false + vlan: 1504 + deploy: true + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7002 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + tag: 14346 + mtu: 1500 + mcastGroup: '239.21.21.1' + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int3 }}", "{{ ansible_sw2_int4 }}"] + untagged: false + vlan: 151 + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + - "query_result.response[1].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + +############################################## +## REPLACED ## +############################################## + +- name: REPLACED - Update Network using replace - Delete Attachments + cisco.dcnm.dcnm_networkv2: &conf1 + fabric: "{{ test_fabric }}" + state: replaced + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + vlanId: 1503 + mtu: 1500 + mcastGroup: '239.1.1.1' + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7002 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + tag: 14346 + mtu: 1500 + mcastGroup: '239.21.21.1' + register: result + +- name: Query fabric state until networkStatus transitions to NA state + cisco.dcnm.dcnm_network: + fabric: "{{ 
test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('NA')" + - "query_result.response[1].parent.networkStatus is search('NA')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[2].value == "SUCCESS"' + +- name: REPLACED - conf1 - Idempotence + cisco.dcnm.dcnm_networkv2: *conf1 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + +- name: REPLACED - Update Network using replace - Create Attachments + cisco.dcnm.dcnm_networkv2: &conf2 + fabric: "{{ test_fabric }}" + state: replaced + config: + - net_name: ansible-net13 + vrf_name: Tenant-1 + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + vlanId: 1503 + tag: 14345 + mtu: 1500 + mcastGroup: '239.1.1.1' + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + vlan: 1503 + deploy: true + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + untagged: false + vlan: 1504 + deploy: true + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7002 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + tag: 14346 + mtu: 1500 + mcastGroup: '239.21.21.1' + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int3 }}", "{{ ansible_sw2_int4 }}"] + untagged: false + vlan: 151 + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + - "query_result.response[1].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[2].value == "SUCCESS"' + +- name: REPLACED - conf2 - Idempotence + cisco.dcnm.dcnm_networkv2: *conf2 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + +- name: REPLACED - setup - remove any networks + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: deleted + +- name: REPLACED - Create, Attach and Deploy L2, L3 Network with Switch Attach + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: merged + config: + - net_name: ansible-net13 + vrf_name: NA + net_id: 7005 + net_template: Default_Network_Universal + 
net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 150 + isLayer2Only: True + suppressArp: True + intfDescription: "test interface" + vlanName: "testvlan" + mcastGroup: '239.1.1.1' + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + deploy: true + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + torPorts: [] + untagged: false + deploy: true + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7002 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + tag: 14346 + mtu: 1500 + mcastGroup: '239.21.21.1' + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int3 }}", "{{ ansible_sw2_int4 }}"] + untagged: false + vlan: 151 + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + - "query_result.response[1].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - 'result.response[2].RETURN_CODE == 200' + - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - '(result.response[1].DATA|dict2items)[2].value == "SUCCESS"' + +- name: REPLACED - Update L2, L3 Networks using replace - Delete Attachments + cisco.dcnm.dcnm_networkv2: &conf3 + fabric: "{{ test_fabric }}" + state: replaced + config: + - net_name: ansible-net13 + vrf_name: NA + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 150 + isLayer2Only: True + suppressArp: True + intfDescription: "test interface" + vlanName: "testvlan" + mcastGroup: '239.1.1.1' + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7002 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + tag: 14346 + mtu: 1500 + mcastGroup: '239.21.21.1' + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + register: result + +- name: Query fabric state until networkStatus transitions to NA state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric 
}}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('NA')" + - "query_result.response[1].parent.networkStatus is search('NA')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[2].value == "SUCCESS"' + +- name: REPLACED - conf3 - Idempotence + cisco.dcnm.dcnm_networkv2: *conf3 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + +- name: REPLACED - Update L2, L3 Networks using replace - Create Attachments + cisco.dcnm.dcnm_networkv2: &conf4 + fabric: "{{ test_fabric }}" + state: replaced + config: + - net_name: ansible-net13 + vrf_name: NA + net_id: 7005 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.30.1/24' + tag: 14345 + mtu: 2500 + vlanId: 150 + isLayer2Only: True + suppressArp: True + intfDescription: "test interface" + vlanName: "testvlan" + mcastGroup: '239.1.1.1' + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch1 }}" + switchPorts: ["{{ ansible_sw1_int1 }}", "{{ ansible_sw1_int2 }}"] + torPorts: [] + untagged: false + deploy: true + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int1 }}", "{{ ansible_sw2_int2 }}"] + torPorts: [] + untagged: false + deploy: true + - net_name: ansible-net12 + vrf_name: Tenant-2 + net_id: 7002 + net_template: Default_Network_Universal + net_extension_template: Default_Network_Extension_Universal + network_template_config: + gatewayIpAddress: '192.168.40.1/24' + vlanId: 151 + tag: 14346 + mtu: 1500 + mcastGroup: '239.21.21.1' + dhcpServers: {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}, {"srvrAddr":"2.2.2.2", "srvrVrf":"vrf2"}]} + attach: + - attached: true + dot1QVlan: 1 + fabric: "{{ test_fabric }}" + instanceValues: "" + ipAddress: "{{ ansible_switch2 }}" + switchPorts: ["{{ ansible_sw2_int3 }}", "{{ ansible_sw2_int4 }}"] + untagged: false + vlan: 151 + deploy: true + register: result + +- name: Query fabric state until networkStatus transitions to DEPLOYED state + cisco.dcnm.dcnm_network: + fabric: "{{ test_fabric }}" + state: query + register: query_result + until: + - "query_result.response[0].parent.networkStatus is search('DEPLOYED')" + - "query_result.response[1].parent.networkStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- assert: + that: + - 'result.changed == true' + - 'result.response[0].RETURN_CODE == 200' + - 'result.response[1].RETURN_CODE == 200' + - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - '(result.response[0].DATA|dict2items)[2].value == "SUCCESS"' + +- name: REPLACED - conf4 - Idempotence + cisco.dcnm.dcnm_networkv2: *conf4 + register: result + +- assert: + that: + - 'result.changed == false' + - 'result.response|length == 0' + +############################################## +## CLEAN-UP ## 
+############################################## + +- name: REPLACED - setup - remove any networks + cisco.dcnm.dcnm_networkv2: + fabric: "{{ test_fabric }}" + state: deleted diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 60d9043d3..be72266d7 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -15,6 +15,7 @@ plugins/modules/dcnm_links.py validate-modules:missing-gplv3-license # GPLv3 lic plugins/modules/dcnm_vpc_pair.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_image_upload.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_fabric.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module +plugins/modules/dcnm_networkv2.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_maintenance_mode.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_rest.py import-2.6!skip plugins/modules/dcnm_rest.py import-2.7!skip diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 4723c583b..351cf2724 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -15,6 +15,7 @@ plugins/modules/dcnm_links.py validate-modules:missing-gplv3-license # GPLv3 lic plugins/modules/dcnm_vpc_pair.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_image_upload.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_fabric.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module +plugins/modules/dcnm_networkv2.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_maintenance_mode.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_rest.py import-2.6!skip plugins/modules/dcnm_rest.py import-2.7!skip diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 334160f16..0c7a5f4ed 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -15,6 +15,7 @@ plugins/modules/dcnm_links.py validate-modules:missing-gplv3-license # GPLv3 lic plugins/modules/dcnm_vpc_pair.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_image_upload.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_fabric.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module +plugins/modules/dcnm_networkv2.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_maintenance_mode.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_rest.py import-2.6!skip plugins/modules/dcnm_rest.py import-2.7!skip diff --git a/tests/sanity/ignore-2.13.txt b/tests/sanity/ignore-2.13.txt index b535a3144..41740c64c 100644 --- 
a/tests/sanity/ignore-2.13.txt +++ b/tests/sanity/ignore-2.13.txt @@ -16,6 +16,7 @@ plugins/modules/dcnm_image_upload.py validate-modules:missing-gplv3-license # GP plugins/httpapi/dcnm.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_vpc_pair.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_fabric.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module +plugins/modules/dcnm_networkv2.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_maintenance_mode.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_rest.py import-2.7!skip plugins/httpapi/dcnm.py import-3.8!skip diff --git a/tests/sanity/ignore-2.14.txt b/tests/sanity/ignore-2.14.txt index 15705d33b..0cde9a534 100644 --- a/tests/sanity/ignore-2.14.txt +++ b/tests/sanity/ignore-2.14.txt @@ -16,6 +16,7 @@ plugins/modules/dcnm_image_upload.py validate-modules:missing-gplv3-license # GP plugins/httpapi/dcnm.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_vpc_pair.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_fabric.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module +plugins/modules/dcnm_networkv2.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_maintenance_mode.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_rest.py import-2.7!skip plugins/httpapi/dcnm.py import-3.9!skip diff --git a/tests/sanity/ignore-2.15.txt b/tests/sanity/ignore-2.15.txt index 15705d33b..88bd27f1d 100644 --- a/tests/sanity/ignore-2.15.txt +++ b/tests/sanity/ignore-2.15.txt @@ -16,7 +16,8 @@ plugins/modules/dcnm_image_upload.py validate-modules:missing-gplv3-license # GP plugins/httpapi/dcnm.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_vpc_pair.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_fabric.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module +plugins/modules/dcnm_networkv2.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_maintenance_mode.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_rest.py import-2.7!skip plugins/httpapi/dcnm.py import-3.9!skip -plugins/httpapi/dcnm.py import-3.10!skip +plugins/httpapi/dcnm.py import-3.10!skip \ No newline at end of file diff --git a/tests/sanity/ignore-2.16.txt b/tests/sanity/ignore-2.16.txt index 20cfc7582..e5310a225 100644 --- a/tests/sanity/ignore-2.16.txt +++ b/tests/sanity/ignore-2.16.txt @@ -16,4 +16,5 @@ plugins/modules/dcnm_image_upload.py validate-modules:missing-gplv3-license # GP plugins/httpapi/dcnm.py validate-modules:missing-gplv3-license # GPLv3 license 
header not found in the first 20 lines of the module plugins/modules/dcnm_vpc_pair.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_fabric.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module +plugins/modules/dcnm_networkv2.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_maintenance_mode.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 60d9043d3..be72266d7 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -15,6 +15,7 @@ plugins/modules/dcnm_links.py validate-modules:missing-gplv3-license # GPLv3 lic plugins/modules/dcnm_vpc_pair.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_image_upload.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_fabric.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module +plugins/modules/dcnm_networkv2.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_maintenance_mode.py validate-modules:missing-gplv3-license # GPLv3 license header not found in the first 20 lines of the module plugins/modules/dcnm_rest.py import-2.6!skip plugins/modules/dcnm_rest.py import-2.7!skip diff --git a/tests/unit/modules/dcnm/fixtures/dcnm_networkv2/dcnm_networkv2_common.py b/tests/unit/modules/dcnm/fixtures/dcnm_networkv2/dcnm_networkv2_common.py new file mode 100644 index 000000000..f28f276f8 --- /dev/null +++ b/tests/unit/modules/dcnm/fixtures/dcnm_networkv2/dcnm_networkv2_common.py @@ -0,0 +1,55 @@ +# Copyright (c) 2024 Cisco and/or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import pytest +from ansible_collections.ansible.netcommon.tests.unit.modules.utils import ( + AnsibleFailJson, +) + +from ansible_collections.cisco.dcnm.plugins.modules.dcnm_networkv2 import ( + DcnmNetworkv2, +) + + +class MockAnsibleModule: + """ + Mock the AnsibleModule class + """ + + params = { + "config": [], + "state": "merged", + "fabric": "test_netv2", + } + supports_check_mode = True + + @staticmethod + def fail_json(msg, **kwargs) -> AnsibleFailJson: + """ + mock the fail_json method + """ + raise AnsibleFailJson(msg, kwargs) + + +@pytest.fixture(name="dcnm_networkv2_fixture") +def dcnm_networkv2_fixture(monkeypatch): + """ + mock DcnmNetworkv2 + """ + + return DcnmNetworkv2(MockAnsibleModule) diff --git a/tests/unit/modules/dcnm/fixtures/dcnm_networkv2/dcnm_networkv2_data.json b/tests/unit/modules/dcnm/fixtures/dcnm_networkv2/dcnm_networkv2_data.json new file mode 100644 index 000000000..4e65cf9b9 --- /dev/null +++ b/tests/unit/modules/dcnm/fixtures/dcnm_networkv2/dcnm_networkv2_data.json @@ -0,0 +1,962 @@ +{ + "networkv2_want": + { + "d_key": "networkName", + "fabric": "test_netv2", + "vrf": "Tenant-1", + "networkName": "net1", + "networkId": 31001, + "networkTemplate": "Default_Network_Universal", + "networkExtensionTemplate": "Default_Network_Extension_Universal", + "networkTemplateConfig": { + "gatewayIpAddress": "2.1.1.1/24", + "vlanId": 1001, + "nveId": "1", + "gatewayIpV6Address": "", + "isLayer2Only": "false", + "suppressArp": "", + "mcastGroup": "", + "dhcpServers": "", + "loopbackId": 10, + "vlanName": "", + "intfDescription": "test_interface", + "tag": "12345", + "mtu": "1500", + "trmEnabled": "", + "secondaryGW1": "3.1.1.1/24", + "secondaryGW2": "", + "secondaryGW3": "", + "secondaryGW4": "", + "rtBothAuto": "false", + "ENABLE_NETFLOW": "false", + "enableL3OnBorder": "true", + "MULTISITE_CONN": "" + } + }, + + "networkv2_have_00001": + { + "id": 743, + "fabric": "test_netv2", + "networkName": "net1", + "networkId": 31001, + "networkTemplate": "Default_Network_Universal", + "networkExtensionTemplate": "Default_Network_Extension_Universal", + "vrf": "Tenant-1", + "networkTemplateConfig": { + "suppressArp": "", + "secondaryGW3": "", + "secondaryGW2": "", + "loopbackId": "10", + "secondaryGW1": "3.1.1.1/24", + "enableL3OnBorder": "true", + "type": "Normal", + "SVI_NETFLOW_MONITOR": "", + "enableIR": "false", + "rtBothAuto": "false", + "isLayer2Only": "false", + "MULTISITE_CONN": "", + "ENABLE_NETFLOW": "false", + "dhcpServerAddr3": "", + "gatewayIpV6Address": "", + "dhcpServerAddr2": "", + "tag": "12345", + "nveId": "1", + "vrfDhcp": "", + "secondaryGW4": "", + "vlanId": "1001", + "gatewayIpAddress": "2.1.1.1/24", + "vlanName": "", + "mtu": "1800", + "intfDescription": "test_interface", + "mcastGroup": "239.11.11.1", + "igmpVersion": "2", + "trmEnabled": "", + "VLAN_NETFLOW_MONITOR": "", + "dhcpServers": "" + } + }, + + "networkv2_have_00002": + { + "id": 743, + "fabric": "test_netv2", + "networkName": "net2", + "networkId": 31002, + "networkTemplate": "Default_Network_Universal", + "networkExtensionTemplate": "Default_Network_Extension_Universal", + "vrf": "Tenant-1", + "networkTemplateConfig": { + "suppressArp": "", + "secondaryGW3": "", + "secondaryGW2": "", + "loopbackId": "20", + "secondaryGW1": "3.1.1.2/24", + "enableL3OnBorder": "true", + "type": "Normal", + "SVI_NETFLOW_MONITOR": "", + "enableIR": "false", + "rtBothAuto": "false", + "isLayer2Only": "false", + 
"MULTISITE_CONN": "", + "ENABLE_NETFLOW": "false", + "dhcpServerAddr3": "", + "gatewayIpV6Address": "", + "dhcpServerAddr2": "", + "tag": "12342", + "nveId": "1", + "vrfDhcp": "", + "secondaryGW4": "", + "vlanId": "1002", + "gatewayIpAddress": "2.1.1.2/24", + "vlanName": "", + "mtu": "1500", + "intfDescription": "test_interface", + "mcastGroup": "239.11.11.2", + "igmpVersion": "2", + "trmEnabled": "", + "VLAN_NETFLOW_MONITOR": "", + "dhcpServers": "" + } + }, + + "networkv2_cfg_00001": + { + "net_name": "net1", + "vrf_name": "Tenant-1", + "net_id": 31001, + "net_template": "Default_Network_Universal", + "net_extension_template": "Default_Network_Extension_Universal", + "network_template_config": { + "gatewayIpAddress": "2.1.1.1/24", + "vlanId": 1001, + "intfDescription": "test_interface", + "secondaryGW1": "3.1.1.1/24", + "loopbackId": 10, + "enableL3OnBorder": true + } + }, + + "networkv2_cfg_00002": + { + "net_name": "net1", + "vrf_name": "Tenant-1", + "net_id": 31001, + "net_template": "Default_Network_Universal", + "net_extension_template": "Default_Network_Extension_Universal", + "network_template_config": { + "gatewayIpAddress": "2.1.1.1/24", + "vlanId": 1001, + "intfDescription": "test_interface", + "mtu": 1800, + "secondaryGW1": "3.1.1.1/24", + "loopbackId": 10, + "enableL3OnBorder": true, + "dhcpServers": {"dhcpServers":[{"srvrAddr":"1.1.1.1", "srvrVrf":"vrf1"}]}, + "attach": [ + { + "ipAddress": "192.168.2.1", + "switchPorts": ["Ethernet1/10", "Ethernet1/11"], + "torPorts": [ + { + "switch": "Tor1", + "ports": ["Ethernet1/10", "Ethernet1/11"] + } + ], + "deploy": true + } + ] + } + }, + + "networkv2_diff_create_00001": + [ + { + "d_key": "networkName", + "fabric": "test_netv2", + "vrf": "Tenant-1", + "networkName": "net1", + "networkId": 31001, + "networkTemplate": "Default_Network_Universal", + "networkExtensionTemplate": "Default_Network_Extension_Universal", + "networkTemplateConfig": { + "gatewayIpAddress": "2.1.1.1/24", + "vlanId": 1001, + "nveId": "1", + "gatewayIpV6Address": "", + "isLayer2Only": "false", + "suppressArp": "", + "mcastGroup": "", + "dhcpServers": { + "dhcpServers": [ + { + "srvrAddr": "1.1.1.1", + "srvrVrf": "vrf1" + } + ] + }, + "loopbackId": 10, + "vlanName": "", + "intfDescription": "test_interface", + "mtu": 1800, + "tag": "12345", + "trmEnabled": "", + "secondaryGW1": "3.1.1.1/24", + "secondaryGW2": "", + "secondaryGW3": "", + "secondaryGW4": "", + "rtBothAuto": "false", + "ENABLE_NETFLOW": "false", + "enableL3OnBorder": "false", + "MULTISITE_CONN": "" + } + } + ], + + "networkv2_diff_attach_00001": + [ + { + "networkName": "net1", + "lanAttachList": [ + { + "attached": true, + "detachSwitchPorts": "", + "dot1QVlan": "1", + "extensionValues": "", + "fabric": "test_netv2", + "freeformConfig": "", + "ipAddress": "192.168.2.1", + "switchPorts": "Ethernet1/10,Ethernet1/11", + "torPorts": "", + "untagged": false, + "vlan": "-1", + "serialNumber": "9SFRKD0M6AS", + "deployment": true, + "networkName": "net1", + "d_key": "serialNumber" + } + ] + } + ], + + "networkv2_diff_deploy_00001": + { + "9SFRKD0M6AS": [ + "net1" + ] + }, + + "networkv2_want_deploy_00001": + { + "9SFRKD0M6AS": [ + "net1" + ] + }, + + "networkv2_have_deploy_00001": + { + "9SFRKD0M6AS": [ + "net1" + ] + }, + + "networkv2_diff_create_update_00001": [], + + "networkv2_diff_undeploy_00001": [], + + "networkv2_diff_detach_00001": [], + + "networkv2_diff_attach_00002":[], + + "dyn_arg_spec": + { + "Default_Network_Universal": { + "gatewayIpAddress": { + "type": "ipV4AddressWithSubnet", + 
"required": false, + "default": "" + }, + "vlanId": { + "type": "integer", + "required": false, + "default": "" + }, + "nveId": { + "type": "integer", + "required": false, + "default": "1" + }, + "gatewayIpV6Address": { + "type": "string[]", + "required": false, + "default": "" + }, + "isLayer2Only": { + "type": "boolean", + "required": false, + "default": "false" + }, + "suppressArp": { + "type": "boolean", + "required": false, + "default": "" + }, + "mcastGroup": { + "type": "ipV4Address", + "required": false, + "default": "" + }, + "dhcpServers": { + "type": "structureArray", + "required": false, + "default": "" + }, + "loopbackId": { + "type": "integer", + "required": false, + "default": "", + "range_min": "0", + "range_max": "1023" + }, + "vlanName": { + "type": "string", + "required": false, + "default": "", + "length_min": "0", + "length_max": "128" + }, + "intfDescription": { + "type": "string", + "required": false, + "default": "" + }, + "mtu": { + "type": "integer", + "required": false, + "default": "", + "range_min": "68", + "range_max": "9216" + }, + "tag": { + "type": "long", + "required": false, + "default": "12345", + "range_min": "0", + "range_max": "4294967295" + }, + "trmEnabled": { + "type": "boolean", + "required": false, + "default": "" + }, + "igmpVersion": { + "type": "enum", + "required": false, + "default": "2", + "is_show": "\"trmEnabled==true\"" + }, + "secondaryGW1": { + "type": "ipV4AddressWithSubnet", + "required": false, + "default": "" + }, + "secondaryGW2": { + "type": "ipV4AddressWithSubnet", + "required": false, + "default": "" + }, + "secondaryGW3": { + "type": "ipV4AddressWithSubnet", + "required": false, + "default": "" + }, + "secondaryGW4": { + "type": "ipV4AddressWithSubnet", + "required": false, + "default": "" + }, + "rtBothAuto": { + "type": "boolean", + "required": false, + "default": "false" + }, + "ENABLE_NETFLOW": { + "type": "boolean", + "required": false, + "default": "false" + }, + "SVI_NETFLOW_MONITOR": { + "type": "string", + "required": false, + "default": "", + "is_show": "\"ENABLE_NETFLOW==true\"" + }, + "VLAN_NETFLOW_MONITOR": { + "type": "string", + "required": false, + "default": "", + "is_show": "\"ENABLE_NETFLOW==true\"" + } + }, + "Default_Network_Extension_Universal": { + "gatewayIpAddress": { + "type": "ipV4AddressWithSubnet", + "required": false, + "default": "" + }, + "vlanId": { + "type": "integer", + "required": false, + "default": "" + }, + "nveId": { + "type": "integer", + "required": false, + "default": "1" + }, + "gatewayIpV6Address": { + "type": "string[]", + "required": false, + "default": "" + }, + "isLayer2Only": { + "type": "boolean", + "required": false, + "default": "false" + }, + "suppressArp": { + "type": "boolean", + "required": false, + "default": "" + }, + "enableL3OnBorder": { + "type": "boolean", + "required": false, + "default": "false" + }, + "mcastGroup": { + "type": "ipV4Address", + "required": false, + "default": "" + }, + "dhcpServers": { + "type": "structureArray", + "required": false, + "default": "" + }, + "loopbackId": { + "type": "integer", + "required": false, + "default": "", + "range_min": "0", + "range_max": "1023" + }, + "vlanName": { + "type": "string", + "required": false, + "default": "", + "length_min": "0", + "length_max": "128" + }, + "intfDescription": { + "type": "string", + "required": false, + "default": "" + }, + "mtu": { + "type": "integer", + "required": false, + "default": "", + "range_min": "68", + "range_max": "9216" + }, + "tag": { + "type": "long", + "required": false, + 
"default": "12345", + "range_min": "0", + "range_max": "4294967295" + }, + "trmEnabled": { + "type": "boolean", + "required": false, + "default": "" + }, + "igmpVersion": { + "type": "enum", + "required": false, + "default": "2", + "is_show": "\"trmEnabled==true\"" + }, + "secondaryGW1": { + "type": "ipV4AddressWithSubnet", + "required": false, + "default": "" + }, + "secondaryGW2": { + "type": "ipV4AddressWithSubnet", + "required": false, + "default": "" + }, + "secondaryGW3": { + "type": "ipV4AddressWithSubnet", + "required": false, + "default": "" + }, + "secondaryGW4": { + "type": "ipV4AddressWithSubnet", + "required": false, + "default": "" + }, + "rtBothAuto": { + "type": "boolean", + "required": false, + "default": "false" + }, + "ENABLE_NETFLOW": { + "type": "boolean", + "required": false, + "default": "false" + }, + "SVI_NETFLOW_MONITOR": { + "type": "string", + "required": false, + "default": "", + "is_show": "\"ENABLE_NETFLOW==true\"" + }, + "VLAN_NETFLOW_MONITOR": { + "type": "string", + "required": false, + "default": "", + "is_show": "\"ENABLE_NETFLOW==true\"" + }, + "MULTISITE_CONN": { + "type": "structureArray", + "required": false, + "default": "" + } + } + }, + + "networkv2_validated_00001": + [ + { + "net_name": "net1", + "net_id": 31001, + "vrf_name": "Tenant-1", + "net_template": "Default_Network_Universal", + "net_extension_template": "Default_Network_Extension_Universal", + "network_template_config": { + "gatewayIpAddress": "2.1.1.1/24", + "vlanId": 1001, + "nveId": "1", + "gatewayIpV6Address": "", + "isLayer2Only": "false", + "suppressArp": "", + "enableL3OnBorder": "true", + "mcastGroup": "", + "dhcpServers": { + "dhcpServers": [ + { + "srvrAddr": "1.1.1.1", + "srvrVrf": "vrf1" + } + ] + }, + "loopbackId": 10, + "vlanName": "", + "intfDescription": "test_interface", + "mtu": 1800, + "tag": "12345", + "trmEnabled": "", + "secondaryGW1": "3.1.1.1/24", + "secondaryGW2": "", + "secondaryGW3": "", + "secondaryGW4": "", + "rtBothAuto": "false", + "ENABLE_NETFLOW": "false", + "MULTISITE_CONN": "" + }, + "attach": [ + { + "attached": true, + "detachSwitchPorts": [], + "dot1QVlan": "1", + "extensionValues": "", + "fabric": "", + "freeformConfig": "", + "ipAddress": "192.168.2.1", + "switchPorts": [ + "Ethernet1/10", + "Ethernet1/11", + "Ethernet1/12" + ], + "torPorts": [ + { + "switch": "Tor1", + "ports": [ + "Ethernet1/10", + "Ethernet1/11", + "Ethernet1/12" + ] + } + ], + "untagged": false, + "vlan": "-1", + "deploy": true + } + ] + } + ], + + "networkv2_ip_sn": + { + "192.168.2.1": "9SFRKD0M6AS", + "192.168.2.2": "9KRDG57QQZT", + "192.168.2.3": "959A4D0NYXI" + }, + + "networkv2_hn_sn": + { + "leaf1": "9SFRKD0M6AS", + "leaf2": "9KRDG57QQZT", + "tor": "959A4D0NYXI" + }, + + "networkv2_ip_fab": + { + "192.168.2.1": "test_netv2", + "192.168.2.2": "test_netv2", + "192.168.2.3": "test_netv2" + }, + + "networkv2_sn_fab": + { + "9SFRKD0M6AS": "test_netv2", + "9KRDG57QQZT": "test_netv2", + "959A4D0NYXI": "test_netv2" + }, + + "networkv2_have_create_00001": + [ + { + "id": 748, + "fabric": "test_netv2", + "networkName": "net1", + "networkId": 31001, + "networkTemplate": "Default_Network_Universal", + "networkExtensionTemplate": "Default_Network_Extension_Universal", + "vrf": "Tenant-1", + "networkTemplateConfig": { + "suppressArp": "", + "secondaryGW3": "", + "secondaryGW2": "", + "loopbackId": "10", + "secondaryGW1": "3.1.1.1/24", + "enableL3OnBorder": "false", + "type": "Normal", + "SVI_NETFLOW_MONITOR": "", + "enableIR": "false", + "rtBothAuto": "false", + "isLayer2Only": 
"false", + "MULTISITE_CONN": "", + "ENABLE_NETFLOW": "false", + "dhcpServerAddr3": "", + "gatewayIpV6Address": "", + "dhcpServerAddr2": "", + "tag": "12345", + "dhcpServerAddr1": "1.1.1.1", + "nveId": "1", + "vrfDhcp": "vrf1", + "secondaryGW4": "", + "vlanId": "1001", + "gatewayIpAddress": "2.1.1.1/24", + "vlanName": "", + "mtu": "1800", + "intfDescription": "test_interface", + "mcastGroup": "239.11.11.1", + "igmpVersion": "2", + "trmEnabled": "", + "VLAN_NETFLOW_MONITOR": "", + "dhcpServers": { + "dhcpServers": [ + { + "srvrAddr": "1.1.1.1", + "srvrVrf": "vrf1" + } + ] + } + } + } + ], + + "networkv2_have_attach_00001": + [ + { + "networkName": "net1", + "lanAttachList": [ + { + "networkName": "net1", + "ipAddress": "192.168.2.1", + "instanceValues": "", + "torPorts": [ + { + "switch": "Tor1", + "ports": [ + "Ethernet1/10", + "Ethernet1/11" + ] + } + ], + "fabric": "test_netv2", + "vlan": 1001, + "serialNumber": "9SFRKD0M6AS", + "deployment": true, + "attached": true, + "extensionValues": "", + "freeformConfig": "", + "dot1QVlan": 1, + "detachSwitchPorts": "", + "switchPorts": [ + "Ethernet1/10", + "Ethernet1/11" + ], + "untagged": false + }, + { + "networkName": "net1", + "ipAddress": "192.168.2.2", + "instanceValues": "", + "torPorts": "", + "fabric": "test_netv2", + "vlan": null, + "serialNumber": "9KRDG57QQZT", + "deployment": false, + "attached": false, + "extensionValues": "", + "freeformConfig": "", + "dot1QVlan": 1, + "detachSwitchPorts": "", + "switchPorts": [], + "untagged": false + } + ] + } + ], + + "networkv2_have_attach_00002": + [ + { + "networkName": "net2", + "lanAttachList": [ + { + "networkName": "net2", + "ipAddress": "192.168.2.1", + "instanceValues": "", + "torPorts": [ + { + "switch": "Tor1", + "ports": [ + "Ethernet1/12", + "Ethernet1/13" + ] + } + ], + "fabric": "test_netv2", + "vlan": 1001, + "serialNumber": "9SFRKD0M6AS", + "deployment": true, + "attached": true, + "extensionValues": "", + "freeformConfig": "", + "dot1QVlan": 1, + "detachSwitchPorts": "", + "switchPorts": [ + "Ethernet1/12", + "Ethernet1/13" + ], + "untagged": false + }, + { + "networkName": "net2", + "ipAddress": "192.168.2.2", + "instanceValues": "", + "torPorts": "", + "fabric": "test_netv2", + "vlan": null, + "serialNumber": "9KRDG57QQZT", + "deployment": false, + "attached": false, + "extensionValues": "", + "freeformConfig": "", + "dot1QVlan": 1, + "detachSwitchPorts": "", + "switchPorts": [], + "untagged": false + } + ] + } + ], + + "networkv2_want_attach_00001": + [ + { + "networkName": "net1", + "lanAttachList": [ + { + "networkName": "net1", + "ipAddress": "192.168.2.1", + "instanceValues": "", + "torPorts": "", + "fabric": "test_netv2", + "vlan": 1001, + "serialNumber": "9SFRKD0M6AS", + "deployment": false, + "attached": false, + "extensionValues": "", + "freeformConfig": "", + "dot1QVlan": 1, + "detachSwitchPorts": "", + "switchPorts": "", + "untagged": false, + "d_key": "serialNumber" + }, + { + "networkName": "net1", + "ipAddress": "192.168.2.2", + "instanceValues": "", + "torPorts": "", + "fabric": "test_netv2", + "vlan": null, + "serialNumber": "9KRDG57QQZT", + "deployment": false, + "attached": false, + "extensionValues": "", + "freeformConfig": "", + "dot1QVlan": 1, + "detachSwitchPorts": "", + "switchPorts": [], + "untagged": false, + "d_key": "serialNumber" + } + ] + } + ], + + "networkv2_want_attach_00002": + [ + { + "networkName": "net2", + "lanAttachList": [ + { + "networkName": "net2", + "ipAddress": "192.168.2.1", + "instanceValues": "", + "torPorts": [ + { + "switch": 
"Tor1", + "ports": [ + "Ethernet1/12", + "Ethernet1/13" + ] + } + ], + "fabric": "test_netv2", + "vlan": 1001, + "serialNumber": "9SFRKD0M6AS", + "deployment": true, + "attached": true, + "extensionValues": "", + "freeformConfig": "", + "dot1QVlan": 1, + "detachSwitchPorts": "", + "switchPorts": [ + "Ethernet1/12", + "Ethernet1/13" + ], + "untagged": false, + "d_key": "serialNumber" + }, + { + "networkName": "net2", + "ipAddress": "192.168.2.2", + "instanceValues": "", + "torPorts": "", + "fabric": "test_netv2", + "vlan": null, + "serialNumber": "9KRDG57QQZT", + "deployment": false, + "attached": false, + "extensionValues": "", + "freeformConfig": "", + "dot1QVlan": 1, + "detachSwitchPorts": "", + "switchPorts": [], + "untagged": false, + "d_key": "serialNumber" + } + ] + } + ], + + "networkv2_want_attach_00003": + [ + { + "networkName": "net1", + "lanAttachList": [ + { + "networkName": "net1", + "ipAddress": "192.168.2.1", + "instanceValues": "", + "torPorts": [ + { + "switch": "Tor1", + "ports": [ + "Ethernet1/12", + "Ethernet1/13" + ] + } + ], + "fabric": "test_netv1", + "vlan": 1001, + "serialNumber": "9SFRKD0M6AS", + "deployment": true, + "attached": true, + "extensionValues": "", + "freeformConfig": "", + "dot1QVlan": 1, + "detachSwitchPorts": "", + "switchPorts": [ + "Ethernet1/12", + "Ethernet1/13" + ], + "untagged": false, + "d_key": "serialNumber" + }, + { + "networkName": "net1", + "ipAddress": "192.168.2.2", + "instanceValues": "", + "torPorts": "", + "fabric": "test_netv2", + "vlan": null, + "serialNumber": "9KRDG57QQZT", + "deployment": false, + "attached": false, + "extensionValues": "", + "freeformConfig": "", + "dot1QVlan": 1, + "detachSwitchPorts": "", + "switchPorts": [], + "untagged": false, + "d_key": "serialNumber" + } + ] + } + ], + + "networkv2_diff_delete_00001": + { + "net1": "DEPLOYED" + } +} diff --git a/tests/unit/modules/dcnm/fixtures/dcnm_networkv2/dcnm_networkv2_response.json b/tests/unit/modules/dcnm/fixtures/dcnm_networkv2/dcnm_networkv2_response.json new file mode 100644 index 000000000..5ef642395 --- /dev/null +++ b/tests/unit/modules/dcnm/fixtures/dcnm_networkv2/dcnm_networkv2_response.json @@ -0,0 +1,2721 @@ +{ + "networkv2_inv_details": { + "192.168.2.1": { + "switchRoleEnum": "Leaf", + "vrf": "management", + "fabricTechnology": "VXLANFabric", + "deviceType": "Switch_Fabric", + "fabricId": 6, + "name": null, + "domainID": 0, + "wwn": null, + "membership": null, + "ports": 0, + "model": "N9K-C9300v", + "version": null, + "upTime": 0, + "ipAddress": "192.168.2.1", + "mgmtAddress": null, + "vendor": "Cisco", + "displayHdrs": null, + "displayValues": null, + "colDBId": 0, + "fid": 0, + "isLan": false, + "is_smlic_enabled": false, + "present": true, + "licenseViolation": false, + "managable": true, + "mds": false, + "connUnitStatus": 0, + "standbySupState": 0, + "activeSupSlot": 0, + "unmanagableCause": "", + "lastScanTime": 0, + "fabricName": "test_netv2", + "modelType": 0, + "logicalName": "leaf1", + "switchDbID": 86340, + "uid": 0, + "release": "10.3(1)", + "location": null, + "contact": null, + "upTimeStr": "21 days, 01:17:37", + "upTimeNumber": 0, + "network": null, + "nonMdsModel": null, + "numberOfPorts": 0, + "availPorts": 0, + "usedPorts": 0, + "vsanWwn": null, + "vsanWwnName": null, + "swWwn": null, + "swWwnName": null, + "serialNumber": "9SFRKD0M6AS", + "domain": null, + "principal": null, + "status": "ok", + "index": 0, + "licenseDetail": null, + "isPmCollect": false, + "sanAnalyticsCapable": false, + "vdcId": 0, + "vdcName": "", + "vdcMac": 
null, + "fcoeEnabled": false, + "cpuUsage": 0, + "memoryUsage": 0, + "scope": null, + "fex": false, + "health": -1, + "npvEnabled": false, + "linkName": null, + "username": null, + "primaryIP": "", + "primarySwitchDbID": 0, + "secondaryIP": "", + "secondarySwitchDbID": 0, + "isEchSupport": false, + "moduleIndexOffset": 9999, + "sysDescr": "", + "isTrapDelayed": false, + "switchRole": "leaf", + "mode": "Normal", + "hostName": "leaf1", + "ipDomain": "", + "systemMode": "Normal", + "sourceVrf": "management", + "sourceInterface": "mgmt0", + "protoDiscSettings": null, + "operMode": null, + "modules": null, + "fexMap": {}, + "isVpcConfigured": false, + "vpcDomain": 0, + "role": null, + "peer": null, + "peerSerialNumber": null, + "peerSwitchDbId": 0, + "peerlinkState": null, + "keepAliveState": null, + "consistencyState": false, + "sendIntf": null, + "recvIntf": null, + "interfaces": null, + "elementType": null, + "monitorMode": null, + "freezeMode": null, + "cfsSyslogStatus": 1, + "isNonNexus": false, + "swUUIDId": 86370, + "swUUID": "DCNM-UUID-86370", + "swType": null, + "ccStatus": "In-Sync", + "operStatus": "Minor", + "intentedpeerName": "" + }, + "192.168.2.2": { + "switchRoleEnum": "Leaf", + "vrf": "management", + "fabricTechnology": "VXLANFabric", + "deviceType": "Switch_Fabric", + "fabricId": 6, + "name": null, + "domainID": 0, + "wwn": null, + "membership": null, + "ports": 0, + "model": "N9K-C9300v", + "version": null, + "upTime": 0, + "ipAddress": "192.168.2.2", + "mgmtAddress": null, + "vendor": "Cisco", + "displayHdrs": null, + "displayValues": null, + "colDBId": 0, + "fid": 0, + "isLan": false, + "is_smlic_enabled": false, + "present": true, + "licenseViolation": false, + "managable": true, + "mds": false, + "connUnitStatus": 0, + "standbySupState": 0, + "activeSupSlot": 0, + "unmanagableCause": "", + "lastScanTime": 0, + "fabricName": "test_netv2", + "modelType": 0, + "logicalName": "leaf2", + "switchDbID": 86290, + "uid": 0, + "release": "10.3(1)", + "location": null, + "contact": null, + "upTimeStr": "21 days, 01:17:32", + "upTimeNumber": 0, + "network": null, + "nonMdsModel": null, + "numberOfPorts": 0, + "availPorts": 0, + "usedPorts": 0, + "vsanWwn": null, + "vsanWwnName": null, + "swWwn": null, + "swWwnName": null, + "serialNumber": "9KRDG57QQZT", + "domain": null, + "principal": null, + "status": "ok", + "index": 0, + "licenseDetail": null, + "isPmCollect": false, + "sanAnalyticsCapable": false, + "vdcId": 0, + "vdcName": "", + "vdcMac": null, + "fcoeEnabled": false, + "cpuUsage": 0, + "memoryUsage": 0, + "scope": null, + "fex": false, + "health": -1, + "npvEnabled": false, + "linkName": null, + "username": null, + "primaryIP": "", + "primarySwitchDbID": 0, + "secondaryIP": "", + "secondarySwitchDbID": 0, + "isEchSupport": false, + "moduleIndexOffset": 9999, + "sysDescr": "", + "isTrapDelayed": false, + "switchRole": "leaf", + "mode": "Normal", + "hostName": "leaf2", + "ipDomain": "", + "systemMode": "Normal", + "sourceVrf": "management", + "sourceInterface": "mgmt0", + "protoDiscSettings": null, + "operMode": null, + "modules": null, + "fexMap": {}, + "isVpcConfigured": false, + "vpcDomain": 0, + "role": null, + "peer": null, + "peerSerialNumber": null, + "peerSwitchDbId": 0, + "peerlinkState": null, + "keepAliveState": null, + "consistencyState": false, + "sendIntf": null, + "recvIntf": null, + "interfaces": null, + "elementType": null, + "monitorMode": null, + "freezeMode": null, + "cfsSyslogStatus": 1, + "isNonNexus": false, + "swUUIDId": 86320, + "swUUID": 
"DCNM-UUID-86320", + "swType": null, + "ccStatus": "In-Sync", + "operStatus": "Minor", + "intentedpeerName": "" + }, + "192.168.2.3": { + "switchRoleEnum": "Tor", + "vrf": "management", + "fabricTechnology": "VXLANFabric", + "deviceType": "Switch_Fabric", + "fabricId": 6, + "name": null, + "domainID": 0, + "wwn": null, + "membership": null, + "ports": 0, + "model": "N9K-C9300v", + "version": null, + "upTime": 0, + "ipAddress": "192.168.2.3", + "mgmtAddress": null, + "vendor": "Cisco", + "displayHdrs": null, + "displayValues": null, + "colDBId": 0, + "fid": 0, + "isLan": false, + "is_smlic_enabled": false, + "present": true, + "licenseViolation": false, + "managable": true, + "mds": false, + "connUnitStatus": 0, + "standbySupState": 0, + "activeSupSlot": 0, + "unmanagableCause": "", + "lastScanTime": 0, + "fabricName": "test_netv2", + "modelType": 0, + "logicalName": "tor", + "switchDbID": 101110, + "uid": 0, + "release": "10.3(1)", + "location": null, + "contact": null, + "upTimeStr": "21 days, 00:27:44", + "upTimeNumber": 0, + "network": null, + "nonMdsModel": null, + "numberOfPorts": 0, + "availPorts": 0, + "usedPorts": 0, + "vsanWwn": null, + "vsanWwnName": null, + "swWwn": null, + "swWwnName": null, + "serialNumber": "959A4D0NYXI", + "domain": null, + "principal": null, + "status": "ok", + "index": 0, + "licenseDetail": null, + "isPmCollect": false, + "sanAnalyticsCapable": false, + "vdcId": 0, + "vdcName": "", + "vdcMac": null, + "fcoeEnabled": false, + "cpuUsage": 0, + "memoryUsage": 0, + "scope": null, + "fex": false, + "health": -1, + "npvEnabled": false, + "linkName": null, + "username": null, + "primaryIP": "", + "primarySwitchDbID": 0, + "secondaryIP": "", + "secondarySwitchDbID": 0, + "isEchSupport": false, + "moduleIndexOffset": 9999, + "sysDescr": "", + "isTrapDelayed": false, + "switchRole": "tor", + "mode": "Normal", + "hostName": "tor", + "ipDomain": "", + "systemMode": "Normal", + "sourceVrf": "management", + "sourceInterface": "mgmt0", + "protoDiscSettings": null, + "operMode": null, + "modules": null, + "fexMap": {}, + "isVpcConfigured": false, + "vpcDomain": 0, + "role": null, + "peer": null, + "peerSerialNumber": null, + "peerSwitchDbId": 0, + "peerlinkState": null, + "keepAliveState": null, + "consistencyState": false, + "sendIntf": null, + "recvIntf": null, + "interfaces": null, + "elementType": null, + "monitorMode": null, + "freezeMode": null, + "cfsSyslogStatus": 1, + "isNonNexus": false, + "swUUIDId": 99910, + "swUUID": "DCNM-UUID-99910", + "swType": null, + "ccStatus": "In-Sync", + "operStatus": "Minor", + "intentedpeerName": "" + } + }, + + "networkv2_fab_details": { + "id": 6, + "fabricId": "FABRIC-6", + "fabricName": "test_netv2", + "fabricType": "Switch_Fabric", + "fabricTypeFriendly": "Switch Fabric", + "fabricTechnology": "VXLANFabric", + "fabricTechnologyFriendly": "VXLAN EVPN", + "provisionMode": "DCNMTopDown", + "deviceType": "n9k", + "replicationMode": "Multicast", + "operStatus": "MINOR", + "asn": "32123", + "siteId": "32123", + "templateName": "Easy_Fabric", + "nvPairs": { + "MSO_SITE_ID": "", + "PHANTOM_RP_LB_ID1": "", + "PHANTOM_RP_LB_ID2": "", + "PHANTOM_RP_LB_ID3": "", + "IBGP_PEER_TEMPLATE": "", + "PHANTOM_RP_LB_ID4": "", + "abstract_ospf": "base_ospf", + "FEATURE_PTP": "false", + "L3_PARTITION_ID_RANGE": "50000-59000", + "DHCP_START_INTERNAL": "", + "SSPINE_COUNT": "0", + "NXC_DEST_VRF": "management", + "ADVERTISE_PIP_BGP": "false", + "FABRIC_VPC_QOS_POLICY_NAME": "spine_qos_for_fabric_vpc_peering", + "BFD_PIM_ENABLE": "false", + "DHCP_END": 
"", + "UNDERLAY_IS_V6": "false", + "FABRIC_VPC_DOMAIN_ID": "", + "SEED_SWITCH_CORE_INTERFACES": "", + "ALLOW_NXC_PREV": "true", + "FABRIC_MTU_PREV": "9216", + "BFD_ISIS_ENABLE": "false", + "HD_TIME": "180", + "AUTO_UNIQUE_VRF_LITE_IP_PREFIX": "false", + "OSPF_AUTH_ENABLE": "false", + "LOOPBACK1_IPV6_RANGE": "", + "ROUTER_ID_RANGE": "", + "MSO_CONNECTIVITY_DEPLOYED": "", + "ENABLE_MACSEC": "false", + "DEAFULT_QUEUING_POLICY_OTHER": "queuing_policy_default_other", + "UNNUM_DHCP_START_INTERNAL": "", + "MACSEC_REPORT_TIMER": "", + "PREMSO_PARENT_FABRIC": "", + "UNNUM_DHCP_END_INTERNAL": "", + "PTP_DOMAIN_ID": "", + "USE_LINK_LOCAL": "false", + "AUTO_SYMMETRIC_VRF_LITE": "false", + "BGP_AS_PREV": "32123", + "ENABLE_PBR": "false", + "DCI_SUBNET_TARGET_MASK": "30", + "VPC_PEER_LINK_PO": "500", + "ISIS_AUTH_ENABLE": "false", + "PER_VRF_LOOPBACK_AUTO_PROVISION": "false", + "REPLICATION_MODE": "Multicast", + "ANYCAST_RP_IP_RANGE": "110.254.254.0/24", + "VPC_ENABLE_IPv6_ND_SYNC": "true", + "abstract_isis_interface": "isis_interface", + "TCAM_ALLOCATION": "true", + "SERVICE_NETWORK_VLAN_RANGE": "3000-3199", + "MACSEC_ALGORITHM": "", + "ISIS_LEVEL": "level-2", + "SUBNET_TARGET_MASK": "30", + "abstract_anycast_rp": "anycast_rp", + "AUTO_SYMMETRIC_DEFAULT_VRF": "false", + "ENABLE_NETFLOW": "false", + "DEAFULT_QUEUING_POLICY_R_SERIES": "queuing_policy_default_r_series", + "temp_vpc_peer_link": "int_vpc_peer_link_po", + "BROWNFIELD_NETWORK_NAME_FORMAT": "Auto_Net_VNI$$VNI$$_VLAN$$VLAN_ID$$", + "ENABLE_FABRIC_VPC_DOMAIN_ID": "false", + "IBGP_PEER_TEMPLATE_LEAF": "", + "DCI_SUBNET_RANGE": "110.33.0.0/16", + "MGMT_GW_INTERNAL": "", + "ENABLE_NXAPI": "true", + "VRF_LITE_AUTOCONFIG": "Manual", + "GRFIELD_DEBUG_FLAG": "Disable", + "VRF_VLAN_RANGE": "2000-2299", + "ISIS_AUTH_KEYCHAIN_NAME": "", + "OBJECT_TRACKING_NUMBER_RANGE": "100-299", + "SSPINE_ADD_DEL_DEBUG_FLAG": "Disable", + "abstract_bgp_neighbor": "evpn_bgp_rr_neighbor", + "OSPF_AUTH_KEY_ID": "", + "PIM_HELLO_AUTH_ENABLE": "false", + "abstract_feature_leaf": "base_feature_leaf_upg", + "BFD_AUTH_ENABLE": "false", + "BGP_LB_ID": "0", + "LOOPBACK1_IP_RANGE": "10.31.0.0/22", + "EXTRA_CONF_TOR": "", + "AAA_SERVER_CONF": "", + "VPC_PEER_KEEP_ALIVE_OPTION": "management", + "AUTO_VRFLITE_IFC_DEFAULT_VRF": "false", + "enableRealTimeBackup": "", + "V6_SUBNET_TARGET_MASK": "126", + "STRICT_CC_MODE": "false", + "BROWNFIELD_SKIP_OVERLAY_NETWORK_ATTACHMENTS": "false", + "VPC_PEER_LINK_VLAN": "3600", + "abstract_trunk_host": "int_trunk_host", + "NXAPI_HTTP_PORT": "80", + "BGP_AUTH_ENABLE": "false", + "MST_INSTANCE_RANGE": "", + "PM_ENABLE_PREV": "false", + "NXC_PROXY_PORT": "8080", + "RP_MODE": "asm", + "enableScheduledBackup": "", + "abstract_ospf_interface": "ospf_interface_11_1", + "BFD_OSPF_ENABLE": "false", + "MACSEC_FALLBACK_ALGORITHM": "", + "UNNUM_DHCP_END": "", + "LOOPBACK0_IP_RANGE": "10.21.0.0/22", + "ENABLE_AAA": "false", + "DEPLOYMENT_FREEZE": "false", + "L2_HOST_INTF_MTU_PREV": "9216", + "NETFLOW_MONITOR_LIST": "", + "ENABLE_AGENT": "false", + "NTP_SERVER_IP_LIST": "", + "OVERLAY_MODE": "cli", + "MACSEC_FALLBACK_KEY_STRING": "", + "PER_VRF_LOOPBACK_AUTO_PROVISION_PREV": "false", + "FF": "Easy_Fabric", + "STP_ROOT_OPTION": "unmanaged", + "FABRIC_TYPE": "Switch_Fabric", + "ISIS_OVERLOAD_ENABLE": "false", + "NETFLOW_RECORD_LIST": "", + "SPINE_COUNT": "0", + "abstract_extra_config_bootstrap": "extra_config_bootstrap_11_1", + "MPLS_LOOPBACK_IP_RANGE": "", + "LINK_STATE_ROUTING_TAG_PREV": "UNDERLAY", + "DHCP_ENABLE": "false", + "BFD_AUTH_KEY_ID": "", + 
"MSO_SITE_GROUP_NAME": "", + "MGMT_PREFIX_INTERNAL": "", + "DHCP_IPV6_ENABLE_INTERNAL": "", + "BGP_AUTH_KEY_TYPE": "3", + "SITE_ID": "32123", + "temp_anycast_gateway": "anycast_gateway", + "BRFIELD_DEBUG_FLAG": "Disable", + "BGP_AS": "32123", + "BOOTSTRAP_MULTISUBNET": "#Scope_Start_IP, Scope_End_IP, Scope_Default_Gateway, Scope_Subnet_Prefix", + "ISIS_P2P_ENABLE": "false", + "ENABLE_NGOAM": "true", + "CDP_ENABLE": "false", + "PTP_LB_ID": "", + "DHCP_IPV6_ENABLE": "", + "MACSEC_KEY_STRING": "", + "TOPDOWN_CONFIG_RM_TRACKING": "notstarted", + "OSPF_AUTH_KEY": "", + "ENABLE_FABRIC_VPC_DOMAIN_ID_PREV": "false", + "EXTRA_CONF_LEAF": "", + "vrf_extension_template": "Default_VRF_Extension_Universal", + "DHCP_START": "", + "ENABLE_TRM": "false", + "ENABLE_PVLAN_PREV": "false", + "FEATURE_PTP_INTERNAL": "false", + "ENABLE_NXAPI_HTTP": "true", + "abstract_isis": "base_isis_level2", + "MPLS_LB_ID": "", + "FABRIC_VPC_DOMAIN_ID_PREV": "", + "ROUTE_MAP_SEQUENCE_NUMBER_RANGE": "1-65534", + "NETWORK_VLAN_RANGE": "2300-2999", + "STATIC_UNDERLAY_IP_ALLOC": "false", + "MGMT_V6PREFIX_INTERNAL": "", + "MPLS_HANDOFF": "false", + "STP_BRIDGE_PRIORITY": "", + "scheduledTime": "", + "ANYCAST_LB_ID": "", + "MACSEC_CIPHER_SUITE": "", + "STP_VLAN_RANGE": "", + "MSO_CONTROLER_ID": "", + "POWER_REDUNDANCY_MODE": "ps-redundant", + "BFD_ENABLE": "false", + "abstract_extra_config_leaf": "extra_config_leaf", + "ANYCAST_GW_MAC": "2020.0000.00aa", + "abstract_dhcp": "base_dhcp", + "default_pvlan_sec_network": "", + "EXTRA_CONF_SPINE": "", + "NTP_SERVER_VRF": "", + "SPINE_SWITCH_CORE_INTERFACES": "", + "LINK_STATE_ROUTING_TAG": "UNDERLAY", + "ISIS_OVERLOAD_ELAPSE_TIME": "", + "RP_LB_ID": "254", + "BOOTSTRAP_CONF": "", + "LINK_STATE_ROUTING": "ospf", + "ISIS_AUTH_KEY": "", + "network_extension_template": "Default_Network_Extension_Universal", + "DNS_SERVER_IP_LIST": "", + "DOMAIN_NAME_INTERNAL": "", + "ENABLE_EVPN": "true", + "abstract_multicast": "base_multicast_11_1", + "VPC_DELAY_RESTORE_TIME": "60", + "BFD_AUTH_KEY": "", + "ESR_OPTION": "PBR", + "AGENT_INTF": "eth0", + "FABRIC_MTU": "9216", + "L3VNI_MCAST_GROUP": "", + "UNNUM_BOOTSTRAP_LB_ID": "", + "VPC_DOMAIN_ID_RANGE": "1-1000", + "HOST_INTF_ADMIN_STATE": "true", + "BFD_IBGP_ENABLE": "false", + "AUTO_UNIQUE_VRF_LITE_IP_PREFIX_PREV": "false", + "VPC_AUTO_RECOVERY_TIME": "360", + "DNS_SERVER_VRF": "", + "UPGRADE_FROM_VERSION": "", + "BANNER": "", + "NXC_SRC_INTF": "", + "PER_VRF_LOOPBACK_IP_RANGE": "", + "SYSLOG_SEV": "", + "abstract_loopback_interface": "int_fabric_loopback_11_1", + "SYSLOG_SERVER_VRF": "", + "EXTRA_CONF_INTRA_LINKS": "", + "SNMP_SERVER_HOST_TRAP": "true", + "abstract_extra_config_spine": "extra_config_spine", + "PIM_HELLO_AUTH_KEY": "", + "temp_vpc_domain_mgmt": "vpc_domain_mgmt", + "V6_SUBNET_RANGE": "", + "SUBINTERFACE_RANGE": "2-511", + "abstract_routed_host": "int_routed_host", + "BGP_AUTH_KEY": "", + "ENABLE_PVLAN": "false", + "INBAND_DHCP_SERVERS": "", + "default_network": "Default_Network_Universal", + "ISIS_AUTH_KEYCHAIN_KEY_ID": "", + "MGMT_V6PREFIX": "", + "abstract_feature_spine": "base_feature_spine_upg", + "ENABLE_DEFAULT_QUEUING_POLICY": "false", + "ANYCAST_BGW_ADVERTISE_PIP": "false", + "NETFLOW_EXPORTER_LIST": "", + "abstract_vlan_interface": "int_fabric_vlan_11_1", + "RP_COUNT": "2", + "FABRIC_NAME": "test_netv2", + "abstract_pim_interface": "pim_interface", + "PM_ENABLE": "false", + "LOOPBACK0_IPV6_RANGE": "", + "dcnmUser": "admin", + "DEFAULT_VRF_REDIS_BGP_RMAP": "", + "NVE_LB_ID": "1", + "OVERLAY_MODE_PREV": "cli", + 
"VPC_DELAY_RESTORE": "150", + "NXAPI_HTTPS_PORT": "443", + "ENABLE_VPC_PEER_LINK_NATIVE_VLAN": "false", + "L2_HOST_INTF_MTU": "9216", + "abstract_route_map": "route_map", + "INBAND_MGMT_PREV": "false", + "EXT_FABRIC_TYPE": "", + "abstract_vpc_domain": "base_vpc_domain_11_1", + "ACTIVE_MIGRATION": "false", + "COPP_POLICY": "strict", + "DHCP_END_INTERNAL": "", + "BOOTSTRAP_ENABLE": "false", + "ADVERTISE_PIP_ON_BORDER": "true", + "default_vrf": "Default_VRF_Universal", + "NXC_PROXY_SERVER": "", + "OSPF_AREA_ID": "0.0.0.0", + "abstract_extra_config_tor": "extra_config_tor", + "SYSLOG_SERVER_IP_LIST": "", + "BOOTSTRAP_ENABLE_PREV": "false", + "ENABLE_TENANT_DHCP": "true", + "ANYCAST_RP_IP_RANGE_INTERNAL": "110.254.254.0/24", + "RR_COUNT": "2", + "BOOTSTRAP_MULTISUBNET_INTERNAL": "", + "MGMT_GW": "", + "UNNUM_DHCP_START": "", + "MGMT_PREFIX": "", + "abstract_bgp_rr": "evpn_bgp_rr", + "INBAND_MGMT": "false", + "abstract_bgp": "base_bgp", + "SLA_ID_RANGE": "10000-19999", + "ENABLE_NETFLOW_PREV": "false", + "SUBNET_RANGE": "110.4.0.0/16", + "DEAFULT_QUEUING_POLICY_CLOUDSCALE": "queuing_policy_default_8q_cloudscale", + "MULTICAST_GROUP_SUBNET": "239.11.11.0/25", + "FABRIC_INTERFACE_TYPE": "p2p", + "ALLOW_NXC": "true", + "OVERWRITE_GLOBAL_NXC": "false", + "FABRIC_VPC_QOS": "false", + "AAA_REMOTE_IP_ENABLED": "false", + "L2_SEGMENT_ID_RANGE": "30000-49000" + }, + "vrfTemplate": "Default_VRF_Universal", + "networkTemplate": "Default_Network_Universal", + "vrfExtensionTemplate": "Default_VRF_Extension_Universal", + "networkExtensionTemplate": "Default_Network_Extension_Universal", + "modifiedOn": 1713867710252 + }, + + "resp_net_template": + { + "RETURN_CODE": 200, + "MESSAGE": "OK", + "METHOD": "GET", + "REQUEST_PATH": "/appcenter/cisco/ndfc/api/v1/configtemplate/rest/config/templates/Default_Network_Universal", + "DATA":{ + "instanceClassId":3170, + "assignedInstanceClassId":0, + "instanceName":"com.cisco.dcbu.dcm.model.cfgtemplate.ConfigTemplate:name=Default_Network_Universal:type=true", + "name":"Default_Network_Universal", + "description":" Default Network Universal Template", + "userDefined":true, + "parameters":[ + { + "name":"gatewayIpAddress", + "description":"None", + "parameterType":"ipV4AddressWithSubnet", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"IPv4 Gateway/NetMask", + "Description":"example 192.0.2.1/24", + "IsMandatory":"false", + "IsMsoManaged":"true", + "IsGateway":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"vlanId", + "description":"None", + "parameterType":"integer", + "metaProperties":{ + + }, + "annotations":{ + "IsVlanId":"true", + "IsMandatory":"true", + "Section":"\"Attach/Hidden\"", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":false + }, + { + "name":"segmentId", + "description":"None", + "parameterType":"integer", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"true", + "IsSegmentId":"true", + "Section":"\"Hidden\"", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":false + }, + { + "name":"nveId", + "description":"None", + "parameterType":"integer", + "metaProperties":{ + "defaultValue":"1" + }, + "annotations":{ + "DisplayName":"NVE Identifier", + "IsNveId":"true", + "IsMandatory":"true", + "Section":"\"Attach/Hidden\"" + }, + "structureParameters":{ + + }, + 
"parameterTypeStructure":false, + "defaultValue":"None", + "optional":false + }, + { + "name":"type", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"Network Type", + "IsMandatory":"false", + "Section":"\"Hidden\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"vrfName", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"false", + "Section":"\"Hidden\"", + "IsMsoManaged":"true", + "IsVrfName":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"gatewayIpV6Address", + "description":"None", + "parameterType":"string[]", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"IPv6 Gateway/Prefix List", + "Description":"example 2001:db8::1/64,2001:db9::1/64", + "IsMandatory":"false", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"isLayer2Only", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + "defaultValue":"false" + }, + "annotations":{ + "DisplayName":"Layer 2 Only", + "IsMandatory":"false", + "IsLayer2Only":"true", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"suppressArp", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"ARP Suppression", + "Description":"ARP suppression is only supported if SVI is present when Layer-2-Only is not enabled. 
NX-OS Specific", + "IsMandatory":"false", + "Section":"\"Advanced\"", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"enableIR", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + "defaultValue":"false" + }, + "annotations":{ + "ReadOnly":"true", + "Description":"Read-only per network, Fabric-wide setting", + "IsMandatory":"false", + "IsFabricInstance":"true", + "DisplayName":"Ingress Replication", + "IsIngressReplication":"true", + "Section":"\"Advanced\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"mcastGroup", + "description":"None", + "parameterType":"ipV4Address", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"Multicast Group Address", + "IsMultiCastGroupAddress":"true", + "IsMandatory":"false", + "Section":"\"Advanced\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"dhcpServerAddr1", + "description":"None", + "parameterType":"ipV4Address", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"\"vrfDhcp!=null\"", + "IsMsoManaged":"true", + "Section":"\"Hidden\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"vrfDhcp", + "description":"None", + "parameterType":"string", + "metaProperties":{ + "minLength":"0", + "maxLength":"32" + }, + "annotations":{ + "IsMandatory":"false", + "IsMsoManaged":"true", + "Section":"\"Hidden\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"dhcpServerAddr2", + "description":"None", + "parameterType":"ipV4Address", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"false", + "IsMsoManaged":"true", + "Section":"\"Hidden\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"vrfDhcp2", + "description":"None", + "parameterType":"string", + "metaProperties":{ + "minLength":"0", + "maxLength":"32" + }, + "annotations":{ + "IsMandatory":"\"dhcpServerAddr2!=null\"", + "IsMsoManaged":"true", + "Section":"\"Hidden\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"dhcpServerAddr3", + "description":"None", + "parameterType":"ipV4Address", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"false", + "IsMsoManaged":"true", + "Section":"\"Hidden\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"vrfDhcp3", + "description":"None", + "parameterType":"string", + "metaProperties":{ + "minLength":"0", + "maxLength":"32" + }, + "annotations":{ + "IsMandatory":"\"dhcpServerAddr3!=null\"", + "IsMsoManaged":"true", + "Section":"\"Hidden\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"dhcpServers", + "description":"None", + "parameterType":"structureArray", + "metaProperties":{ + + }, + "annotations":{ + 
"DisplayName":"DHCP Relay Server Information (Max 16)", + "IsMandatory":"false", + "Section":"\"Advanced\"", + "IsMsoManaged":"true", + "IsFabricInstance":"true" + }, + "structureParameters":{ + "srvrAddr":{ + "name":"srvrAddr", + "description":"None", + "parameterType":"ipV4Address", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"true", + "DisplayName":"\"Server IP V4 Address\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":false + }, + "srvrVrf":{ + "name":"srvrVrf", + "description":"None", + "parameterType":"string", + "metaProperties":{ + "minLength":"0", + "maxLength":"32" + }, + "annotations":{ + "DisplayName":"\"Server VRF\"", + "Description":"\"If management vrf, enter \\'management\\'. If default/global vrf, enter \\'default\\'.\"", + "IsMandatory":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":false + } + }, + "parameterTypeStructure":true, + "defaultValue":"None", + "optional":true + }, + { + "name":"loopbackId", + "description":"None", + "parameterType":"integer", + "metaProperties":{ + "min":"0", + "max":"1023" + }, + "annotations":{ + "DisplayName":"Loopback ID for DHCP Relay interface (Min:0, Max:1023)", + "IsMandatory":"false", + "Section":"\"Advanced\"", + "IsMsoManaged":"true", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"vlanName", + "description":"None", + "parameterType":"string", + "metaProperties":{ + "regularExpr":"^[^\\?,\\\\,\\s]*$", + "minLength":"0", + "maxLength":"128" + }, + "annotations":{ + "DisplayName":"VLAN Name", + "Description":"If > 32 chars, enable 'system vlan long-name' for NX-OS, disable VTPv1 and VTPv2 or switch to VTPv3 for IOS XE", + "IsMandatory":"false", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"intfDescription", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"Interface Description", + "IsMandatory":"false", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"networkName", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"Network Name (should be hidden)", + "IsNetworkName":"true", + "IsMandatory":"false", + "Section":"\"Hidden\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"mtu", + "description":"None", + "parameterType":"integer", + "metaProperties":{ + "min":"68", + "max":"9216" + }, + "annotations":{ + "DisplayName":"MTU for L3 interface", + "Description":"68-9216. NX-OS Specific", + "IsMandatory":"false", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"tag", + "description":"None", + "parameterType":"long", + "metaProperties":{ + "min":"0", + "max":"4294967295", + "defaultValue":"12345" + }, + "annotations":{ + "DisplayName":"Routing Tag", + "Description":"0-4294967295. 
NX-OS Specific", + "IsMandatory":"false", + "Section":"\"Advanced\"", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"trmEnabled", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + + }, + "annotations":{ + "Description":"Enable Tenant Routed Multicast", + "IsMandatory":"false", + "IsFabricInstance":"true", + "DisplayName":"TRM Enable", + "Section":"\"Advanced\"", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"igmpVersion", + "description":"None", + "parameterType":"enum", + "metaProperties":{ + "defaultValue":"2", + "validValues":"1,2,3" + }, + "annotations":{ + "Description":"Version 2-3 for NX-OS, 1-3 for IOS XE. 'ip igmp version' command is generated if version is 1 or 3 (switch default is 2)", + "IsMandatory":"false", + "IsFabricInstance":"true", + "DisplayName":"IGMP Version", + "IsShow":"\"trmEnabled==true\"", + "Section":"\"Advanced\"", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"secondaryGW1", + "description":"None", + "parameterType":"ipV4AddressWithSubnet", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"IPv4 Secondary GW1", + "Description":"example 192.0.2.1/24", + "IsMandatory":"false", + "IsMsoManaged":"true", + "IsGateway":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"secondaryGW2", + "description":"None", + "parameterType":"ipV4AddressWithSubnet", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"IPv4 Secondary GW2", + "Description":"example 192.0.2.1/24", + "IsMandatory":"false", + "IsMsoManaged":"true", + "IsGateway":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"secondaryGW3", + "description":"None", + "parameterType":"ipV4AddressWithSubnet", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"IPv4 Secondary GW3", + "Description":"example 192.0.2.1/24", + "IsMandatory":"false", + "IsMsoManaged":"true", + "IsGateway":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"secondaryGW4", + "description":"None", + "parameterType":"ipV4AddressWithSubnet", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"IPv4 Secondary GW4", + "Description":"example 192.0.2.1/24", + "IsMandatory":"false", + "IsMsoManaged":"true", + "IsGateway":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"rtBothAuto", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + "defaultValue":"false" + }, + "annotations":{ + "DisplayName":"L2 VNI Route-Target Both Enable", + "Description":"NX-OS Specific", + "IsMandatory":"false", + "Section":"\"Advanced\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"ENABLE_NETFLOW", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + "defaultValue":"false" + }, + "annotations":{ + "DisplayName":"Enable Netflow", + "Description":"Netflow is supported only if it is enabled on fabric. 
For NX-OS only", + "IsMandatory":"false", + "Section":"\"Advanced\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"SVI_NETFLOW_MONITOR", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "Description":"Applicable only if 'Layer 2 Only' is not enabled. Provide monitor name defined in fabric setting for Layer 3 Record. For NX-OS only", + "IsMandatory":"false", + "IsFabricInstance":"true", + "DisplayName":"Interface Vlan Netflow Monitor", + "IsShow":"\"ENABLE_NETFLOW==true\"", + "Section":"\"Advanced\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"VLAN_NETFLOW_MONITOR", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "Description":"Provide monitor name defined in fabric setting for Layer 3 Record. For NX-OS only", + "IsMandatory":"false", + "IsFabricInstance":"true", + "DisplayName":"Vlan Netflow Monitor", + "IsShow":"\"ENABLE_NETFLOW==true\"", + "Section":"\"Advanced\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"isVPC", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + "defaultValue":"false" + }, + "annotations":{ + "IsHidden":"true", + "IsMandatory":"false", + "IsVPC":"true", + "IsInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"platformType", + "description":"None", + "parameterType":"enum", + "metaProperties":{ + "defaultValue":"Nexus", + "validValues":"Nexus,IOSXE" + }, + "annotations":{ + "IsMandatory":"false", + "IsHidden":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"anycastGMac", + "description":"None", + "parameterType":"macAddress", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"Anycast MAC Address Defined in fabric", + "IsHidden":"true", + "IsMandatory":"false" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"gen_address", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"false", + "IsInternal":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"gen_mask", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"false", + "IsInternal":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"flagSet", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + + }, + "annotations":{ + "IsInternal":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":false + } + ] + } + }, + + "resp_net_ext_template": + { + "RETURN_CODE": 200, + "MESSAGE": "OK", + "METHOD": "GET", + "REQUEST_PATH": "/appcenter/cisco/ndfc/api/v1/configtemplate/rest/config/templates/Default_Network_Extension_Universal", + "DATA":{ + "instanceClassId":3167, + "assignedInstanceClassId":0, + 
"instanceName":"com.cisco.dcbu.dcm.model.cfgtemplate.ConfigTemplate:name=Default_Network_Extension_Universal:type=true", + "name":"Default_Network_Extension_Universal", + "description":" Default Network Universal Template for Borders", + "userDefined":true, + "parameters":[ + { + "name":"gatewayIpAddress", + "description":"None", + "parameterType":"ipV4AddressWithSubnet", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"IPv4 Gateway/NetMask", + "Description":"example 192.0.2.1/24", + "IsMandatory":"false", + "IsMsoManaged":"true", + "IsGateway":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"vlanId", + "description":"None", + "parameterType":"integer", + "metaProperties":{ + + }, + "annotations":{ + "IsVlanId":"true", + "IsMandatory":"true", + "Section":"\"Attach/Hidden\"", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":false + }, + { + "name":"segmentId", + "description":"None", + "parameterType":"integer", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"true", + "IsSegmentId":"true", + "Section":"\"Hidden\"", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":false + }, + { + "name":"nveId", + "description":"None", + "parameterType":"integer", + "metaProperties":{ + "defaultValue":"1" + }, + "annotations":{ + "DisplayName":"NVE Identifier", + "IsNveId":"true", + "IsMandatory":"true", + "Section":"\"Attach/Hidden\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":false + }, + { + "name":"type", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"Network Type", + "IsMandatory":"false", + "Section":"\"Hidden\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"vrfName", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"false", + "Section":"\"Hidden\"", + "IsMsoManaged":"true", + "IsVrfName":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"gatewayIpV6Address", + "description":"None", + "parameterType":"string[]", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"IPv6 Gateway/Prefix List", + "Description":"example 2001:db8::1/64,2001:db9::1/64", + "IsMandatory":"false", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"isLayer2Only", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + "defaultValue":"false" + }, + "annotations":{ + "DisplayName":"Layer 2 Only", + "IsMandatory":"false", + "IsLayer2Only":"true", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"suppressArp", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"ARP Suppression", + "Description":"ARP suppression is only supported if SVI is present when Layer-2-Only is not enabled. 
NX-OS Specific", + "IsMandatory":"false", + "Section":"\"Advanced\"", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"enableIR", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + "defaultValue":"false" + }, + "annotations":{ + "ReadOnly":"true", + "Description":"Read-only per network, Fabric-wide setting", + "IsMandatory":"false", + "IsFabricInstance":"true", + "DisplayName":"Ingress Replication", + "IsIngressReplication":"true", + "Section":"\"Advanced\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"enableL3OnBorder", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + "defaultValue":"false" + }, + "annotations":{ + "IsMandatory":"false", + "IsEnableL3OnBorder":"true", + "IsFabricInstance":"true", + "DisplayName":"Enable L3 Gateway on Border", + "Section":"\"Advanced\"", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"mcastGroup", + "description":"None", + "parameterType":"ipV4Address", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"Multicast Group Address", + "IsMultiCastGroupAddress":"true", + "IsMandatory":"false", + "Section":"\"Advanced\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"dhcpServerAddr1", + "description":"None", + "parameterType":"ipV4Address", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"\"vrfDhcp!=null\"", + "IsMsoManaged":"true", + "Section":"\"Hidden\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"vrfDhcp", + "description":"None", + "parameterType":"string", + "metaProperties":{ + "minLength":"0", + "maxLength":"32" + }, + "annotations":{ + "IsMandatory":"false", + "IsMsoManaged":"true", + "Section":"\"Hidden\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"dhcpServerAddr2", + "description":"None", + "parameterType":"ipV4Address", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"false", + "IsMsoManaged":"true", + "Section":"\"Hidden\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"vrfDhcp2", + "description":"None", + "parameterType":"string", + "metaProperties":{ + "minLength":"0", + "maxLength":"32" + }, + "annotations":{ + "IsMandatory":"\"dhcpServerAddr2!=null\"", + "IsMsoManaged":"true", + "Section":"\"Hidden\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"dhcpServerAddr3", + "description":"None", + "parameterType":"ipV4Address", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"false", + "IsMsoManaged":"true", + "Section":"\"Hidden\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"vrfDhcp3", + "description":"None", + "parameterType":"string", + 
"metaProperties":{ + "minLength":"0", + "maxLength":"32" + }, + "annotations":{ + "IsMandatory":"\"dhcpServerAddr3!=null\"", + "IsMsoManaged":"true", + "Section":"\"Hidden\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"dhcpServers", + "description":"None", + "parameterType":"structureArray", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"DHCP Relay Server Information (Max 16)", + "IsMandatory":"false", + "Section":"\"Advanced\"", + "IsMsoManaged":"true", + "IsFabricInstance":"true" + }, + "structureParameters":{ + "srvrAddr":{ + "name":"srvrAddr", + "description":"None", + "parameterType":"ipV4Address", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"true", + "DisplayName":"\"Server IP V4 Address\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":false + }, + "srvrVrf":{ + "name":"srvrVrf", + "description":"None", + "parameterType":"string", + "metaProperties":{ + "minLength":"0", + "maxLength":"32" + }, + "annotations":{ + "DisplayName":"\"Server VRF\"", + "Description":"\"If management vrf, enter \\'management\\'. If default/global vrf, enter \\'default\\'.\"", + "IsMandatory":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":false + } + }, + "parameterTypeStructure":true, + "defaultValue":"None", + "optional":true + }, + { + "name":"loopbackId", + "description":"None", + "parameterType":"integer", + "metaProperties":{ + "min":"0", + "max":"1023" + }, + "annotations":{ + "DisplayName":"Loopback ID for DHCP Relay interface (Min:0, Max:1023)", + "IsMandatory":"false", + "Section":"\"Advanced\"", + "IsMsoManaged":"true", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"vlanName", + "description":"None", + "parameterType":"string", + "metaProperties":{ + "regularExpr":"^[^\\?,\\\\,\\s]*$", + "minLength":"0", + "maxLength":"128" + }, + "annotations":{ + "DisplayName":"VLAN Name", + "Description":"If > 32 chars, enable 'system vlan long-name' for NX-OS, disable VTPv1 and VTPv2 or switch to VTPv3 for IOS XE", + "IsMandatory":"false", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"intfDescription", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"Interface Description", + "IsMandatory":"false", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"networkName", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"Network Name (should be hidden)", + "IsNetworkName":"true", + "IsMandatory":"false", + "Section":"\"Hidden\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"mtu", + "description":"None", + "parameterType":"integer", + "metaProperties":{ + "min":"68", + "max":"9216" + }, + "annotations":{ + "DisplayName":"MTU for L3 interface", + "Description":"68-9216. 
NX-OS Specific", + "IsMandatory":"false", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"tag", + "description":"None", + "parameterType":"long", + "metaProperties":{ + "min":"0", + "max":"4294967295", + "defaultValue":"12345" + }, + "annotations":{ + "DisplayName":"Routing Tag", + "Description":"0-4294967295. NX-OS Specific", + "IsMandatory":"false", + "Section":"\"Advanced\"", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"trmEnabled", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + + }, + "annotations":{ + "Description":"Enable Tenant Routed Multicast", + "IsMandatory":"false", + "IsFabricInstance":"true", + "DisplayName":"TRM Enable", + "Section":"\"Advanced\"", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"igmpVersion", + "description":"None", + "parameterType":"enum", + "metaProperties":{ + "defaultValue":"2", + "validValues":"1,2,3" + }, + "annotations":{ + "Description":"Version 2-3 for NX-OS, 1-3 for IOS XE. 'ip igmp version' command is generated if version is 1 or 3 (switch default is 2)", + "IsMandatory":"false", + "IsFabricInstance":"true", + "DisplayName":"IGMP Version", + "IsShow":"\"trmEnabled==true\"", + "Section":"\"Advanced\"", + "IsMsoManaged":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"secondaryGW1", + "description":"None", + "parameterType":"ipV4AddressWithSubnet", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"IPv4 Secondary GW1", + "Description":"example 192.0.2.1/24", + "IsMandatory":"false", + "IsMsoManaged":"true", + "IsGateway":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"secondaryGW2", + "description":"None", + "parameterType":"ipV4AddressWithSubnet", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"IPv4 Secondary GW2", + "Description":"example 192.0.2.1/24", + "IsMandatory":"false", + "IsMsoManaged":"true", + "IsGateway":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"secondaryGW3", + "description":"None", + "parameterType":"ipV4AddressWithSubnet", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"IPv4 Secondary GW3", + "Description":"example 192.0.2.1/24", + "IsMandatory":"false", + "IsMsoManaged":"true", + "IsGateway":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"secondaryGW4", + "description":"None", + "parameterType":"ipV4AddressWithSubnet", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"IPv4 Secondary GW4", + "Description":"example 192.0.2.1/24", + "IsMandatory":"false", + "IsMsoManaged":"true", + "IsGateway":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"rtBothAuto", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + "defaultValue":"false" + }, + "annotations":{ + "DisplayName":"L2 VNI Route-Target Both Enable", + "Description":"NX-OS Specific", + 
"IsMandatory":"false", + "Section":"\"Advanced\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"ENABLE_NETFLOW", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + "defaultValue":"false" + }, + "annotations":{ + "DisplayName":"Enable Netflow", + "Description":"Netflow is supported only if it is enabled on fabric. For NX-OS only", + "IsMandatory":"false", + "Section":"\"Advanced\"", + "IsFabricInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"SVI_NETFLOW_MONITOR", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "Description":"Applicable only if 'Layer 2 Only' is not enabled. Provide monitor name defined in fabric setting for Layer 3 Record. For NX-OS only", + "IsMandatory":"false", + "IsFabricInstance":"true", + "DisplayName":"Interface Vlan Netflow Monitor", + "IsShow":"\"ENABLE_NETFLOW==true\"", + "Section":"\"Advanced\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"VLAN_NETFLOW_MONITOR", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "Description":"Provide monitor name defined in fabric setting for Layer 3 Record. For NX-OS only", + "IsMandatory":"false", + "IsFabricInstance":"true", + "DisplayName":"Vlan Netflow Monitor", + "IsShow":"\"ENABLE_NETFLOW==true\"", + "Section":"\"Advanced\"" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"MULTISITE_CONN", + "description":"None", + "parameterType":"structureArray", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"L2 Extension Information", + "Description":"L2 Extension Information", + "IsMandatory":"false", + "Section":"\"MULTISITE\"" + }, + "structureParameters":{ + "IF_NAME":{ + "name":"IF_NAME", + "description":"None", + "parameterType":"interface", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"true", + "IsIfName":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":false + } + }, + "parameterTypeStructure":true, + "defaultValue":"None", + "optional":true + }, + { + "name":"isVPC", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + "defaultValue":"false" + }, + "annotations":{ + "IsHidden":"true", + "IsMandatory":"false", + "IsVPC":"true", + "IsInstance":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"platformType", + "description":"None", + "parameterType":"enum", + "metaProperties":{ + "defaultValue":"Nexus", + "validValues":"Nexus,IOSXE" + }, + "annotations":{ + "IsMandatory":"false", + "IsHidden":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"anycastGMac", + "description":"None", + "parameterType":"macAddress", + "metaProperties":{ + + }, + "annotations":{ + "DisplayName":"Anycast MAC Address Defined in fabric", + "IsHidden":"true", + "IsMandatory":"false" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"gen_address", + "description":"None", + 
"parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"false", + "IsInternal":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"gen_mask", + "description":"None", + "parameterType":"string", + "metaProperties":{ + + }, + "annotations":{ + "IsMandatory":"false", + "IsInternal":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":true + }, + { + "name":"flagSet", + "description":"None", + "parameterType":"boolean", + "metaProperties":{ + + }, + "annotations":{ + "IsInternal":"true" + }, + "structureParameters":{ + + }, + "parameterTypeStructure":false, + "defaultValue":"None", + "optional":false + } + ] + } + }, + + "networkv2_net_objects_00001": + { + "RETURN_CODE": 200, + "METHOD": "GET", + "REQUEST_PATH": "https://appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/fabrics/test_netv2/networks", + "MESSAGE": "OK", + "DATA": [ + { + "id": 748, + "fabric": "test_netv2", + "networkName": "net1", + "displayName": "net1", + "networkId": 31001, + "networkTemplate": "Default_Network_Universal", + "networkExtensionTemplate": "Default_Network_Extension_Universal", + "vrf": "Tenant-1", + "tenantName": null, + "serviceNetworkTemplate": null, + "source": null, + "interfaceGroups": null, + "primaryNetworkId": -1, + "type": "Normal", + "primaryNetworkName": null, + "vlanId": null, + "networkStatus": "DEPLOYED", + "hierarchicalKey": "test_netv2", + "networkTemplateConfig": { + "suppressArp": "", + "secondaryGW3": "", + "secondaryGW2": "", + "loopbackId": "10", + "secondaryGW1": "3.1.1.1/24", + "enableL3OnBorder": "false", + "type": "Normal", + "SVI_NETFLOW_MONITOR": "", + "enableIR": "false", + "rtBothAuto": "false", + "isLayer2Only": "false", + "MULTISITE_CONN": "", + "ENABLE_NETFLOW": "false", + "dhcpServerAddr3": "", + "gatewayIpV6Address": "", + "dhcpServerAddr2": "", + "tag": "12345", + "dhcpServerAddr1": "1.1.1.1", + "nveId": "1", + "vrfDhcp": "vrf1", + "secondaryGW4": "", + "vlanId": "1001", + "gatewayIpAddress": "2.1.1.1/24", + "vlanName": "", + "mtu": "1800", + "intfDescription": "test_interface", + "mcastGroup": "239.11.11.1", + "igmpVersion": "2", + "trmEnabled": "", + "VLAN_NETFLOW_MONITOR": "", + "dhcpServers": { + "dhcpServers": [ + { + "srvrAddr": "1.1.1.1", + "srvrVrf": "vrf1" + } + ] + } + } + } + ] + }, + + "networkv2_net_attach_objects_00001": + { + "RETURN_CODE": 200, + "METHOD": "GET", + "REQUEST_PATH": "https://appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/fabrics/test_netv2/networks/attachments?network-names=net1", + "MESSAGE": "OK", + "DATA": [ + { + "networkName": "net1", + "lanAttachList": [ + { + "networkName": "net1", + "displayName": "net1", + "switchName": "leaf1", + "switchRole": "leaf", + "fabricName": "test_netv2", + "lanAttachState": "DEPLOYED", + "isLanAttached": true, + "portNames": "Ethernet1/10,Ethernet1/11", + "switchSerialNo": "9SFRKD0M6AS", + "peerSerialNo": null, + "switchDbId": 86340, + "ipAddress": "192.168.2.1", + "networkId": 31001, + "vlanId": 1001, + "instanceValues": "{\"isVPC\":\"false\"}", + "entityName": "net1", + "interfaceGroups": null + }, + { + "networkName": "net1", + "displayName": "net1", + "switchName": "leaf2", + "switchRole": "leaf", + "fabricName": "test_netv2", + "lanAttachState": "NA", + "isLanAttached": false, + "portNames": null, + "switchSerialNo": "9KRDG57QQZT", + "peerSerialNo": null, + "switchDbId": 86290, + "ipAddress": "192.168.2.2", 
+ "networkId": 31001, + "vlanId": null, + "instanceValues": null, + "entityName": null, + "interfaceGroups": null + } + ] + } + ] + }, + + "networkv2_net_objects_00002": + { + "RETURN_CODE": 200, + "METHOD": "GET", + "REQUEST_PATH": "https://appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/fabrics/test_netv2/networks", + "MESSAGE": "OK", + "DATA": + { + "id": 748, + "fabric": "test_netv2", + "networkName": "net1", + "displayName": "net1", + "networkId": 31001, + "networkTemplate": "Default_Network_Universal", + "networkExtensionTemplate": "Default_Network_Extension_Universal", + "vrf": "Tenant-1", + "tenantName": null, + "serviceNetworkTemplate": null, + "source": null, + "interfaceGroups": null, + "primaryNetworkId": -1, + "type": "Normal", + "primaryNetworkName": null, + "vlanId": null, + "networkStatus": "DEPLOYED", + "hierarchicalKey": "test_netv2", + "networkTemplateConfig": { + "suppressArp": "", + "secondaryGW3": "", + "secondaryGW2": "", + "loopbackId": "10", + "secondaryGW1": "3.1.1.1/24", + "enableL3OnBorder": "false", + "type": "Normal", + "SVI_NETFLOW_MONITOR": "", + "enableIR": "false", + "rtBothAuto": "false", + "isLayer2Only": "false", + "MULTISITE_CONN": "", + "ENABLE_NETFLOW": "false", + "dhcpServerAddr3": "", + "gatewayIpV6Address": "", + "dhcpServerAddr2": "", + "tag": "12345", + "dhcpServerAddr1": "1.1.1.1", + "nveId": "1", + "vrfDhcp": "vrf1", + "secondaryGW4": "", + "vlanId": "1001", + "gatewayIpAddress": "2.1.1.1/24", + "vlanName": "", + "mtu": "1800", + "intfDescription": "test_interface", + "mcastGroup": "239.11.11.1", + "igmpVersion": "2", + "trmEnabled": "", + "VLAN_NETFLOW_MONITOR": "", + "dhcpServers": { + "dhcpServers": [ + { + "srvrAddr": "1.1.1.1", + "srvrVrf": "vrf1" + } + ] + } + } + } + }, + + "get_response_00001": + { + "MESSAGE": "OK", + "METHOD": "PUT", + "REQUEST_PATH": "https:/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/fabrics/test_netv2/networks", + "RETURN_CODE": 200 + }, + + "get_response_00002": + { + "DATA": "Invalid JSON", + "MESSAGE": "ERROR", + "METHOD": "PUT", + "REQUEST_PATH": "https:/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/fabrics/test_netv2/networks", + "RETURN_CODE": 400 + }, + + "attach_response_00001": { + "RETURN_CODE": 200, + "METHOD": "POST", + "REQUEST_PATH": "https:/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/fabrics/test_poap/networks/attachments", + "MESSAGE": "OK", + "DATA": { + "net1-[9SFRKD0M6AS/leaf1]": "SUCCESS" + } + }, + + "get_response_00003": + { + "MESSAGE": "OK", + "METHOD": "DELETE", + "REQUEST_PATH": "https:/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/fabrics/test_netv2/networks", + "RETURN_CODE": 200 + }, + + "get_response_00004": + { + "DATA": "Invalid JSON", + "MESSAGE": "ERROR", + "METHOD": "DELETE", + "REQUEST_PATH": "https:/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/fabrics/test_netv2/networks", + "RETURN_CODE": 400 + }, + + "get_response_00005": + { + "MESSAGE": "OK", + "METHOD": "POST", + "REQUEST_PATH": "https:/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/fabrics/test_netv2/networks", + "RETURN_CODE": 200 + }, + + "networkv2_net_objects_00003": + { + "RETURN_CODE": 200, + "METHOD": "GET", + "REQUEST_PATH": "https://appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/fabrics/test_netv2/networks", + "MESSAGE": "OK", + "DATA": [] + }, + + "networkv2_net_attach_objects_00002": + { + "RETURN_CODE": 200, + "METHOD": "GET", + "REQUEST_PATH": 
"https://appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/fabrics/test_netv2/networks/attachments?network-names=net1", + "MESSAGE": "OK", + "DATA": [] + }, + + "networkv2_net_attach_deploy_00001": + { + "DATA": { + "status": "Deployment of network(s) has been initiated successfully" + }, + "MESSAGE": "OK", + "METHOD": "POST", + "REQUEST_PATH": "https://appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/v2/networks/deploy", + "RETURN_CODE": 200 + } +} diff --git a/tests/unit/modules/dcnm/test_dcnm_networkv2.py b/tests/unit/modules/dcnm/test_dcnm_networkv2.py new file mode 100644 index 000000000..2e59bef4d --- /dev/null +++ b/tests/unit/modules/dcnm/test_dcnm_networkv2.py @@ -0,0 +1,1661 @@ +# Copyright (c) 2024 Cisco and/or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# See the following regarding *_fixture imports +# https://pylint.pycqa.org/en/latest/user_guide/messages/warning/redefined-outer-name.html +# Due to the above, we also need to disable unused-import +# pylint: disable=unused-import +# Some fixtures need to use *args to match the signature of the function they are mocking +# pylint: disable=unused-argument +# Some tests require calling protected methods +# pylint: disable=protected-access + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +__copyright__ = "Copyright (c) 2024 Cisco and/or its affiliates." 
+__author__ = "Praveen Ramoorthy" + +from unittest.mock import patch +from _pytest.monkeypatch import MonkeyPatch + +from .dcnm_module import TestDcnmModule, set_module_args, loadPlaybookData + +# from typing import Any, Dict + +import os +import copy +import json +import pytest + +from ansible_collections.cisco.dcnm.plugins.module_utils.network.dcnm import dcnm +from ansible_collections.cisco.dcnm.plugins.modules import dcnm_networkv2 +from ansible_collections.cisco.dcnm.plugins.modules.dcnm_networkv2 import DcnmNetworkv2 + +# Importing Fixtures +from .fixtures.dcnm_networkv2.dcnm_networkv2_common import dcnm_networkv2_fixture + +from unittest.mock import Mock + +import datetime +import inspect + + +def log(msg): + with open('netv2.log', 'a') as of: + callerframerecord = inspect.stack()[1] + frame = callerframerecord[0] + info = inspect.getframeinfo(frame) + d = datetime.datetime.now().replace(microsecond=0).isoformat() + of.write("---- %s ---- %s@%s ---- %s \n" % (d, info.lineno, info.function, msg)) + + +# Fixtures path +fixture_path = os.path.join(os.path.dirname(__file__), "fixtures") +module_data_path = fixture_path + "/dcnm_networkv2/" + + +# UNIT TEST CASES +def load_data(module_name): + path = os.path.join(module_data_path, "{0}.json".format(module_name)) + + with open(path) as f: + data = f.read() + + try: + j_data = json.loads(data) + except Exception as e: + pass + + return j_data + + +def test_dcnm_networkv2_log_msg(monkeypatch, dcnm_networkv2_fixture): + + # Logging test + networkv2 = dcnm_networkv2_fixture + networkv2.log("This is a test message to test logging function\n") + + try: + os.remove("netv2.log") + except Exception as e: + print(str(e)) + + +@pytest.mark.parametrize( + "tc_id, filename, wkey, hkey, cfg", + [(1, "dcnm_networkv2_data", "networkv2_want", "networkv2_have_00001", "networkv2_cfg_00001")], +) +def test_dcnm_networkv2_00001( + tc_id, monkeypatch, dcnm_networkv2_fixture, filename, wkey, hkey, cfg +): + + # Testing Function dcnm_update_network_information() + + networkv2 = dcnm_networkv2_fixture + + data = load_data(filename) + want = data.get(wkey) + have = data.get(hkey) + conf = data.get(cfg) + + networkv2.dcnm_update_network_information(want, have, conf) + assert want["networkTemplateConfig"]["mtu"] == '1800' + + +@pytest.mark.parametrize( + "tc_id, filename, wkey, hkey, hkey1, cfg", + [(1, "dcnm_networkv2_data", "networkv2_want", "networkv2_have_00001", "networkv2_have_00002", "networkv2_cfg_00001"), + (2, "dcnm_networkv2_data", "networkv2_want", "networkv2_have_00001", "networkv2_have_00002", "networkv2_cfg_00001"), + (3, "dcnm_networkv2_data", "networkv2_want", "networkv2_have_00001", "networkv2_have_00002", "networkv2_cfg_00001"), + (4, "dcnm_networkv2_data", "networkv2_want", "networkv2_have_00001", "networkv2_have_00002", "networkv2_cfg_00001")], +) +def test_dcnm_networkv2_00002( + tc_id, monkeypatch, dcnm_networkv2_fixture, filename, wkey, hkey, hkey1, cfg +): + + # Testing Function update_want() + + networkv2 = dcnm_networkv2_fixture + + data = load_data(filename) + want = data.get(wkey) + have = data.get(hkey) + have1 = data.get(hkey1) + conf = data.get(cfg) + + if tc_id == 1: + networkv2.want_create = [] + elif tc_id == 2: + networkv2.want_create.append(want) + networkv2.have_create.append(have1) + networkv2.have_create.append(have) + networkv2.config.append(conf) + elif tc_id == 3: + networkv2.want_create.append(want) + networkv2.have_create.append(have1) + networkv2.config.append(conf) + elif tc_id == 4: + networkv2.want_create.append(want) 
+ networkv2.have_create.append(have) + conf1 = copy.deepcopy(conf) + conf1["net_name"] = "netv2" + networkv2.config.append(conf1) + + networkv2.update_want() + if tc_id == 2: + assert networkv2.want_create[0]["networkTemplateConfig"]["mtu"] == '1800' + elif tc_id == 3: + assert networkv2.want_create[0]["networkTemplateConfig"]["mtu"] == '1500' + elif tc_id == 4: + assert networkv2.want_create[0]["networkTemplateConfig"]["mtu"] == '1500' + + +@pytest.mark.parametrize("tc_id", [(1), (2)]) +def test_dcnm_networkv2_00003(tc_id, monkeypatch, dcnm_networkv2_fixture): + + # Testing Function update_module_info() + + networkv2 = dcnm_networkv2_fixture + + resp = load_data("dcnm_networkv2_response") + + networkv2.fabric = "test_netv2" + + dcnm_version_supported_side_effect = [] + get_fabric_inventory_details_side_effect = [] + get_fabric_details_side_effect = [] + + if tc_id == 1: + dcnm_version_supported_side_effect.append(12) + elif tc_id == 2: + dcnm_version_supported_side_effect.append(11) + + get_fabric_inventory_details_side_effect.append( + resp.get("networkv2_inv_details") + ) + get_fabric_details_side_effect.append( + resp.get("networkv2_fab_details") + ) + + mock_dcnm_version_supported = Mock( + side_effect=dcnm_version_supported_side_effect + ) + monkeypatch.setattr( + dcnm_networkv2, "dcnm_version_supported", mock_dcnm_version_supported + ) + + mock_get_fabric_inventory_details = Mock( + side_effect=get_fabric_inventory_details_side_effect + ) + monkeypatch.setattr( + dcnm_networkv2, + "get_fabric_inventory_details", + mock_get_fabric_inventory_details, + ) + + mock_get_fabric_details = Mock(side_effect=get_fabric_details_side_effect) + monkeypatch.setattr( + dcnm_networkv2, "get_fabric_details", mock_get_fabric_details + ) + + try: + networkv2.update_module_info() + except Exception as e: + assert "dcnm_networkv2 module is only supported on NDFC. 
It is not support on DCN" in str(e) + + if tc_id == 1: + assert networkv2.inventory_data != [] + assert networkv2.fabric_det != [] + assert networkv2.dcnm_version == 12 + assert networkv2.paths != {} + + +@pytest.mark.parametrize( + "tc_id, filename, create, create_update, attach, detach, deploy, undeploy", + [(1, "dcnm_networkv2_data", "networkv2_diff_create_00001", "networkv2_diff_create_update_00001", + "networkv2_diff_attach_00001", "networkv2_diff_detach_00001", "networkv2_diff_deploy_00001", + "networkv2_diff_undeploy_00001"), + (2, "dcnm_networkv2_data", "networkv2_diff_create_00001", "networkv2_diff_create_update_00001", + "networkv2_diff_attach_00002", "networkv2_diff_detach_00001", "networkv2_diff_deploy_00001", + "networkv2_diff_undeploy_00001")], +) +def test_dcnm_networkv2_00004( + tc_id, monkeypatch, dcnm_networkv2_fixture, filename, create, create_update, + attach, detach, deploy, undeploy +): + + # Testing Function format_diff() + + networkv2 = dcnm_networkv2_fixture + + data = load_data(filename) + networkv2.diff_create = data.get(create) + networkv2.diff_create_update = data.get(create_update) + if tc_id == 1: + networkv2.diff_attach = data.get(attach) + elif tc_id == 2: + networkv2.diff_attach = [] + networkv2.diff_detach = data.get(detach) + networkv2.diff_deploy = data.get(deploy) + networkv2.diff_undeploy = data.get(undeploy) + + networkv2.format_diff() + assert networkv2.diff_input_format != [] + assert networkv2.diff_input_format[0]["net_name"] == "net1" + assert networkv2.diff_input_format[0]["networkTemplateConfig"] != {} + assert networkv2.diff_input_format[0]["networkTemplateConfig"]["mtu"] == 1800 + assert networkv2.diff_input_format[0]["networkTemplateConfig"]["vlanId"] == 1001 + assert networkv2.diff_input_format[0]["networkTemplateConfig"]["secondaryGW1"] == '3.1.1.1/24' + if tc_id == 1: + assert networkv2.diff_input_format[0]["attach"] != [] + assert networkv2.diff_input_format[0]["attach"][0]["ipAddress"] == "192.168.2.1" + assert networkv2.diff_input_format[0]["attach"][0]["deploy"] is True + elif tc_id == 2: + assert networkv2.diff_input_format[0]["attach"] == [] + + +@pytest.mark.parametrize( + "tc_id, cfg", + [(1, "networkv2_cfg_00001"), + (2, "networkv2_cfg_00001"), + (3, "networkv2_cfg_00001"), + (4, "networkv2_cfg_00002"), + (5, "networkv2_cfg_00002"), + (6, "networkv2_cfg_00002")], +) +def test_dcnm_networkv2_00005( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + cfg, +): + + # Testing Function validate_input() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + config = data.get(cfg) + + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + + if tc_id == 2: + conf = copy.deepcopy(config) + conf.pop("net_name") + elif tc_id == 5: + conf = copy.deepcopy(config) + conf["network_template_config"]["attach"][0]["switchPorts"] = [] + conf["network_template_config"]["attach"][0]["torPorts"] = [] + else: + conf = config + networkv2.config.append(conf) + + networkv2.fabric = "test_netv2" + + if tc_id == 1: + networkv2.params["state"] = "deleted" + else: + networkv2.params["state"] = "merged" + + if tc_id == 3 or tc_id == 4 or tc_id == 5: + dcnm_send_side_effect = [] + dcnm_send_side_effect.append(resp.get("resp_net_template")) + dcnm_send_side_effect.append(resp.get("resp_net_ext_template")) + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + monkeypatch.setattr(dcnm, "dcnm_send", mock_dcnm_send) + + if tc_id == 6: + networkv2.dyn_arg_spec = 
data.get("dyn_arg_spec") + + try: + networkv2.validate_input() + except Exception as e: + if tc_id == 2: + assert "Invalid parameters in playbook:" in str(e) + + if tc_id != 2: + assert len(networkv2.validated) == 1 + + +@pytest.mark.parametrize( + "tc_id, validated_cfg, cfg", + [(1, "networkv2_validated_00001", "networkv2_cfg_00002"), + (2, "networkv2_validated_00001", "networkv2_cfg_00002"), + (3, "networkv2_validated_00001", "networkv2_cfg_00002"), + (4, "networkv2_validated_00001", "networkv2_cfg_00002"), + (5, "networkv2_validated_00001", "networkv2_cfg_00002"), + (6, "networkv2_validated_00001", "networkv2_cfg_00002"), + (7, "networkv2_validated_00001", "networkv2_cfg_00002"), + (8, "networkv2_validated_00001", "networkv2_cfg_00002")], +) +def test_dcnm_networkv2_00006( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + validated_cfg, + cfg +): + + # Testing Function get_want() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + validated_config = data.get(validated_cfg) + config = data.get(cfg) + networkv2.ip_sn = data.get("networkv2_ip_sn") + have_create = data.get("networkv2_have_create_00001") + have_attach = data.get("networkv2_have_attach_00001") + + if tc_id != 2: + networkv2.config.append(config) + + networkv2.validated = validated_config + if tc_id == 1: + networkv2.params["state"] = "deleted" + else: + networkv2.params["state"] = "merged" + + if tc_id == 3: + networkv2.have_create = have_create + networkv2.have_attach = have_attach + elif tc_id == 4: + networkv2.have_create = have_create + networkv2.have_attach = have_attach + validated = copy.deepcopy(validated_config) + validated[0]["attach"][0]["switchPorts"].remove("Ethernet1/12") + validated[0]["attach"][0]["torPorts"][0]["ports"].remove("Ethernet1/12") + networkv2.validated = validated + elif tc_id == 5: + networkv2.have_create = have_create + networkv2.have_attach = have_attach + validated = copy.deepcopy(validated_config) + validated[0]["attach"][0]["switchPorts"] = [] + validated[0]["attach"][0]["torPorts"] = [] + networkv2.validated = validated + elif tc_id == 6: + networkv2.have_create = have_create + attach = copy.deepcopy(have_attach) + attach[0]["lanAttachList"][0]["switchPorts"] = [] + attach[0]["lanAttachList"][0]["torPorts"] = [] + networkv2.have_attach = attach + validated = copy.deepcopy(validated_config) + validated[0]["attach"][0]["switchPorts"] = [] + validated[0]["attach"][0]["torPorts"] = [] + networkv2.validated = validated + elif tc_id == 7: + networkv2.have_create = have_create + networkv2.have_attach = have_attach + validated = copy.deepcopy(validated_config) + validated[0]["attach"][0]["detachSwitchPorts"].append("Ethernet1/12") + validated[0]["attach"][0]["attached"] = False + networkv2.validated = validated + elif tc_id == 8: + networkv2.have_create = have_create + networkv2.have_attach = have_attach + validated = copy.deepcopy(validated_config) + validated[0]["attach"][0]["ipAddress"] = "192.168.2.4" + networkv2.validated = validated + + try: + networkv2.get_want() + except Exception as e: + if tc_id == 8: + assert "does not have the switch" in str(e) + + if tc_id != 8 and tc_id != 1 and tc_id != 2: + assert len(networkv2.want_create) == 1 + assert len(networkv2.want_attach) == 1 + assert networkv2.want_deploy != {} + + +@pytest.mark.parametrize( + "tc_id, get_network, get_netattach", + [(1, "networkv2_net_objects_00001", "networkv2_net_attach_objects_00001"), + (2, "networkv2_net_objects_00001", "networkv2_net_attach_objects_00001"), + (3, 
"networkv2_net_objects_00001", "networkv2_net_attach_objects_00001"), + (4, "networkv2_net_objects_00001", "networkv2_net_attach_objects_00001"), + (5, "networkv2_net_objects_00001", "networkv2_net_attach_objects_00001"), + (6, "networkv2_net_objects_00001", "networkv2_net_attach_objects_00001"), + (7, "networkv2_net_objects_00001", "networkv2_net_attach_objects_00001")], +) +def test_dcnm_networkv2_00007( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + get_network, + get_netattach, +): + + # Testing Function get_have() + + networkv2 = dcnm_networkv2_fixture + + resp = load_data("dcnm_networkv2_response") + get_net = copy.deepcopy(resp.get(get_network)) + get_attach = copy.deepcopy(resp.get(get_netattach)) + + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + networkv2.fabric = "test_netv2" + networkv2.params["state"] = "merged" + + if tc_id == 4: + get_net["DATA"] = [] + elif tc_id == 5: + del get_net["DATA"] + del get_net["MESSAGE"] + get_net.update({"ERROR": "Not Found"}) + get_net["RETURN_CODE"] = 404 + elif tc_id == 6: + del get_net["DATA"] + get_net["RETURN_CODE"] = 400 + get_net["MESSAGE"] = "Bad Request" + elif tc_id == 7: + networkv2.params["state"] = "deleted" + get_attach["DATA"][0]["lanAttachList"][0]["lanAttachState"] = "PENDING" + + dcnm_send_side_effect = [] + dcnm_send_side_effect.append(get_net) + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + monkeypatch.setattr(dcnm_networkv2, "dcnm_send", mock_dcnm_send) + + if tc_id == 2: + get_attach["DATA"][0]["lanAttachList"][0]["portNames"] = "leaf1(Ethernet1/10,Ethernet1/11) tor(Ethernet1/10,Ethernet1/11)" + elif tc_id == 3: + get_attach["DATA"] = [] + + dcnm_get_url_side_effect = [] + dcnm_get_url_side_effect.append(get_attach) + mock_dcnm_get_url = Mock(side_effect=dcnm_get_url_side_effect) + monkeypatch.setattr(dcnm_networkv2, "dcnm_get_url", mock_dcnm_get_url) + + try: + networkv2.get_have() + except Exception as e: + if tc_id == 5: + assert "Fabric test_netv2 not present on NDFC" in str(e) + elif tc_id == 56: + assert "Unable to find Networks under fabric" in str(e) + + if tc_id == 1 or tc_id == 2: + assert len(networkv2.have_create) == 1 + assert len(networkv2.have_attach) == 1 + assert networkv2.have_deploy != {} + + +@pytest.mark.parametrize( + "tc_id, cfg, have, want, get_network, get_netattach", + [(1, "networkv2_cfg_00001", "networkv2_have_00001", + "networkv2_want", "networkv2_net_objects_00002", "networkv2_net_attach_objects_00001"), + (2, "networkv2_cfg_00001", "networkv2_have_00001", + "networkv2_want", "networkv2_net_objects_00001", "networkv2_net_attach_objects_00001")], +) +def test_dcnm_networkv2_00008( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + cfg, + have, + want, + get_network, + get_netattach, +): + + # Testing Function get_diff_query() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + + config = data.get(cfg) + have_create = data.get(have) + want_create = data.get(want) + get_net = copy.deepcopy(resp.get(get_network)) + get_attach = copy.deepcopy(resp.get(get_netattach)) + + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + networkv2.fabric = "test_netv2" + networkv2.params["state"] = "query" + if tc_id == 1: + networkv2.config.append(config) + networkv2.have_create.append(have_create) + networkv2.want_create.append(want_create) + + dcnm_send_side_effect = [] + dcnm_send_side_effect.append(get_net) + dcnm_send_side_effect.append(get_attach) + mock_dcnm_send = 
Mock(side_effect=dcnm_send_side_effect) + monkeypatch.setattr(dcnm_networkv2, "dcnm_send", mock_dcnm_send) + + networkv2.get_diff_query() + + assert len(networkv2.query) == 1 + + +@pytest.mark.parametrize( + "tc_id, cfg, have, want, have_netattach", + [(1, "networkv2_cfg_00001", "networkv2_have_00001", "networkv2_want", "networkv2_have_attach_00001"), + (2, "networkv2_cfg_00001", "networkv2_have_00001", "networkv2_want", "networkv2_have_attach_00001"), + (3, "networkv2_cfg_00001", "networkv2_have_00001", "networkv2_want", "networkv2_have_attach_00001"), + (4, "networkv2_cfg_00001", "networkv2_have_00001", "networkv2_want", "networkv2_have_attach_00001")], +) +def test_dcnm_networkv2_00009( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + cfg, + have, + want, + have_netattach, +): + + # Testing Function get_diff_delete() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + + config = data.get(cfg) + have_create = data.get(have) + want_create = data.get(want) + have_attach = data.get(have_netattach) + + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + networkv2.fabric = "test_netv2" + networkv2.params["state"] = "deleted" + + if tc_id == 1 or tc_id == 2: + networkv2.config.append(config) + if tc_id == 2 or tc_id == 4: + networkv2.have_attach = have_attach + networkv2.have_create.append(have_create) + networkv2.want_create.append(want_create) + + networkv2.get_diff_delete() + + assert len(networkv2.diff_delete) == 1 + + +@pytest.mark.parametrize( + "tc_id, cfg, have, want, have_netattach", + [(1, "networkv2_cfg_00001", "networkv2_have_00001", "networkv2_want", "networkv2_have_attach_00001"), + (2, "networkv2_cfg_00001", "networkv2_have_00002", "networkv2_want", "networkv2_have_attach_00002")], +) +def test_dcnm_networkv2_00010( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + cfg, + have, + want, + have_netattach, +): + + # Testing Function get_diff_override() with get_diff_override() and get_diff_merge() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + + config = data.get(cfg) + have_create = data.get(have) + want_create = data.get(want) + have_attach = data.get(have_netattach) + + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + networkv2.fabric = "test_netv2" + networkv2.params["state"] = "overridden" + + networkv2.config.append(config) + networkv2.have_create.append(have_create) + networkv2.have_attach = have_attach + networkv2.want_create.append(want_create) + + networkv2.get_diff_override() + + if tc_id == 1: + assert len(networkv2.diff_create) == 0 + assert len(networkv2.diff_attach) == 1 + elif tc_id == 2: + assert len(networkv2.diff_create) == 1 + assert len(networkv2.diff_attach) == 0 + + +@pytest.mark.parametrize( + "tc_id, cfg, have, want, have_netattach, want_netattach", + [(1, "networkv2_cfg_00001", "networkv2_have_00001", "networkv2_want", + "networkv2_have_attach_00001", "networkv2_want_attach_00001"), + (2, "networkv2_cfg_00001", "networkv2_have_00001", "networkv2_want", + "networkv2_have_attach_00001", "networkv2_want_attach_00001")], +) +def test_dcnm_networkv2_00011( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + cfg, + have, + want, + have_netattach, + want_netattach +): + + # Testing Fuction get_diff_replace() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + + config = data.get(cfg) + have_create = data.get(have) + want_create = data.get(want) + have_attach = copy.deepcopy(data.get(have_netattach)) + want_attach = 
copy.deepcopy(data.get(want_netattach)) + + if tc_id == 2: + del want_attach[0]["lanAttachList"][0] + + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + networkv2.fabric = "test_netv2" + networkv2.params["state"] = "replaced" + + networkv2.config.append(config) + networkv2.have_create.append(have_create) + networkv2.have_attach = have_attach + networkv2.want_create.append(want_create) + networkv2.want_attach = want_attach + + networkv2.get_diff_replace() + + assert len(networkv2.diff_create) == 0 + assert len(networkv2.diff_attach) == 1 + + +@pytest.mark.parametrize( + "tc_id, cfg, have, want, have_netattach, want_netattach", + [(1, "networkv2_cfg_00001", "networkv2_have_00001", "networkv2_want", + "networkv2_have_attach_00001", "networkv2_want_attach_00001"), + (2, "networkv2_cfg_00001", "networkv2_have_00001", "networkv2_want", + "networkv2_have_attach_00001", "networkv2_want_attach_00002"), + (3, "networkv2_cfg_00001", "networkv2_have_00001", "networkv2_want", + "networkv2_have_attach_00001", "networkv2_want_attach_00003"), + (4, "networkv2_cfg_00001", "networkv2_have_00001", "networkv2_want", + "networkv2_have_attach_00001", "networkv2_want_attach_00001")], +) +def test_dcnm_networkv2_00012( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + cfg, + have, + want, + have_netattach, + want_netattach +): + + # Testing Fuction get_diff_merge() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + + config = data.get(cfg) + have_create = data.get(have) + want_create = data.get(want) + have_attach = copy.deepcopy(data.get(have_netattach)) + want_attach = copy.deepcopy(data.get(want_netattach)) + + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + networkv2.fabric = "test_netv2" + networkv2.params["state"] = "merged" + + networkv2.config.append(config) + networkv2.have_create.append(have_create) + networkv2.have_attach = have_attach + networkv2.want_create.append(want_create) + networkv2.want_attach = want_attach + + if tc_id == 4: + del networkv2.have_attach[0]["lanAttachList"][0] + + networkv2.get_diff_merge() + + assert len(networkv2.diff_create) == 0 + assert len(networkv2.diff_attach) == 1 + assert len(networkv2.diff_create_update) == 1 + + +@pytest.mark.parametrize( + "tc_id, want, get_response", + [(1, "networkv2_diff_create_00001", "get_response_00001"), + (2, "networkv2_diff_create_00001", "get_response_00002"), + (3, "networkv2_diff_create_00001", "get_response_00002")], +) +def test_dcnm_networkv2_00013( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + want, + get_response +): + + # Testing Function push_to_remote_update() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + want_create = data.get(want) + get_resp = copy.deepcopy(resp.get(get_response)) + + networkv2.diff_create_update = want_create + + dcnm_send_side_effect = [] + dcnm_send_side_effect.append(get_resp) + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + monkeypatch.setattr(dcnm_networkv2, "dcnm_send", mock_dcnm_send) + + networkv2.fabric = "test_netv2" + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + + try: + networkv2.push_to_remote() + except Exception as e: + assert "ERROR" in str(e) + + +@pytest.mark.parametrize( + "tc_id, want, get_response", + [(1, "networkv2_want_attach_00001", "get_response_00001"), + (2, "networkv2_want_attach_00001", "get_response_00002"), + (3, "networkv2_want_attach_00001", "get_response_00002")], +) +def test_dcnm_networkv2_00014( + tc_id, + 
monkeypatch, + dcnm_networkv2_fixture, + want, + get_response +): + + # Testing Function push_to_remote_detach() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + want_detach = data.get(want) + get_resp = copy.deepcopy(resp.get(get_response)) + + networkv2.diff_detach = want_detach + + dcnm_send_side_effect = [] + dcnm_send_side_effect.append(get_resp) + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + monkeypatch.setattr(dcnm_networkv2, "dcnm_send", mock_dcnm_send) + + networkv2.fabric = "test_netv2" + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + networkv2.is_ms_fabric = False + + try: + networkv2.push_to_remote() + except Exception as e: + assert "ERROR" in str(e) + + +@pytest.mark.parametrize( + "tc_id, undeploy, get_response", + [(1, "networkv2_diff_deploy_00001", "get_response_00001"), + (2, "networkv2_diff_deploy_00001", "get_response_00002"), + (3, "networkv2_diff_deploy_00001", "get_response_00002")], +) +def test_dcnm_networkv2_00015( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + undeploy, + get_response +): + + # Testing Function push_to_remote_undeploy() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + want_undeploy = data.get(undeploy) + get_resp = copy.deepcopy(resp.get(get_response)) + + networkv2.diff_undeploy = want_undeploy + networkv2.fabric = "test_netv2" + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + + dcnm_send_side_effect = [] + dcnm_send_side_effect.append(get_resp) + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + monkeypatch.setattr(dcnm_networkv2, "dcnm_send", mock_dcnm_send) + + try: + networkv2.push_to_remote() + except Exception as e: + assert "ERROR" in str(e) + + +@pytest.mark.parametrize( + "tc_id, want, get_response", + [(1, "networkv2_diff_create_00001", "get_response_00001"), + (2, "networkv2_diff_create_00001", "get_response_00002"), + (3, "networkv2_diff_create_00001", "get_response_00002")], +) +def test_dcnm_networkv2_00016( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + want, + get_response +): + + # Testing Function push_to_remote_create() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + want_create = data.get(want) + get_resp = copy.deepcopy(resp.get(get_response)) + + networkv2.diff_create = want_create + + dcnm_send_side_effect = [] + dcnm_send_side_effect.append(get_resp) + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + monkeypatch.setattr(dcnm_networkv2, "dcnm_send", mock_dcnm_send) + + networkv2.fabric = "test_netv2" + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + + try: + networkv2.push_to_remote() + except Exception as e: + assert "ERROR" in str(e) + + +@pytest.mark.parametrize( + "tc_id, want, get_response", + [(1, "networkv2_want_attach_00001", "attach_response_00001"), + (2, "networkv2_want_attach_00001", "get_response_00002"), + (3, "networkv2_want_attach_00001", "get_response_00002")], +) +def test_dcnm_networkv2_00017( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + want, + get_response +): + + # Testing Function push_to_remote_attach() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + want_attach = data.get(want) + get_resp = copy.deepcopy(resp.get(get_response)) + + networkv2.diff_attach = want_attach + + dcnm_send_side_effect = [] 
+ dcnm_send_side_effect.append(get_resp) + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + monkeypatch.setattr(dcnm_networkv2, "dcnm_send", mock_dcnm_send) + + networkv2.fabric = "test_netv2" + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + networkv2.is_ms_fabric = False + + try: + networkv2.push_to_remote() + except Exception as e: + assert "ERROR" in str(e) + + +@pytest.mark.parametrize( + "tc_id, deploy, get_response", + [(1, "networkv2_diff_deploy_00001", "get_response_00001"), + (2, "networkv2_diff_deploy_00001", "get_response_00002"), + (3, "networkv2_diff_deploy_00001", "get_response_00002")], +) +def test_dcnm_networkv2_00018( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + deploy, + get_response +): + + # Testing Function push_to_remote_deploy() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + want_deploy = data.get(deploy) + get_resp = copy.deepcopy(resp.get(get_response)) + + networkv2.diff_deploy = want_deploy + networkv2.fabric = "test_netv2" + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + + dcnm_send_side_effect = [] + dcnm_send_side_effect.append(get_resp) + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + monkeypatch.setattr(dcnm_networkv2, "dcnm_send", mock_dcnm_send) + + try: + networkv2.push_to_remote() + except Exception as e: + assert "ERROR" in str(e) + + +@pytest.mark.parametrize( + "tc_id, delete, get_response, get_netattach_response", + [(1, "networkv2_diff_delete_00001", "get_response_00003", "networkv2_net_attach_objects_00001"), + (2, "networkv2_diff_delete_00001", "get_response_00003", "networkv2_net_attach_objects_00001"), + (3, "networkv2_diff_delete_00001", "get_response_00003", "networkv2_net_attach_objects_00001"), + (4, "networkv2_diff_delete_00001", "get_response_00003", "networkv2_net_attach_objects_00001"), + (5, "networkv2_diff_delete_00001", "get_response_00004", "networkv2_net_attach_objects_00001"), + (6, "networkv2_diff_delete_00001", "get_response_00004", "networkv2_net_attach_objects_00001"), + (7, "networkv2_diff_delete_00001", "get_response_00003", "networkv2_net_attach_objects_00001")], +) +def test_dcnm_networkv2_00019( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + delete, + get_response, + get_netattach_response +): + + # Testing Function push_to_remote_delete() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + get_resp = copy.deepcopy(resp.get(get_response)) + get_netattach_resp = copy.deepcopy(resp.get(get_netattach_response)) + get_netattach_resp1 = copy.deepcopy(resp.get(get_netattach_response)) + want_delete = data.get(delete) + + networkv2.diff_delete = want_delete + networkv2.fabric = "test_netv2" + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + if tc_id == 1 or tc_id == 5 or tc_id == 6: + get_netattach_resp["DATA"][0]["lanAttachList"][0]["lanAttachState"] = "NA" + elif tc_id == 2: + get_netattach_resp["DATA"][0]["lanAttachList"][0]["lanAttachState"] = "OUT-OF-SYNC" + get_netattach_resp1["DATA"][0]["lanAttachList"][0]["lanAttachState"] = "NA" + elif tc_id == 3: + get_netattach_resp1["DATA"][0]["lanAttachList"][0]["lanAttachState"] = "NA" + elif tc_id == 4 or tc_id == 7: + get_netattach_resp["DATA"][0]["lanAttachList"][0]["lanAttachState"] = "OUT-OF-SYNC" + + dcnm_send_side_effect = [] + dcnm_send_side_effect.append(get_netattach_resp) + if tc_id == 2 or tc_id == 3: + 
dcnm_send_side_effect.append(get_netattach_resp1) + elif tc_id == 4 or tc_id == 7: + for i in range(10): + dcnm_send_side_effect.append(get_netattach_resp) + dcnm_send_side_effect.append(get_resp) + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + monkeypatch.setattr(dcnm_networkv2, "dcnm_send", mock_dcnm_send) + + try: + networkv2.push_to_remote() + except Exception as e: + if tc_id == 4 or tc_id == 7: + assert "Deletion of Networks net1 has failed" in str(e) + else: + assert "ERROR" in str(e) + + +@pytest.mark.parametrize( + "tc_id, want_deploy, have_deploy, diff_deploy", + [(1, "networkv2_want_deploy_00001", "networkv2_have_deploy_00001", "networkv2_diff_deploy_00001"), + (2, "networkv2_want_deploy_00001", "networkv2_have_deploy_00001", "networkv2_diff_deploy_00001"), + (3, "networkv2_want_deploy_00001", "networkv2_have_deploy_00001", "networkv2_diff_deploy_00001"), + (4, "networkv2_want_deploy_00001", "networkv2_have_deploy_00001", "networkv2_diff_deploy_00001"), + (5, "networkv2_want_deploy_00001", "networkv2_have_deploy_00001", "networkv2_diff_deploy_00001")], +) +def test_dcnm_networkv2_00020( + tc_id, + monkeypatch, + dcnm_networkv2_fixture, + want_deploy, + have_deploy, + diff_deploy +): + + # Testing Function get_deploy_diff() + + networkv2 = dcnm_networkv2_fixture + + data = load_data("dcnm_networkv2_data") + want = copy.deepcopy(data.get(want_deploy)) + have = copy.deepcopy(data.get(have_deploy)) + if tc_id == 2 or tc_id == 4: + diff = {} + if tc_id == 4: + have["9SFRKD0M6AS"] = ["net2"] + elif tc_id == 5: + diff = {} + have = {} + else: + diff = copy.deepcopy(data.get(diff_deploy)) + if tc_id == 3: + diff["9SFRKD0M6AS"] = ["net2"] + + networkv2.fabric = "test_netv2" + networkv2.paths = DcnmNetworkv2.dcnm_network_paths[12] + networkv2.have_deploy = have + networkv2.want_deploy = want + + networkv2.get_deploy_diff(diff) + + if tc_id == 2: + assert len(diff) == 0 + else: + assert len(diff) == 1 + + +# Following test cases are for black box testing. +# These test cases are written to test the complete flow of the module. 
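+#
+# Each test below stubs the NDFC REST layer: dcnm_version_supported,
+# get_fabric_inventory_details, get_fabric_details, dcnm_send and dcnm_get_url
+# are monkeypatched with Mock objects whose side_effect lists replay canned
+# responses loaded from the JSON fixtures, in the order the module is expected
+# to call them. set_module_args() supplies the playbook input, execute_module()
+# runs the module end to end, and the assertions check only the returned
+# result/diff/response contents.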
+class TestDcnmNetworkv2Module(TestDcnmModule): + + module = dcnm_networkv2 + + fd = None + + def setUp(self): + super(TestDcnmNetworkv2Module, self).setUp() + self.monkeypatch = MonkeyPatch() + + def test_dcnm_networkv2_merged_new(self): + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + + dcnm_version_supported_side_effect = [] + get_fabric_inventory_details_side_effect = [] + get_fabric_details_side_effect = [] + dcnm_send_side_effect = [] + dcnm_send_side_effect_1 = [] + + dcnm_version_supported_side_effect.append(12) + get_fabric_inventory_details_side_effect.append( + resp.get("networkv2_inv_details") + ) + get_fabric_details_side_effect.append(resp.get("networkv2_fab_details")) + + mock_dcnm_version_supported = Mock( + side_effect=dcnm_version_supported_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_version_supported", mock_dcnm_version_supported + ) + + mock_get_fabric_inventory_details = Mock( + side_effect=get_fabric_inventory_details_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, + "get_fabric_inventory_details", + mock_get_fabric_inventory_details, + ) + + mock_get_fabric_details = Mock( + side_effect=get_fabric_details_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, "get_fabric_details", mock_get_fabric_details + ) + + # dcnm_send invoked from module_utils/dcnm.py + dcnm_send_side_effect.append(resp.get("resp_net_template")) + dcnm_send_side_effect.append(resp.get("resp_net_ext_template")) + + # dcnm_send invoked from modules/dcnm_networkv2.py + dcnm_send_side_effect_1.append(resp.get("networkv2_net_objects_00003")) + dcnm_send_side_effect_1.append(resp.get("get_response_00005")) + + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + self.monkeypatch.setattr( + dcnm, "dcnm_send", mock_dcnm_send + ) + + mock_dcnm_send_1 = Mock(side_effect=dcnm_send_side_effect_1) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_send", mock_dcnm_send_1 + ) + + config = data.get("networkv2_cfg_00001") + playbook_config = [config] + + set_module_args( + dict( + state="merged", + fabric="test_netv2", + config=playbook_config, + ) + ) + + result = self.execute_module(changed=True, failed=False) + + assert result.get("changed") is True + self.assertEqual(len(result["diff"]), 1) + self.assertEqual(result["diff"][0]["net_name"], "net1") + self.assertEqual(result["diff"][0]["vrf_name"], "Tenant-1") + + def test_dcnm_networkv2_merged_existing(self): + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + + dcnm_version_supported_side_effect = [] + get_fabric_inventory_details_side_effect = [] + get_fabric_details_side_effect = [] + dcnm_send_side_effect = [] + dcnm_send_side_effect_1 = [] + dcnm_get_url_side_effect = [] + + dcnm_version_supported_side_effect.append(12) + get_fabric_inventory_details_side_effect.append( + resp.get("networkv2_inv_details") + ) + get_fabric_details_side_effect.append(resp.get("networkv2_fab_details")) + + mock_dcnm_version_supported = Mock( + side_effect=dcnm_version_supported_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_version_supported", mock_dcnm_version_supported + ) + + mock_get_fabric_inventory_details = Mock( + side_effect=get_fabric_inventory_details_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, + "get_fabric_inventory_details", + mock_get_fabric_inventory_details, + ) + + mock_get_fabric_details = Mock( + side_effect=get_fabric_details_side_effect + ) + self.monkeypatch.setattr( + 
dcnm_networkv2, "get_fabric_details", mock_get_fabric_details + ) + + # dcnm_send invoked from module_utils/dcnm.py + dcnm_send_side_effect.append(resp.get("resp_net_template")) + dcnm_send_side_effect.append(resp.get("resp_net_ext_template")) + + # dcnm_send invoked from modules/dcnm_networkv2.py + dcnm_send_side_effect_1.append(resp.get("networkv2_net_objects_00001")) + dcnm_get_url_side_effect.append(resp.get("networkv2_net_attach_objects_00002")) + dcnm_send_side_effect_1.append(resp.get("get_response_00001")) + dcnm_send_side_effect_1.append(resp.get("attach_response_00001")) + dcnm_send_side_effect_1.append(resp.get("networkv2_net_attach_deploy_00001")) + + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + self.monkeypatch.setattr( + dcnm, "dcnm_send", mock_dcnm_send + ) + + mock_dcnm_send_1 = Mock(side_effect=dcnm_send_side_effect_1) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_send", mock_dcnm_send_1 + ) + + mock_dcnm_get_url = Mock(side_effect=dcnm_get_url_side_effect) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_get_url", mock_dcnm_get_url + ) + + config = data.get("networkv2_cfg_00002") + config["network_template_config"]["mtu"] = 9000 + playbook_config = [config] + + set_module_args( + dict( + state="merged", + fabric="test_netv2", + config=playbook_config, + ) + ) + + result = self.execute_module(changed=True, failed=False) + + assert result.get("changed") is True + self.assertEqual(len(result["diff"]), 1) + self.assertEqual(result["diff"][0]["net_name"], "net1") + self.assertEqual(result["diff"][0]["vrf_name"], "Tenant-1") + self.assertEqual(result["diff"][0]["networkTemplateConfig"]["mtu"], 9000) + self.assertEqual(result["diff"][0]["attach"][0]["attached"], True) + + def test_dcnm_networkv2_merged_checkmode(self): + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + + dcnm_version_supported_side_effect = [] + get_fabric_inventory_details_side_effect = [] + get_fabric_details_side_effect = [] + dcnm_send_side_effect = [] + dcnm_send_side_effect_1 = [] + + dcnm_version_supported_side_effect.append(12) + get_fabric_inventory_details_side_effect.append( + resp.get("networkv2_inv_details") + ) + get_fabric_details_side_effect.append(resp.get("networkv2_fab_details")) + + mock_dcnm_version_supported = Mock( + side_effect=dcnm_version_supported_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_version_supported", mock_dcnm_version_supported + ) + + mock_get_fabric_inventory_details = Mock( + side_effect=get_fabric_inventory_details_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, + "get_fabric_inventory_details", + mock_get_fabric_inventory_details, + ) + + mock_get_fabric_details = Mock( + side_effect=get_fabric_details_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, "get_fabric_details", mock_get_fabric_details + ) + + # dcnm_send invoked from module_utils/dcnm.py + dcnm_send_side_effect.append(resp.get("resp_net_template")) + dcnm_send_side_effect.append(resp.get("resp_net_ext_template")) + + # dcnm_send invoked from modules/dcnm_networkv2.py + dcnm_send_side_effect_1.append(resp.get("networkv2_net_objects_00003")) + + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + self.monkeypatch.setattr( + dcnm, "dcnm_send", mock_dcnm_send + ) + + mock_dcnm_send_1 = Mock(side_effect=dcnm_send_side_effect_1) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_send", mock_dcnm_send_1 + ) + + config = data.get("networkv2_cfg_00001") + playbook_config = [config] + + 
set_module_args( + dict( + state="merged", + fabric="test_netv2", + config=playbook_config, + _ansible_check_mode=True, + ) + ) + + result = self.execute_module(changed=False, failed=False) + + assert result.get("changed") is False + self.assertEqual(len(result["diff"]), 1) + self.assertEqual(result["diff"][0]["net_name"], "net1") + self.assertEqual(result["diff"][0]["vrf_name"], "Tenant-1") + + def test_dcnm_networkv2_query(self): + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + + dcnm_version_supported_side_effect = [] + get_fabric_inventory_details_side_effect = [] + get_fabric_details_side_effect = [] + dcnm_send_side_effect = [] + dcnm_send_side_effect_1 = [] + dcnm_get_url_side_effect = [] + + dcnm_version_supported_side_effect.append(12) + get_fabric_inventory_details_side_effect.append( + resp.get("networkv2_inv_details") + ) + get_fabric_details_side_effect.append(resp.get("networkv2_fab_details")) + + mock_dcnm_version_supported = Mock( + side_effect=dcnm_version_supported_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_version_supported", mock_dcnm_version_supported + ) + + mock_get_fabric_inventory_details = Mock( + side_effect=get_fabric_inventory_details_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, + "get_fabric_inventory_details", + mock_get_fabric_inventory_details, + ) + + mock_get_fabric_details = Mock( + side_effect=get_fabric_details_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, "get_fabric_details", mock_get_fabric_details + ) + + # dcnm_send invoked from module_utils/dcnm.py + dcnm_send_side_effect.append(resp.get("resp_net_template")) + dcnm_send_side_effect.append(resp.get("resp_net_ext_template")) + + # dcnm_send invoked from modules/dcnm_networkv2.py + dcnm_send_side_effect_1.append(resp.get("networkv2_net_objects_00001")) + dcnm_get_url_side_effect.append(resp.get("networkv2_net_attach_objects_00001")) + dcnm_send_side_effect_1.append(resp.get("networkv2_net_objects_00002")) + dcnm_send_side_effect_1.append(resp.get("networkv2_net_attach_objects_00001")) + + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + self.monkeypatch.setattr( + dcnm, "dcnm_send", mock_dcnm_send + ) + + mock_dcnm_send_1 = Mock(side_effect=dcnm_send_side_effect_1) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_send", mock_dcnm_send_1 + ) + + mock_dcnm_get_url = Mock(side_effect=dcnm_get_url_side_effect) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_get_url", mock_dcnm_get_url + ) + + config = data.get("networkv2_cfg_00001") + playbook_config = [config] + + set_module_args( + dict( + state="query", + fabric="test_netv2", + config=playbook_config, + ) + ) + + result = self.execute_module(changed=False, failed=False) + + assert result.get("changed") is False + self.assertEqual(len(result["response"]), 1) + self.assertEqual(result["response"][0]["Network"]["networkName"], "net1") + self.assertEqual(result["response"][0]["Network"]["vrf"], "Tenant-1") + + def test_dcnm_networkv2_replaced(self): + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + + dcnm_version_supported_side_effect = [] + get_fabric_inventory_details_side_effect = [] + get_fabric_details_side_effect = [] + dcnm_send_side_effect = [] + dcnm_send_side_effect_1 = [] + + dcnm_version_supported_side_effect.append(12) + get_fabric_inventory_details_side_effect.append( + resp.get("networkv2_inv_details") + ) + get_fabric_details_side_effect.append(resp.get("networkv2_fab_details")) + 
+ mock_dcnm_version_supported = Mock( + side_effect=dcnm_version_supported_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_version_supported", mock_dcnm_version_supported + ) + + mock_get_fabric_inventory_details = Mock( + side_effect=get_fabric_inventory_details_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, + "get_fabric_inventory_details", + mock_get_fabric_inventory_details, + ) + + mock_get_fabric_details = Mock( + side_effect=get_fabric_details_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, "get_fabric_details", mock_get_fabric_details + ) + + # dcnm_send invoked from module_utils/dcnm.py + dcnm_send_side_effect.append(resp.get("resp_net_template")) + dcnm_send_side_effect.append(resp.get("resp_net_ext_template")) + + # dcnm_send invoked from modules/dcnm_networkv2.py + dcnm_send_side_effect_1.append(resp.get("networkv2_net_objects_00003")) + dcnm_send_side_effect_1.append(resp.get("get_response_00005")) + + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + self.monkeypatch.setattr( + dcnm, "dcnm_send", mock_dcnm_send + ) + + mock_dcnm_send_1 = Mock(side_effect=dcnm_send_side_effect_1) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_send", mock_dcnm_send_1 + ) + + config = data.get("networkv2_cfg_00001") + playbook_config = [config] + + set_module_args( + dict( + state="replaced", + fabric="test_netv2", + config=playbook_config, + ) + ) + + result = self.execute_module(changed=True, failed=False) + + assert result.get("changed") is True + self.assertEqual(len(result["diff"]), 1) + self.assertEqual(result["diff"][0]["net_name"], "net1") + self.assertEqual(result["diff"][0]["vrf_name"], "Tenant-1") + + def test_dcnm_networkv2_override(self): + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + + dcnm_version_supported_side_effect = [] + get_fabric_inventory_details_side_effect = [] + get_fabric_details_side_effect = [] + dcnm_send_side_effect = [] + dcnm_send_side_effect_1 = [] + + dcnm_version_supported_side_effect.append(12) + get_fabric_inventory_details_side_effect.append( + resp.get("networkv2_inv_details") + ) + get_fabric_details_side_effect.append(resp.get("networkv2_fab_details")) + + mock_dcnm_version_supported = Mock( + side_effect=dcnm_version_supported_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_version_supported", mock_dcnm_version_supported + ) + + mock_get_fabric_inventory_details = Mock( + side_effect=get_fabric_inventory_details_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, + "get_fabric_inventory_details", + mock_get_fabric_inventory_details, + ) + + mock_get_fabric_details = Mock( + side_effect=get_fabric_details_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, "get_fabric_details", mock_get_fabric_details + ) + + # dcnm_send invoked from module_utils/dcnm.py + dcnm_send_side_effect.append(resp.get("resp_net_template")) + dcnm_send_side_effect.append(resp.get("resp_net_ext_template")) + + # dcnm_send invoked from modules/dcnm_networkv2.py + dcnm_send_side_effect_1.append(resp.get("networkv2_net_objects_00003")) + dcnm_send_side_effect_1.append(resp.get("get_response_00005")) + + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + self.monkeypatch.setattr( + dcnm, "dcnm_send", mock_dcnm_send + ) + + mock_dcnm_send_1 = Mock(side_effect=dcnm_send_side_effect_1) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_send", mock_dcnm_send_1 + ) + + config = data.get("networkv2_cfg_00001") + playbook_config = 
[config] + + set_module_args( + dict( + state="overridden", + fabric="test_netv2", + config=playbook_config, + ) + ) + + result = self.execute_module(changed=True, failed=False) + + assert result.get("changed") is True + self.assertEqual(len(result["diff"]), 1) + self.assertEqual(result["diff"][0]["net_name"], "net1") + self.assertEqual(result["diff"][0]["vrf_name"], "Tenant-1") + + def test_dcnm_networkv2_delete(self): + + data = load_data("dcnm_networkv2_data") + resp = load_data("dcnm_networkv2_response") + + dcnm_version_supported_side_effect = [] + get_fabric_inventory_details_side_effect = [] + get_fabric_details_side_effect = [] + dcnm_send_side_effect = [] + dcnm_send_side_effect_1 = [] + dcnm_get_url_side_effect = [] + + dcnm_version_supported_side_effect.append(12) + get_fabric_inventory_details_side_effect.append( + resp.get("networkv2_inv_details") + ) + get_fabric_details_side_effect.append(resp.get("networkv2_fab_details")) + + mock_dcnm_version_supported = Mock( + side_effect=dcnm_version_supported_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_version_supported", mock_dcnm_version_supported + ) + + mock_get_fabric_inventory_details = Mock( + side_effect=get_fabric_inventory_details_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, + "get_fabric_inventory_details", + mock_get_fabric_inventory_details, + ) + + mock_get_fabric_details = Mock( + side_effect=get_fabric_details_side_effect + ) + self.monkeypatch.setattr( + dcnm_networkv2, "get_fabric_details", mock_get_fabric_details + ) + + # dcnm_send invoked from module_utils/dcnm.py + dcnm_send_side_effect.append(resp.get("resp_net_template")) + dcnm_send_side_effect.append(resp.get("resp_net_ext_template")) + + # dcnm_send invoked from modules/dcnm_networkv2.py + dcnm_send_side_effect_1.append(resp.get("networkv2_net_objects_00001")) + dcnm_get_url_side_effect.append(resp.get("networkv2_net_attach_objects_00001")) + dcnm_send_side_effect_1.append(resp.get("attach_response_00001")) + dcnm_send_side_effect_1.append(resp.get("networkv2_net_attach_deploy_00001")) + + att_resp = copy.deepcopy(resp.get("networkv2_net_attach_objects_00001")) + att_resp["DATA"][0]["lanAttachList"][0]["lanAttachState"] = "NA" + dcnm_send_side_effect_1.append(att_resp) + dcnm_send_side_effect_1.append(resp.get("get_response_00003")) + + mock_dcnm_send = Mock(side_effect=dcnm_send_side_effect) + self.monkeypatch.setattr( + dcnm, "dcnm_send", mock_dcnm_send + ) + + mock_dcnm_send_1 = Mock(side_effect=dcnm_send_side_effect_1) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_send", mock_dcnm_send_1 + ) + + mock_dcnm_get_url = Mock(side_effect=dcnm_get_url_side_effect) + self.monkeypatch.setattr( + dcnm_networkv2, "dcnm_get_url", mock_dcnm_get_url + ) + + config = data.get("networkv2_cfg_00001") + playbook_config = [config] + + set_module_args( + dict( + state="deleted", + fabric="test_netv2", + config=playbook_config, + ) + ) + + result = self.execute_module(changed=True, failed=False) + + assert result.get("changed") is True + self.assertEqual(len(result["response"]), 3)