diff --git a/src/config/api-server/vnc_cfg_api_server/resources/virtual_machine_interface.py b/src/config/api-server/vnc_cfg_api_server/resources/virtual_machine_interface.py index 7f42e105a61..d14309832ea 100644 --- a/src/config/api-server/vnc_cfg_api_server/resources/virtual_machine_interface.py +++ b/src/config/api-server/vnc_cfg_api_server/resources/virtual_machine_interface.py @@ -17,7 +17,6 @@ from vnc_api.gen.resource_common import VirtualPortGroup from vnc_api.gen.resource_xsd import MacAddressesType from vnc_api.gen.resource_xsd import PolicyBasedForwardingRuleType -from vnc_api.gen.resource_xsd import VpgInterfaceParametersType from vnc_cfg_api_server.context import get_context from vnc_cfg_api_server.resources._resource_base import ResourceMixin @@ -830,78 +829,6 @@ def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, return True, ret_dict - @classmethod - def _notify_ae_id_modified(cls, obj_dict=None, notify=False): - - if (obj_dict.get('deallocated_ae_id') and - len(obj_dict.get('deallocated_ae_id'))): - dealloc_dict_list = obj_dict.get('deallocated_ae_id') - for dealloc_dict in dealloc_dict_list: - ae_id = dealloc_dict.get('ae_id') - vpg_name = dealloc_dict.get('vpg_name') - prouter_name = dealloc_dict.get('prouter_name') - cls.vnc_zk_client.free_ae_id( - prouter_name, ae_id, - vpg_name, notify=notify) - - if (obj_dict.get('allocated_ae_id') and - len(obj_dict.get('allocated_ae_id'))): - alloc_dict_list = obj_dict.get('allocated_ae_id') - for alloc_dict in alloc_dict_list: - ae_id = alloc_dict.get('ae_id') - vpg_name = alloc_dict.get('vpg_name') - prouter_name = alloc_dict.get('prouter_name') - cls.vnc_zk_client.alloc_ae_id(prouter_name, vpg_name, ae_id, - notify=True) - - # Allocate ae_id: - # 1. Get the ae_id from the old PI ref which is already assoc with PR - # 2. If not, then check if it got already generated on this api call - # from the other PI that belongs to the same PR. - # 3. Else allocate the new ae_id. Id allocation is per PR 0-127 and key - # is the vpg name. - @classmethod - def _check_and_alloc_ae_id(cls, links, prouter_name, - vpg_name, old_pi_to_pr_dict): - if not len(links) > 1: - return None, None - - for pr in old_pi_to_pr_dict.values(): - if (pr.get('prouter_name') == prouter_name and - pr.get('ae_id') is not None): - attr_obj = VpgInterfaceParametersType(pr.get('ae_id')) - return attr_obj, pr.get('ae_id') - - ae_num = cls.vnc_zk_client.alloc_ae_id(prouter_name, vpg_name) - attr_obj = VpgInterfaceParametersType(ae_num) - - return attr_obj, ae_num - - # Free ae_id: - # 1. If the PI ref is getting deleted and there in no other PI left - # that belongs to the same PR. - # 2. Or if there is only one physical link to VPG. 
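Reviewer note: the helpers removed above (and re-added on VirtualPortGroupServer later in this patch) all sit on top of the ZooKeeper-backed AE-ID pool described in the comment being deleted: each physical router gets ids 0-127 and the allocation key is the VPG name. A minimal stand-in sketch of that contract only — the class below is hypothetical and is not the real vnc_zk_client API:

# Illustrative stand-in for the allocator reached via
# cls.vnc_zk_client.alloc_ae_id()/free_ae_id(); only the
# "0-127 per physical router, keyed by VPG name" contract is taken
# from the comment above, everything else here is made up.
class FakeAeIdPool(object):
    MAX_AE_ID = 128  # ids 0..127 are available per physical router

    def __init__(self):
        self._used = {}  # prouter_name -> {vpg_name: ae_id}

    def alloc_ae_id(self, prouter_name, vpg_name):
        vpgs = self._used.setdefault(prouter_name, {})
        taken = set(vpgs.values())
        for ae_id in range(self.MAX_AE_ID):
            if ae_id not in taken:
                vpgs[vpg_name] = ae_id
                return ae_id
        # the real client raises cfgm_common.exceptions.ResourceExhaustionError
        raise RuntimeError('AE-ID pool exhausted on %s' % prouter_name)

    def free_ae_id(self, prouter_name, ae_id, vpg_name):
        self._used.get(prouter_name, {}).pop(vpg_name, None)


pool = FakeAeIdPool()
assert pool.alloc_ae_id('pr1', 'vpg-1') == 0
assert pool.alloc_ae_id('pr1', 'vpg-2') == 1   # second VPG on the same PR
assert pool.alloc_ae_id('pr2', 'vpg-1') == 0   # pool is per physical router
pool.free_ae_id('pr1', 0, 'vpg-1')
assert pool.alloc_ae_id('pr1', 'vpg-3') == 0   # freed id can be re-used
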
- @classmethod - def _check_and_free_ae_id(cls, links, prouter_dict, - vpg_name, pi_to_pr_dict): - prouter_list = [] - dealloc_dict = {} - for pr in pi_to_pr_dict.values(): - prouter_list.append(pr) - - prouter_name = prouter_dict.get('prouter_name') - if prouter_name not in prouter_list or len(links) < 2: - cls.vnc_zk_client.free_ae_id(prouter_name, - prouter_dict.get('ae_id'), - vpg_name) - dealloc_dict['ae_id'] = prouter_dict.get('ae_id') - dealloc_dict['prouter_name'] = prouter_dict.get('prouter_name') - dealloc_dict['vpg_name'] = vpg_name - prouter_dict['ae_id'] = None - return dealloc_dict - - return - @classmethod def _check_annotations( cls, api_server, obj_uuid, @@ -1574,7 +1501,6 @@ def _manage_vpg_association(cls, vmi_id, api_server, db_conn, phy_links, phy_interface_uuids = [] old_phy_interface_uuids = [] new_pi_to_pr_dict = {} - old_pi_to_pr_dict = {} for link in phy_links: if link.get('fabric'): if fabric_name is not None and fabric_name != link['fabric']: @@ -1593,23 +1519,6 @@ def _manage_vpg_association(cls, vmi_id, api_server, db_conn, phy_links, phy_interface_uuids.append(pi_uuid) new_pi_to_pr_dict[pi_uuid] = prouter_name - # check if new physical interfaces belongs to some other vpg - for uuid in set(phy_interface_uuids): - ok, phy_interface_dict = db_conn.dbe_read( - obj_type='physical-interface', - obj_id=uuid, - obj_fields=['name', 'virtual_port_group_back_refs']) - if not ok: - return (ok, 400, phy_interface_dict) - - vpg_refs = phy_interface_dict.get('virtual_port_group_back_refs') - if vpg_refs and vpg_refs[0]['to'][-1] != vpg_name: - msg = 'Physical interface %s already belong to the vpg %s' %\ - (phy_interface_dict.get( - 'name', phy_interface_dict['fq_name']), - vpg_refs[0]['to'][-1]) - return (False, (400, msg)) - if vpg_name: # read the vpg object vpg_fq_name = ['default-global-system-config', fabric_name, vpg_name] @@ -1717,59 +1626,38 @@ def vlanid_sanitizer(vlanid): return ok, result old_phy_interface_refs = vpg_dict.get('physical_interface_refs') - for ref in old_phy_interface_refs or []: - old_pi_to_pr_dict[ref['uuid']] = { - 'prouter_name': ref['to'][1], - 'ae_id': ref['attr'].get('ae_num') if ref['attr'] else None} - old_phy_interface_uuids.append(ref['uuid']) - + old_phy_interface_uuids = [ref['uuid'] for ref in + old_phy_interface_refs or []] ret_dict = {} - ret_dict['deallocated_ae_id'] = [] - ret_dict['allocated_ae_id'] = [] # delete old physical interfaces to the vpg - for uuid in set(old_phy_interface_uuids) - set(phy_interface_uuids): - prouter_dict = old_pi_to_pr_dict.get(uuid) - dealloc_dict = cls._check_and_free_ae_id( - phy_links, prouter_dict, - vpg_name, new_pi_to_pr_dict) - ret_dict['deallocated_ae_id'].append(dealloc_dict) - - api_server.internal_request_ref_update( - 'virtual-port-group', - vpg_uuid, - 'DELETE', - 'physical-interface', - uuid) + delete_pi_uuids = (set(old_phy_interface_uuids) - + set(phy_interface_uuids)) + for uuid in delete_pi_uuids: + try: + api_server.internal_request_ref_update( + 'virtual-port-group', + vpg_uuid, + 'DELETE', + 'physical-interface', + uuid) + except Exception as exc: + return False, (exc.status_code, exc.content) # add new physical interfaces to the vpg - pr_to_ae_id = {} - for uuid in phy_interface_uuids: - prouter_name = new_pi_to_pr_dict.get(uuid) - if pr_to_ae_id.get(prouter_name) is None: - attr_obj, ae_id = cls._check_and_alloc_ae_id( - phy_links, prouter_name, - vpg_name, old_pi_to_pr_dict) - pr_to_ae_id[prouter_name] = ae_id - - if len(phy_links) > 1 and ae_id is not None: - alloc_dict = {} 
- alloc_dict['ae_id'] = ae_id - alloc_dict['prouter_name'] = prouter_name - alloc_dict['vpg_name'] = vpg_name - ret_dict['allocated_ae_id'].append(alloc_dict) - else: - attr_obj = VpgInterfaceParametersType( - ae_num=pr_to_ae_id.get(prouter_name)) - - api_server.internal_request_ref_update( - 'virtual-port-group', - vpg_uuid, - 'ADD', - 'physical-interface', - uuid, - attr=attr_obj.__dict__ if attr_obj else None, - relax_ref_for_delete=True) + create_pi_uuids = (set(phy_interface_uuids) - + set(old_phy_interface_uuids)) + for uuid in create_pi_uuids: + try: + api_server.internal_request_ref_update( + 'virtual-port-group', + vpg_uuid, + 'ADD', + 'physical-interface', + uuid, + relax_ref_for_delete=True) + except Exception as exc: + return False, (exc.status_code, exc.content) # update intent-map with vn_id # read intent map object @@ -1839,29 +1727,6 @@ def pre_dbe_delete(cls, id, obj_dict, db_conn): delete_dict = {'virtual_machine_refs': []} cls._check_vrouter_link(obj_dict, kvp_dict, delete_dict, db_conn) - # Clean ae ids associated with VPG->PIs - for vpg_back_ref in obj_dict.get('virtual_port_group_back_refs', - []): - fqname = vpg_back_ref['to'] - vpg_uuid = db_conn.fq_name_to_uuid('virtual_port_group', fqname) - ok, vpg_dict = db_conn.dbe_read( - obj_type='virtual-port-group', - obj_id=vpg_uuid, - obj_fields=['physical_interface_refs']) - if not ok: - return ok, vpg_dict - - notify_dict = {} - notify_dict['deallocated_ae_id'] = [] - for pi_ref in vpg_dict.get('physical_interface_refs') or []: - if pi_ref['attr'] and pi_ref['attr'].get('ae_num') is not None: - dealloc_dict = {} - dealloc_dict['ae_id'] = pi_ref['attr'].get('ae_num') - dealloc_dict['prouter_name'] = pi_ref['to'][1] - dealloc_dict['vpg_name'] = fqname[2] - notify_dict['deallocated_ae_id'].append(dealloc_dict) - obj_dict.update(notify_dict) - return True, "", None @classmethod @@ -1948,27 +1813,4 @@ def post_dbe_delete(cls, id, obj_dict, db_conn): api_server.internal_request_delete('virtual_port_group', vpg_uuid) - # Clean ae ids associated with VPG->PIs - cls._notify_ae_id_modified(obj_dict) - return True, "" - - @classmethod - def dbe_create_notification(cls, db_conn, obj_id, obj_dict): - cls._notify_ae_id_modified(obj_dict) - - return True, '' - - @classmethod - def dbe_update_notification(cls, obj_id, extra_dict=None): - - if extra_dict is not None: - cls._notify_ae_id_modified(extra_dict, notify=True) - - return True, '' - - @classmethod - def dbe_delete_notification(cls, obj_id, obj_dict): - cls._notify_ae_id_modified(obj_dict, notify=True) - - return True, '' diff --git a/src/config/api-server/vnc_cfg_api_server/resources/virtual_port_group.py b/src/config/api-server/vnc_cfg_api_server/resources/virtual_port_group.py index 1fb601b5b28..e265c39bdef 100644 --- a/src/config/api-server/vnc_cfg_api_server/resources/virtual_port_group.py +++ b/src/config/api-server/vnc_cfg_api_server/resources/virtual_port_group.py @@ -6,14 +6,237 @@ from cfgm_common.exceptions import HttpError from cfgm_common.exceptions import NoIdError +from cfgm_common.exceptions import ResourceExhaustionError from pysandesh.gen_py.sandesh.ttypes import SandeshLevel from vnc_api.gen.resource_common import VirtualPortGroup +from vnc_api.gen.resource_xsd import VpgInterfaceParametersType from vnc_cfg_api_server.resources._resource_base import ResourceMixin class VirtualPortGroupServer(ResourceMixin, VirtualPortGroup): + @classmethod + def _notify_ae_id_modified(cls, obj_dict=None, notify=False): + if (obj_dict.get('deallocated_ae_id') and + 
len(obj_dict.get('deallocated_ae_id'))): + dealloc_dict_list = obj_dict.get('deallocated_ae_id') + for dealloc_dict in dealloc_dict_list: + ae_id = dealloc_dict.get('ae_id') + vpg_name = dealloc_dict.get('vpg_name') + prouter_name = dealloc_dict.get('prouter_name') + cls.vnc_zk_client.free_ae_id( + prouter_name, ae_id, + vpg_name, notify=notify) + msg = "NOTIFY: Deallocated AE-ID (%s) at VPG(%s)/PR(%s)" % ( + ae_id, vpg_name, prouter_name) + cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG) + if (obj_dict.get('allocated_ae_id') and + len(obj_dict.get('allocated_ae_id'))): + alloc_dict_list = obj_dict.get('allocated_ae_id') + for alloc_dict in alloc_dict_list: + ae_id = alloc_dict.get('ae_id') + vpg_name = alloc_dict.get('vpg_name') + prouter_name = alloc_dict.get('prouter_name') + cls.vnc_zk_client.alloc_ae_id(prouter_name, vpg_name, ae_id, + notify=True) + msg = "NOTIFY: Allocated AE-ID (%s) at VPG(%s)/PR(%s)" % ( + ae_id, vpg_name, prouter_name) + cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG) + + @classmethod + def _alloc_ae_id(cls, prouter_name, vpg_name): + try: + pi_ae = cls.vnc_zk_client.alloc_ae_id(prouter_name, vpg_name) + except ResourceExhaustionError: + err_msg = ('ResourceExhaustionError: when allocating AE-ID for ' + 'virtual-port-group (%s) at physical-router (%s)' % ( + vpg_name, prouter_name)) + return False, (400, err_msg) + attr_obj = VpgInterfaceParametersType(pi_ae) + attr_dict = attr_obj.__dict__ + alloc_dict = { + 'ae_id': pi_ae, + 'prouter_name': prouter_name, + 'vpg_name': vpg_name, + } + msg = "Allocated AE-ID (%s) at VPG(%s)/PR(%s)" % ( + pi_ae, vpg_name, prouter_name) + cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG) + return True, (attr_dict, alloc_dict) + + @classmethod + def _dealloc_ae_id(cls, prouter_name, ae_id, vpg_name): + cls.vnc_zk_client.free_ae_id(prouter_name, ae_id, vpg_name) + msg = "De-allocated AE-ID (%s) at VPG(%s)/PR(%s)" % ( + ae_id, vpg_name, prouter_name) + cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG) + dealloc_dict = { + 'ae_id': ae_id, + 'prouter_name': prouter_name, + 'vpg_name': vpg_name + } + return dealloc_dict + + @classmethod + def _process_ae_id(cls, db_obj_dict, vpg_name, obj_dict=None): + attr_dict = None + alloc_dealloc_dict = {'allocated_ae_id': [], 'deallocated_ae_id': []} + curr_pr_dict = {} + curr_pi_dict = {} + db_pi_dict = {} + db_pr_dict = {} + vpg_uuid = db_obj_dict['uuid'] + if not obj_dict: + obj_dict = {} + + # process incoming PIs + for ref in obj_dict.get('physical_interface_refs') or []: + curr_pi_dict[ref['uuid']] = ref['to'][1] + curr_pr_dict[ref['to'][1]] = ref['attr'] + + # process existing PIs in DB + for ref in db_obj_dict.get('physical_interface_refs') or []: + db_pi_dict[ref['uuid']] = ref['to'][1] + if not (ref['to'][1] in db_pr_dict and db_pr_dict[ref['to'][1]]): + db_pr_dict[ref['to'][1]] = ref['attr'] + + create_pi_uuids = list(set(curr_pi_dict.keys()) - + set(db_pi_dict.keys())) + delete_pi_uuids = list(set(db_pi_dict.keys()) - + set(curr_pi_dict.keys())) + + # no PIs in db_obj_dict + if len(create_pi_uuids) < 2 and len(db_pi_dict.keys()) == 0: + msg = "Skip AE-ID allocation as Creating PI len(%s) < 2" % ( + create_pi_uuids) + cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG) + return True, (attr_dict, alloc_dealloc_dict) + + # nothing to delete or add + if len(create_pi_uuids) == len(delete_pi_uuids) == 0: + msg = "Skip AE-ID allocation as no PI to Create / Delete" + cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG) + return True, (attr_dict, 
alloc_dealloc_dict) + + # nothing to delete, because rest of PIs shares same PR + if (len(create_pi_uuids) == 0 and len(delete_pi_uuids) == 1 and + len(db_pr_dict.keys()) == 1 and len(db_pi_dict.keys()) > 2): + msg = "Skip AE-ID allocation as rest PI(%s) shares same PR(%s)" % ( + db_pi_dict.keys(), db_pr_dict.keys()) + cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG) + return True, (attr_dict, alloc_dealloc_dict) + + # allocate case + for pi_uuid in create_pi_uuids: + attr_dict = None + pi_pr = curr_pi_dict.get(pi_uuid) + pi_ae = db_pr_dict.get(pi_pr) + if pi_ae is None: + # allocate + ok, result = cls._alloc_ae_id(pi_pr, vpg_name) + if not ok: + return ok, result + attr_dict, _alloc_dict = result + alloc_dealloc_dict['allocated_ae_id'].append(_alloc_dict) + msg = "Allocated AE-ID(%s) for PI(%s) at VPG(%s)/PR(%s)" % ( + attr_dict, pi_uuid, vpg_name, pi_pr) + cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG) + else: + attr_dict = pi_ae + + # re-allocate existing single PI if any + if (len(db_pi_dict.keys()) == 1 and len(create_pi_uuids) == 1): + db_pi_uuid = list(db_pi_dict.keys())[0] + if (list(db_pi_dict.values())[0] != + curr_pi_dict.get(create_pi_uuids[0])): + # allocate a new ae-id as it belongs to different PR + db_pr = list(db_pi_dict.values())[0] + ok, result = cls._alloc_ae_id(db_pr, vpg_name) + if not ok: + return ok, result + attr_dict_leftover_pi, _alloc_dict = result + alloc_dealloc_dict['allocated_ae_id'].append(_alloc_dict) + msg = ("Allocated AE-ID(%s) for PI(%s) at " + "VPG(%s)/PR(%s)" % ( + attr_dict_leftover_pi, db_pi_uuid, + vpg_name, db_pr)) + cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG) + else: + attr_dict_leftover_pi = attr_dict + msg = "Re-using AE-ID(%s) for PI(%s) at VPG(%s)/PR(%s)" % ( + attr_dict_leftover_pi, db_pi_uuid, vpg_name, pi_pr) + cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG) + (ok, result) = cls.db_conn.ref_update( + 'virtual_port_group', + vpg_uuid, + 'physical_interface', + db_pi_uuid, + {'attr': attr_dict_leftover_pi}, + 'ADD', + db_obj_dict.get('id_perms'), + attr_to_publish=None, + relax_ref_for_delete=True) + msg = "Updated AE-ID(%s) in PI(%s) ref to VPG(%s)" % ( + attr_dict_leftover_pi, db_pi_uuid, vpg_name) + cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG) + + # deallocate case + _in_dealloc_list = [] + for pi_uuid in delete_pi_uuids: + pi_pr = db_pi_dict.get(pi_uuid) + pi_ae = db_pr_dict.get(pi_pr) + db_pi_prs = list(db_pi_dict.values()).count(pi_pr) + # PR/VPG is already considered for deallocation, so no need + # to dealloc again + if '%s:%s' % (pi_pr, vpg_name) in _in_dealloc_list: + continue + if (pi_ae is not None and (db_pi_prs < 2 or + len(delete_pi_uuids) > 1)): + ae_id = pi_ae.get('ae_num') + # de-allocate + _dealloc_dict = cls._dealloc_ae_id(pi_pr, ae_id, vpg_name) + alloc_dealloc_dict['deallocated_ae_id'].append(_dealloc_dict) + # record deallocated pr/vpg + _in_dealloc_list.append('%s:%s' % (pi_pr, vpg_name)) + msg = "Deallocated AE-ID(%s) for PI(%s) at VPG(%s)/PR(%s)" % ( + ae_id, pi_uuid, vpg_name, pi_pr) + cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG) + + # de-allocate leftover single PI, if any + # in delete case, whatever comes in curr_pi_dict are the + # leftovers because for delete refs, ref to be deleted + # will not be coming in payload + if (len(curr_pi_dict.keys()) == 1 and + len(db_pi_dict.keys()) == len(delete_pi_uuids) + 1): + pi_uuid = list(curr_pi_dict.keys())[0] + pi_pr = curr_pi_dict.get(pi_uuid) + pi_ae = curr_pr_dict.get(pi_pr) + if '%s:%s' % (pi_pr, 
vpg_name) not in _in_dealloc_list: + if pi_ae is not None: + ae_id = pi_ae.get('ae_num') + _dealloc_dict = cls._dealloc_ae_id(pi_pr, ae_id, vpg_name) + alloc_dealloc_dict['deallocated_ae_id'].append( + _dealloc_dict) + # record deallocated pr/vpg + _in_dealloc_list.append('%s:%s' % (pi_pr, vpg_name)) + msg = ("Deallocated AE-ID(%s) from leftover PI(%s) at " + "VPG(%s)/PR(%s)" % ( + ae_id, pi_uuid, vpg_name, pi_pr)) + cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG) + pi_ae = db_pr_dict.get(pi_pr) + (ok, result) = cls.db_conn.ref_update( + 'virtual_port_group', + vpg_uuid, + 'physical_interface', + pi_uuid, + {'attr': None}, + 'ADD', + db_obj_dict.get('id_perms'), + relax_ref_for_delete=True) + + return True, (attr_dict, alloc_dealloc_dict) + @classmethod def update_physical_intf_type(cls, obj_dict=None, old_obj_dict=None): @@ -56,6 +279,31 @@ def update_physical_intf_type(cls, obj_dict=None, to_be_added_pi_uuids = list(set(new_uuid_list) - set(old_uuid_list)) to_be_deleted_pi_uuids = list(set(old_uuid_list) - set(new_uuid_list)) + # ensure this PI do not belong to other VPGs + pis_attached_to_vpg = {} + for pi_uuid in to_be_added_pi_uuids: + ok, pi_obj_dict = db_conn.dbe_read( + obj_type='physical-interface', + obj_id=pi_uuid, + obj_fields=['virtual_port_group_back_refs']) + if not ok: + return ok, (400, pi_obj_dict) + vpg_refs = pi_obj_dict.get('virtual_port_group_back_refs') + if vpg_refs: + pis_attached_to_vpg[pi_uuid] = vpg_refs + if pis_attached_to_vpg: + vpg_uuid = obj_dict.get('uuid') + msg = "" + for pi, vpgs in pis_attached_to_vpg.items(): + for vpg in vpgs: + msg += ( + 'PI (%s) VPG-UUID (%s) VPG-FQNAME (%s); ' % ( + pi, vpg['uuid'], ":".join(vpg['to']))) + return ( + False, + (400, "physical interfaces already added at other VPGs can not" + " be attached to this VPG (%s): %s" % (vpg_uuid, msg))) + for pi_uuid in to_be_added_pi_uuids or []: try: api_server.internal_request_update( @@ -84,7 +332,8 @@ def update_physical_intf_type(cls, obj_dict=None, # end update_physical_intf_type @classmethod - def pre_dbe_create(cls, tenant_name, obj_dict, db_conn): + def pre_dbe_create(cls, tenant_name, obj_dict, db_conn, **kwargs): + ret_val = '' if ('vpg-internal' in obj_dict['fq_name'][2] and obj_dict.get('virtual_port_group_user_created', True)): msg = "Virtual port group(%s) with name vpg-internal as prefix "\ @@ -92,6 +341,16 @@ def pre_dbe_create(cls, tenant_name, obj_dict, db_conn): % (obj_dict['uuid']) return False, (400, msg) + # when PI refs are added to VPG object during create VPG. + # stateful_create do not allow us to allocate AE-ID and + # update them in PI object refs + if obj_dict.get('physical_interface_refs'): + msg = ("API Infra do not support allocating AE-ID when " + "Physical Interface refs are sent in VPG create request. 
" + "Workaround: Create VPG first, then add Physical " + "Interface to VPG") + return False, (400, msg) + if obj_dict.get('virtual_port_group_trunk_port_id'): primary_vmi_id = obj_dict.get('virtual_port_group_trunk_port_id') ok, result = db_conn.dbe_read( @@ -114,31 +373,41 @@ def pre_dbe_create(cls, tenant_name, obj_dict, db_conn): if not ok: return ok, result - return True, '' + return True, ret_val @classmethod def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs): # Handling both deletion and addition of interfaces here + ret_val = '' if obj_dict.get('physical_interface_refs'): # compute the already existing physical interface refs for the # vpg object - ok, result = db_conn.dbe_read( + ok, db_obj_dict = db_conn.dbe_read( obj_type='virtual_port_group', obj_id=obj_dict['uuid'], - obj_fields=['physical_interface_refs']) + obj_fields=['physical_interface_refs', 'id_perms']) if not ok: - return ok, (400, result) + return ok, (400, db_obj_dict) - ok, res = cls.update_physical_intf_type(obj_dict, result) + ok, res = cls.update_physical_intf_type(obj_dict, db_obj_dict) + if not ok: + return ok, res + # Allocate/Deallocate AE-IDs for the attached PIs + ok, res = cls._process_ae_id( + db_obj_dict, fq_name[-1], obj_dict) if not ok: return ok, res + if res[0] and kwargs.get('ref_update'): + kwargs['ref_update']['data']['attr'] = res[0] + ret_val = res[1] - return True, '' + return True, ret_val # end pre_dbe_update @classmethod def pre_dbe_delete(cls, id, obj_dict, db_conn): + ret_val = '' # If the user deletes VPG, make sure that all the referring # VMIs are deleted. if obj_dict.get('virtual_machine_interface_refs'): @@ -151,7 +420,15 @@ def pre_dbe_delete(cls, id, obj_dict, db_conn): if not ok: return (False, result, None) - return True, '', None + if obj_dict.get('physical_interface_refs'): + # release ae-ids associated with PIs attached to this VPG + fq_name = obj_dict.get('fq_name') + ok, res = cls._process_ae_id(obj_dict, fq_name[-1]) + if not ok: + return (ok, res, None) + ret_val = res[1] + + return True, ret_val, None @classmethod def post_dbe_delete(cls, id, obj_dict, db_conn): @@ -170,7 +447,14 @@ def dbe_create_notification(cls, db_conn, obj_id, obj_dict): vpg_id = int(fq_name[2].split('-')[2]) vpg_id_fqname = cls.vnc_zk_client.get_vpg_from_id(vpg_id) cls.vnc_zk_client.alloc_vpg_id(vpg_id_fqname, vpg_id) + # Notify AE-ID allocation/de-allocation + cls._notify_ae_id_modified(obj_dict) + return True, '' + @classmethod + def dbe_update_notification(cls, obj_id, obj_dict=None): + if obj_dict is not None: + cls._notify_ae_id_modified(obj_dict, notify=True) return True, '' @classmethod @@ -180,5 +464,6 @@ def dbe_delete_notification(cls, obj_id, obj_dict): vpg_id = int(fq_name[2].split('-')[2]) vpg_id_fqname = cls.vnc_zk_client.get_vpg_from_id(vpg_id) cls.vnc_zk_client.free_vpg_id(vpg_id, vpg_id_fqname, notify=True) - + # Notify AE-ID allocation/de-allocation + cls._notify_ae_id_modified(obj_dict) return True, '' diff --git a/src/config/api-server/vnc_cfg_api_server/tests/resources/test_virtual_port_group.py b/src/config/api-server/vnc_cfg_api_server/tests/resources/test_virtual_port_group.py index 9fe9ad7545b..e221719a3bb 100644 --- a/src/config/api-server/vnc_cfg_api_server/tests/resources/test_virtual_port_group.py +++ b/src/config/api-server/vnc_cfg_api_server/tests/resources/test_virtual_port_group.py @@ -5,11 +5,14 @@ from builtins import int from builtins import range from builtins import str +from collections import defaultdict import json import logging +import os 
import re from cfgm_common.exceptions import BadRequest +from cfgm_common.exceptions import NoIdError from cfgm_common.zkclient import ZookeeperLock import mock from testtools import ExpectedException @@ -247,14 +250,16 @@ def _get_nth_esi(self, esi, n): esi_long = self._esi_to_long(esi) return self._long_to_esi(esi_long + n) - def _create_pi_objects(self, pr_obj, pi_names): + def _create_pi_objects(self, pr_objs, pi_names): pi_obj_dict = {} esi_start_id = '00:11:22:33:44:55:66:77:88:11' esi_count = 1 + if not isinstance(pr_objs, list): + pr_objs = [pr_objs] * len(pi_names) for pi_name in pi_names: esi_id = self._get_nth_esi(esi_start_id, esi_count) pi = PhysicalInterface(name=pi_name, - parent_obj=pr_obj, + parent_obj=pr_objs[esi_count - 1], ethernet_segment_identifier=esi_id) pi_uuid = self._vnc_lib.physical_interface_create(pi) pi_obj_dict[pi_name] = self._vnc_lib.physical_interface_read( @@ -283,7 +288,6 @@ def _create_vmis(self, vmi_infos): vmi_obj_dict = {} for vmi_info in vmi_infos: vmi_name = vmi_info.get('name') - vmi_id = vmi_info.get('vmi_id') vmi_parent = vmi_info.get('parent_obj') vmi_vn = vmi_info.get('vn') vmi_vpg_uuid = vmi_info.get('vpg') @@ -293,7 +297,7 @@ def _create_vmis(self, vmi_infos): vmi_is_untagged = vmi_info.get('is_untagged') # create vmi obj - vmi_obj = VirtualMachineInterface(vmi_id, parent_obj=vmi_parent) + vmi_obj = VirtualMachineInterface(vmi_name, parent_obj=vmi_parent) vmi_obj.set_virtual_network(vmi_vn) vmi_vpg = self.api.virtual_port_group_read(id=vmi_vpg_uuid) @@ -382,6 +386,1426 @@ def mocked_init(*args, **kwargs): with mock.patch.object(ZookeeperLock, '__enter__', mocked_enter): self.test_reinit_adds_enterprise_annotations() + # New Case, attaching PI directly to VPG + def test_add_and_delete_pis_at_vpg(self): + proj_obj, fabric_obj, pr_objs = self._create_prerequisites( + create_second_pr=True) + test_id = self.id() + + def process_ae_ids(x): + return [int(i) for i in sorted(x)] + + def get_zk_ae_ids(prs=None): + prefix = os.path.join( + self.__class__.__name__, + 'id', 'aggregated-ethernet') + zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client + if not prs: + prs = [os.path.join(prefix, pr.name) for pr in pr_objs] + else: + if not isinstance(prs, list): + prs = [prs] + prs = [os.path.join(prefix, pr) for pr in prs] + ae_ids = {} + for pr in prs: + pr_org = os.path.split(pr)[-1] + ae_ids[pr_org] = zk_client.get_children(pr) + return ae_ids + + pi_per_pr = 15 + pi_objs = {} + pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for + i in range(1, pi_per_pr + 1)] + pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for + i in range(1, pi_per_pr + 1)] + pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names) + pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names) + pi_objs.update(pr1_pi_objs) + pi_objs.update(pr2_pi_objs) + + # create ten VPGs + vpg_count = 10 + vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range( + 1, vpg_count + 1)] + vpg_objs = self._create_vpgs(fabric_obj, vpg_names) + + # record AE-IDs in ZK before creating any VPG + ae_ids = [x for x in get_zk_ae_ids().values() if x] + self.assertEqual(len(ae_ids), 0) + + # Case 1 + # Attach 3 PIs from PR1 to VPG-1 + ae_ids = {} + vpg_name = vpg_names[0] + vpg_obj = vpg_objs[vpg_name] + for pi in range(3): + vpg_obj.add_physical_interface(pi_objs[pr1_pi_names[pi]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + ae_ids[vpg_name] = 
{ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify PI-refs are correct + self.assertEqual(len(pi_refs), 3) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + self.assertEqual(len(set(ae_ids[vpg_name].values())), 1) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1) + + # Case 2 + # Add 2 more PIs from PR-1 to VPG-1 + vpg_name = vpg_names[0] + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + for pi in range(3, 5): + vpg_obj.add_physical_interface(pi_objs[pr1_pi_names[pi]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify PI-refs are correct + self.assertEqual(len(pi_refs), 5) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + self.assertEqual(len(set(ae_ids[vpg_name].values())), 1) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1) + + # Case 3 + # Delete PI-2/PR-1 from VPG-1, no changes in AE-ID allocation + vpg_name = vpg_names[0] + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + vpg_obj.del_physical_interface(pi_objs[pr1_pi_names[1]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify PI-refs are correct + self.assertEqual(len(pi_refs), 4) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + self.assertEqual(len(set(ae_ids[vpg_name].values())), 1) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1) + + # Case 4 + # Create VPG-99 along with PI-5/PR1, PI-6/PR1, PI-7/PR1 + # Unsupported as of R2008 + vpg_name = 'vpg_%s_%s' % (test_id, 99) + vpg = VirtualPortGroup(vpg_name, parent_obj=fabric_obj) + for pi in range(5, 8): + vpg.add_physical_interface(pi_objs[pr1_pi_names[pi]]) + with ExpectedException(BadRequest): + self.api.virtual_port_group_create(vpg) + + # verification at Physical Routers + # No changes expected as VPG-99 should have failed + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1) + + # Case 5 + # Create VPG-2 with PI9/PR1, PI10/PR1 and PI1/PR2, PI2/PR2 + vpg_index = 1 + vpg_name = vpg_names[0] + vpg_obj = vpg_objs[vpg_names[vpg_index]] + for pi in range(8, 10): + vpg_obj.add_physical_interface(pr1_pi_objs[pr1_pi_names[pi]]) + for pi in range(2): + vpg_obj.add_physical_interface(pr2_pi_objs[pr2_pi_names[pi]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify PI-refs are correct + self.assertEqual(len(pi_refs), 4) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + self.assertEqual(len(set(ae_ids[vpg_name].values())), 2) + ae_id_sorted = 
process_ae_ids(ae_ids[vpg_name].values()) + self.assertEqual(ae_id_sorted, [0, 0, 1, 1]) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1]) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0]) + + # Case 6 + # Remove PI9/PR1 from VPG-2 + vpg_index = 1 + vpg_name = vpg_names[1] + vpg_obj = vpg_objs[vpg_names[vpg_index]] + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + vpg_obj.del_physical_interface(pr1_pi_objs[pr1_pi_names[8]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify PI-refs are correct + self.assertEqual(len(pi_refs), 3) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + self.assertEqual(len(set(ae_ids[vpg_name].values())), 2) + self.assertEqual(process_ae_ids(ae_ids[vpg_name].values()), [0, 0, 1]) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1]) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0]) + + # Case 7 + # remove PI10/PR1, so one ae_id from PR1 will be deallocated + # no change in PR2 + vpg_index = 1 + vpg_name = vpg_names[vpg_index] + vpg_obj = vpg_objs[vpg_names[vpg_index]] + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + vpg_obj.del_physical_interface(pr1_pi_objs[pr1_pi_names[9]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify PI-refs are correct + self.assertEqual(len(pi_refs), 2) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + self.assertEqual(len(set(ae_ids[vpg_name].values())), 1) + self.assertEqual(process_ae_ids(ae_ids[vpg_name].values()), [0, 0]) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0]) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0]) + + # Case 8 + # Create VPG-3 with PI11/PR1 and PI3/PR2 + vpg_index = 2 + vpg_name = vpg_names[vpg_index] + vpg_obj = vpg_objs[vpg_names[vpg_index]] + for pi in [11]: + vpg_obj.add_physical_interface(pr1_pi_objs[pr1_pi_names[pi]]) + for pi in [3]: + vpg_obj.add_physical_interface(pr2_pi_objs[pr2_pi_names[pi]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify PI-refs are correct + self.assertEqual(len(pi_refs), 2) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + 
self.assertEqual(len(set(ae_ids[vpg_name].values())), 1) + ae_id_sorted = process_ae_ids(ae_ids[vpg_name].values()) + self.assertEqual(ae_id_sorted, [1, 1]) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1]) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0, 1]) + + # Case 9 + # Create VPG-4 with PI4/PR2 and PI5/PR2 and no change in PR1 + vpg_index = 3 + vpg_name = vpg_names[vpg_index] + vpg_obj = vpg_objs[vpg_names[vpg_index]] + for pi in range(4, 6): + vpg_obj.add_physical_interface(pr2_pi_objs[pr2_pi_names[pi]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify PI-refs are correct + self.assertEqual(len(pi_refs), 2) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + self.assertEqual(len(set(ae_ids[vpg_name].values())), 1) + ae_id_sorted = process_ae_ids(ae_ids[vpg_name].values()) + self.assertEqual(ae_id_sorted, [2, 2]) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 3) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1]) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0, 1, 2]) + + # Case 10 + # Delete VPG-1 (0 is deallocated from PR-1 and PR-2 remains same) + vpg_index = 0 + vpg_name = vpg_names[vpg_index] + vpg_obj = vpg_objs[vpg_names[vpg_index]] + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + # Now delete VPG-1 + self.api.virtual_port_group_delete(id=vpg_obj.uuid) + with ExpectedException(NoIdError): + self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + for pi_ref in pi_refs: + pi_obj = self.api.physical_interface_read(id=pi_ref['uuid']) + self.assertFalse('virtual_port_group_back_refs' in pi_obj.__dict__) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 3) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [1]) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0, 1, 2]) + + # Case 11 + # Create VPG-5 with PI6/PR2 and PI7/PR2 and no change in PR1 + vpg_index = 4 + vpg_name = vpg_names[vpg_index] + vpg_obj = vpg_objs[vpg_names[vpg_index]] + for pi in range(6, 8): + vpg_obj.add_physical_interface(pr2_pi_objs[pr2_pi_names[pi]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify PI-refs are correct + self.assertEqual(len(pi_refs), 2) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + self.assertEqual(len(set(ae_ids[vpg_name].values())), 1) + ae_id_sorted = process_ae_ids(ae_ids[vpg_name].values()) + self.assertEqual(ae_id_sorted, [3, 3]) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + 
self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 4) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [1]) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), + [0, 1, 2, 3]) + + # Case 12 + # Create VPG-6 with PI13/PR1 and PI9/PR2 and verify PR1 gets PR1/0,1 + vpg_index = 5 + vpg_name = vpg_names[vpg_index] + vpg_obj = vpg_objs[vpg_names[vpg_index]] + for pi in [12]: + vpg_obj.add_physical_interface(pr1_pi_objs[pr1_pi_names[pi]]) + for pi in [8]: + vpg_obj.add_physical_interface(pr2_pi_objs[pr2_pi_names[pi]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify PI-refs are correct + self.assertEqual(len(pi_refs), 2) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + self.assertEqual(len(set(ae_ids[vpg_name].values())), 2) + ae_id_sorted = process_ae_ids(ae_ids[vpg_name].values()) + self.assertEqual(ae_id_sorted, [0, 4]) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 5) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0, 1]) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), + [0, 1, 2, 3, 4]) + + # Case 13 + # Delete PI13/PR1 from VPG-6 verify both PI13/PR1 and + # PI9/PR2 loses ae-id + vpg_index = 5 + vpg_name = vpg_names[vpg_index] + vpg_obj = vpg_objs[vpg_names[vpg_index]] + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + for pi in [12]: + vpg_obj.del_physical_interface(pr1_pi_objs[pr1_pi_names[pi]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify PI-refs are correct + self.assertEqual(len(pi_refs), 1) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + self.assertEqual(len(set(ae_ids[vpg_name].values())), 1) + self.assertIsNone(list(set(ae_ids[vpg_name].values()))[0]) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 4) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [1]) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), + [0, 1, 2, 3]) + + # TO-Do cleanup + + def test_leftover_single_pi_deallocation(self): + """Leftover single PI from same PR is deallocated.""" + proj_obj, fabric_obj, pr_objs = self._create_prerequisites( + create_second_pr=True) + test_id = self.id() + + def process_ae_ids(x): + return [int(i) for i in sorted(x)] + + def get_zk_ae_ids(prs=None): + prefix = os.path.join( + self.__class__.__name__, + 'id', 'aggregated-ethernet') + zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client + if not prs: + prs = [os.path.join(prefix, pr.name) for pr in pr_objs] + else: + if not isinstance(prs, list): + prs = [prs] + prs = [os.path.join(prefix, pr) for pr in prs] + ae_ids = {} + for pr in prs: + pr_org = os.path.split(pr)[-1] + ae_ids[pr_org] = zk_client.get_children(pr) + return ae_ids + + 
pi_per_pr = 2 + pi_objs = {} + pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for + i in range(1, pi_per_pr + 1)] + pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names) + pi_objs.update(pr1_pi_objs) + + # create one VPG + vpg_count = 1 + vpg_names = ['vpg_%s_%s' % (test_id, i) for + i in range(1, vpg_count + 1)] + vpg_objs = self._create_vpgs(fabric_obj, vpg_names) + + # record AE-IDs in ZK before creating any VPG + ae_ids = [x for x in get_zk_ae_ids().values() if x] + self.assertEqual(len(ae_ids), 0) + + # Attach PI1/PR1 and PI2/PR1 to VPG-1 + ae_ids = {} + vpg_name = vpg_names[0] + vpg_obj = vpg_objs[vpg_name] + for pi in range(2): + vpg_obj.add_physical_interface(pi_objs[pr1_pi_names[pi]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + + pi_refs = vpg_obj.get_physical_interface_refs() + ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify PI-refs are correct + self.assertEqual(len(pi_refs), 2) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + self.assertEqual(len(set(ae_ids[vpg_name].values())), 1) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0]) + + # Delete PI1/PR1 + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + vpg_obj.del_physical_interface(pr1_pi_objs[pr1_pi_names[0]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + + # verify PI-refs are correct + pi_refs = vpg_obj.get_physical_interface_refs() + self.assertEqual(len(pi_refs), 1) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].values())), 1) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), []) + + def test_delete_vpg_with_two_prs(self): + proj_obj, fabric_obj, pr_objs = self._create_prerequisites( + create_second_pr=True) + test_id = self.id() + + def process_ae_ids(x): + return [int(i) for i in sorted(x)] + + def get_zk_ae_ids(prs=None): + prefix = os.path.join( + self.__class__.__name__, + 'id', 'aggregated-ethernet') + zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client + if not prs: + prs = [os.path.join(prefix, pr.name) for pr in pr_objs] + else: + if not isinstance(prs, list): + prs = [prs] + prs = [os.path.join(prefix, pr) for pr in prs] + ae_ids = {} + for pr in prs: + pr_org = os.path.split(pr)[-1] + ae_ids[pr_org] = zk_client.get_children(pr) + return ae_ids + + pi_per_pr = 2 + pi_objs = {} + pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for + i in range(1, pi_per_pr + 1)] + pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for + i in range(1, pi_per_pr + 1)] + pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names) + pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names) + pi_objs.update(pr1_pi_objs) + pi_objs.update(pr2_pi_objs) + + # create two VPGs + vpg_count = 2 + vpg_names = ['vpg_%s_%s' % (test_id, i) for + i in range(1, vpg_count + 1)] + vpg_objs = self._create_vpgs(fabric_obj, vpg_names) + + # record AE-IDs in ZK before creating any VPG + ae_ids = [x for x in get_zk_ae_ids().values() if x] + self.assertEqual(len(ae_ids), 0) + + # Attach PI1/PR1, PI1/PR2, PI2/PR1, PI2/PR2 to 
VPG-1 + ae_ids = {} + vpg_index = 0 + vpg_name = vpg_names[0] + vpg_obj = vpg_objs[vpg_names[vpg_index]] + for pi in range(2): + vpg_obj.add_physical_interface(pr1_pi_objs[pr1_pi_names[pi]]) + for pi in range(2): + vpg_obj.add_physical_interface(pr2_pi_objs[pr2_pi_names[pi]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify PI-refs are correct + self.assertEqual(len(pi_refs), 4) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + self.assertEqual(len(set(ae_ids[vpg_name].values())), 1) + ae_id_sorted = process_ae_ids(ae_ids[vpg_name].values()) + self.assertEqual(ae_id_sorted, [0, 0, 0, 0]) + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0]) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0]) + + # Delete VPG-1 + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + self.api.virtual_port_group_delete(id=vpg_obj.uuid) + with ExpectedException(NoIdError): + self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + for pi_ref in pi_refs: + pi_obj = self.api.physical_interface_read(id=pi_ref['uuid']) + self.assertFalse('virtual_port_group_back_refs' in pi_obj.__dict__) + + # Verify no AE-ID at ZK + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), []) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), []) + + # Attach PI1/PR1, PI1/PR2, PI2/PR1, PI2/PR2 to VPG-2 + vpg_index = 1 + vpg_name = vpg_names[1] + vpg_obj = vpg_objs[vpg_names[vpg_index]] + for pi in range(2): + vpg_obj.add_physical_interface(pr1_pi_objs[pr1_pi_names[pi]]) + for pi in range(2): + vpg_obj.add_physical_interface(pr2_pi_objs[pr2_pi_names[pi]]) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify PI-refs are correct + self.assertEqual(len(pi_refs), 4) + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + self.assertEqual(len(set(ae_ids[vpg_name].values())), 1) + ae_id_sorted = process_ae_ids(ae_ids[vpg_name].values()) + self.assertEqual(ae_id_sorted, [0, 0, 0, 0]) + + # verification at ZK + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0]) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0]) + + def test_adding_pi_refs_while_vpg_creation(self): + proj_obj, fabric_obj, pr_objs = self._create_prerequisites( + create_second_pr=True) + test_id = self.id() + + def process_ae_ids(x): + return [int(i) for i in sorted(x)] + + def get_zk_ae_ids(prs=None): + prefix = os.path.join( + self.__class__.__name__, + 'id', 'aggregated-ethernet') + zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client + if not prs: + prs = 
[os.path.join(prefix, pr.name) for pr in pr_objs] + else: + if not isinstance(prs, list): + prs = [prs] + prs = [os.path.join(prefix, pr) for pr in prs] + ae_ids = {} + for pr in prs: + pr_org = os.path.split(pr)[-1] + ae_ids[pr_org] = zk_client.get_children(pr) + return ae_ids + + pi_per_pr = 1 + pi_objs = {} + pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for + i in range(1, pi_per_pr + 1)] + pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for + i in range(1, pi_per_pr + 1)] + pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names) + pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names) + pi_objs.update(pr1_pi_objs) + pi_objs.update(pr2_pi_objs) + + # Create local VPG-99 + vpg_name = 'vpg_%s_%s' % (test_id, 99) + vpg = VirtualPortGroup(vpg_name, parent_obj=fabric_obj) + for pi in range(1): + vpg.add_physical_interface(pr1_pi_objs[pr1_pi_names[pi]]) + for pi in range(1): + vpg.add_physical_interface(pr2_pi_objs[pr2_pi_names[pi]]) + + # Add actual VPG to API server. This should fail + with ExpectedException(BadRequest): + self.api.virtual_port_group_create(vpg) + + def test_exhaust_ae_ids(self): + """ + Raise Exhaustion Exception when more than MAX-AE-ID VPGs are attached. + + MAX-AE-ID == 128 + """ + proj_obj, fabric_obj, pr_objs = self._create_prerequisites( + create_second_pr=True) + test_id = self.id().split('.')[-1] + + def process_ae_ids(x): + return [int(i) for i in sorted(x)] + + def get_zk_ae_ids(prs=None): + prefix = os.path.join( + self.__class__.__name__, + 'id', 'aggregated-ethernet') + zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client + if not prs: + prs = [os.path.join(prefix, pr.name) for pr in pr_objs] + else: + if not isinstance(prs, list): + prs = [prs] + prs = [os.path.join(prefix, pr) for pr in prs] + ae_ids = {} + for pr in prs: + pr_org = os.path.split(pr)[-1] + ae_ids[pr_org] = zk_client.get_children(pr) + return ae_ids + + # create PR1, PR2 and 129 PIs on each PR + pi_objs = {} + pi_per_pr = 135 + pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for + i in range(1, pi_per_pr + 1)] + pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for + i in range(1, pi_per_pr + 1)] + pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names) + pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names) + pi_objs.update(pr1_pi_objs) + pi_objs.update(pr2_pi_objs) + + # create VPGs + vpg_count = 135 + vpg_names = ['vpg_%s_%s' % (test_id, i) + for i in range(1, vpg_count + 1)] + vpg_objs = self._create_vpgs(fabric_obj, vpg_names) + + # record AE-IDs in ZK before creating any VPG + ae_ids = [x for x in get_zk_ae_ids().values() if x] + self.assertEqual(len(ae_ids), 0) + + def update_vpg(vpg_obj, pis, vpg_count): + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + for pi in pis: + vpg_obj.add_physical_interface(pi) + self.api.virtual_port_group_update(vpg_obj) + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + pi_refs = vpg_obj.get_physical_interface_refs() + self.assertEqual(len(pi_refs), 2) + vpg_ae_ids = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + self.assertEqual(len(set(vpg_ae_ids.values())), 1) + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), vpg_count) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), vpg_count) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), + [x for x in range(vpg_count)]) + self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), + [x for x in range(vpg_count)]) + + # Case 1: Update 128 VPGs with One PI from both 
PRs + # This is expected to PASS + for vpg_count in range(128): + vpg_pis = [pr1_pi_objs[pr1_pi_names[vpg_count]], + pr2_pi_objs[pr2_pi_names[vpg_count]]] + update_vpg(vpg_objs[vpg_names[vpg_count]], vpg_pis, vpg_count + 1) + + # Case 2: Try to update 129th VPG with a PI from both PRS + # This is expected to FAIL as max AE-ID allowed is 128 + index = 129 + vpg_name = vpg_names[index] + vpg_obj = vpg_objs[vpg_name] + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + vpg_pis = [pr1_pi_objs[pr1_pi_names[index]], + pr2_pi_objs[pr2_pi_names[index]]] + update_vpg(vpg_obj, vpg_pis, index) + + index = 130 + vpg_name = vpg_names[index] + vpg_obj = vpg_objs[vpg_name] + vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid) + vpg_pis = [pr1_pi_objs[pr1_pi_names[index]], + pr2_pi_objs[pr2_pi_names[index]]] + with ExpectedException(BadRequest): + update_vpg(vpg_obj, vpg_pis, index) + + def _multiple_vpg_with_multiple_pi( + self, proj_obj, fabric_obj, pr_objs, validation): + """Test Steps. + + Add all test steps + """ + test_id = self.id().split('.')[-1] + fabric_name = fabric_obj.get_fq_name() + vlan_vn_count = 10 + + def get_zk_ae_ids(prs=None): + prefix = os.path.join( + self.__class__.__name__, + 'id', 'aggregated-ethernet') + zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client + if not prs: + prs = [os.path.join(prefix, pr.name) for pr in pr_objs] + else: + if not isinstance(prs, list): + prs = [prs] + prs = [os.path.join(prefix, pr) for pr in prs] + ae_ids = {} + for pr in prs: + pr_org = os.path.split(pr)[-1] + ae_ids[pr_org] = zk_client.get_children(pr) + return ae_ids + + vlan_ids = range(1, vlan_vn_count + 1) + vn_names = ['vn_%s_%s' % (test_id, i) + for i in range(1, vlan_vn_count + 1)] + vn_objs = self._create_vns(proj_obj, vn_names) + + # create four PIs, two on each PR + pi_objs = {} + pi_per_pr = 4 + pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for + i in range(1, pi_per_pr + 1)] + pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for + i in range(1, pi_per_pr + 1)] + pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names) + pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names) + pi_objs.update(pr1_pi_objs) + pi_objs.update(pr2_pi_objs) + + # create three VPGs + vpg_count = 10 + vpg_names = ['vpg_%s_%s' % (test_id, i) for + i in range(1, vpg_count + 1)] + vpg_objs = self._create_vpgs(fabric_obj, vpg_names) + + # record AE-IDs in ZK before creating any VPG + ae_ids = [x for x in get_zk_ae_ids().values() if x] + self.assertEqual(len(ae_ids), 0) + + # create 10 VMIs, 4 PIs (2 from each PR) in a VPG + vmi_infos = [] + vpg1_pis = [pi.get_fq_name() for pi in + [pr1_pi_objs[pr1_pi_names[0]], + pr1_pi_objs[pr1_pi_names[1]], + pr2_pi_objs[pr2_pi_names[0]], + pr2_pi_objs[pr2_pi_names[1]]]] + vpg2_pis = [pi.get_fq_name() for pi in + [pr1_pi_objs[pr1_pi_names[2]], + pr1_pi_objs[pr1_pi_names[3]], + pr2_pi_objs[pr2_pi_names[2]], + pr2_pi_objs[pr2_pi_names[3]]]] + vpg1_vmi_names = ['vmi_vpg1_%s_%s' % (test_id, vmi_id) for vmi_id in + range(1, vlan_vn_count + 1)] + vpg2_vmi_names = ['vmi_vpg2_%s_%s' % (test_id, vmi_id) for vmi_id in + range(1, vlan_vn_count + 1)] + for vmi_id in range(1, vlan_vn_count + 1): + info = { + 'name': vpg1_vmi_names[vmi_id - 1], + 'vmi_id': vmi_id, + 'parent_obj': proj_obj, + 'vn': vn_objs[vn_names[vmi_id - 1]], + 'vpg': vpg_objs[vpg_names[0]].uuid, + 'fabric': fabric_name, + 'pis': vpg1_pis, + 'vlan': vlan_ids[vmi_id - 1], + 'is_untagged': False} + vmi_infos.append(info) + info = { + 'name': vpg2_vmi_names[vmi_id - 1], + 
'vmi_id': vmi_id, + 'parent_obj': proj_obj, + 'vn': vn_objs[vn_names[vmi_id - 1]], + 'vpg': vpg_objs[vpg_names[1]].uuid, + 'fabric': fabric_name, + 'pis': vpg2_pis, + 'vlan': vlan_ids[vmi_id - 1], + 'is_untagged': False} + vmi_infos.append(info) + vmi_objs = self._create_vmis(vmi_infos) + for vpg_name, vpg_obj in vpg_objs.items(): + vpg_objs[vpg_name] = self.api.virtual_port_group_read( + id=vpg_obj.uuid) + + # record AE-IDs allocated for each prouter + ae_ids = {} + for vpg in range(2): + vpg_name = vpg_names[vpg] + pi_refs = vpg_objs[vpg_name].get_physical_interface_refs() + ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num + for ref in pi_refs} + # verify all AE-IDs allocated per prouter are unique + self.assertEqual(len(set(ae_ids[vpg_name].keys())), len(pi_refs)) + self.assertEqual(len(set(ae_ids[vpg_name].values())), 1) + + # verification at Physical Routers + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2) + + # Case #1 + # Remove PI-1 from PR-1 through VMI-1 of VPG-1 update + vmi_id = 1 + vmi_name = 'vmi_vpg1_%s_%s' % (test_id, vmi_id) + vpg_name = 'vpg_%s_%s' % (test_id, vmi_id) + vmi_infos = [ + {'name': vmi_name, + 'vmi_uuid': vmi_objs[vmi_name].uuid, + 'vpg': vpg_objs[vpg_name].uuid, + 'fabric': fabric_name, + 'pis': vpg1_pis[1:], + 'vlan': vlan_ids[vmi_id - 1], + 'is_untagged': False}] + self._update_vmis(vmi_infos) + + # re-read VPGs + for vpg, vpg_o in vpg_objs.items(): + vpg_objs[vpg] = self.api.virtual_port_group_read( + id=vpg_o.uuid) + + # Verifications at VPG-1 + # check PI-1 is removed from VPG-1 + pi_refs = vpg_objs[vpg_name].get_physical_interface_refs() + vpg1_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs] + self.assertEqual(len(pi_refs), 3) + # verify AE-ID associated with VPG-1 + # AE-IDs of remaining PIs are unaffected + self.assertEqual(len(set(vpg1_ae_ids)), 1) + + # Verifications at VPG-2 + pi_refs = vpg_objs[vpg_names[1]].get_physical_interface_refs() + vpg2_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs] + self.assertEqual(len(pi_refs), 4) + # verify AE-ID associated with VPG-2 + # AE-IDs of remaining PIs are unaffected + self.assertEqual(len(set(vpg2_ae_ids)), 1) + + # verification at Physical Routers + # since only PI-1 was removed, AE-ID allocation remains same + pr_ae_ids = get_zk_ae_ids() + self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2) + self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2) + + # Case #2 + # Remove all PIs but PI-1 in PR-1/VPG-1 through VMI-1 of VPG-1 update + vmi_id = 1 + vmi_name = 'vmi_vpg1_%s_%s' % (test_id, vmi_id) + vpg_name = 'vpg_%s_%s' % (test_id, vmi_id) + vmi_infos = [ + {'name': vmi_name, + 'vmi_uuid': vmi_objs[vmi_name].uuid, + 'vpg': vpg_objs[vpg_name].uuid, + 'fabric': fabric_name, + 'pis': vpg1_pis[0], + 'vlan': vlan_ids[vmi_id - 1], + 'is_untagged': False}] + self._update_vmis(vmi_infos) + + # re-read VPGs + for vpg, vpg_o in vpg_objs.items(): + vpg_objs[vpg] = self.api.virtual_port_group_read( + id=vpg_o.uuid) + + # Verifications at VPG-1 + # check PI-1 is removed from VPG-1 + pi_refs = vpg_objs[vpg_names[0]].get_physical_interface_refs() + vpg1_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs] + self.assertEqual(len(pi_refs), 1) + # verify AE-ID associated with VPG-1 + # AE-IDs of remaining PIs are unaffected + self.assertEqual(len(set(vpg1_ae_ids)), 1) + self.assertIsNone(vpg1_ae_ids[0]) + + # Verifications at VPG-2 + pi_refs = vpg_objs[vpg_names[1]].get_physical_interface_refs() + 
+        vpg2_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs]
+        self.assertEqual(len(pi_refs), 4)
+        # verify AE-ID associated with VPG-2
+        # AE-IDs of remaining PIs are unaffected
+        self.assertEqual(len(set(vpg2_ae_ids)), 1)
+
+        # verify at ZK Physical Routers
+        pr_ae_ids = get_zk_ae_ids()
+        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)
+        self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1)
+
+        # Case 3
+        # Create a new VPG with two PIs, one from each PR
+        case3_id = 99
+        vpg3_uuid = vpg_objs[vpg_names[2]].uuid
+        case3_vn_name = 'vn_case3_%s_%s' % (test_id, case3_id)
+        case3_vn_objs = self._create_vns(proj_obj, [case3_vn_name])
+        vn_objs.update(case3_vn_objs)
+        pi_per_pr = 1
+        case3_pr1_pi_name = '%s_case3_pr1_pi%d' % (test_id, case3_id)
+        case3_pr2_pi_name = '%s_case3_pr2_pi%d' % (test_id, case3_id)
+        case3_pr1_pi_objs = self._create_pi_objects(
+            pr_objs[0], [case3_pr1_pi_name])
+        case3_pr2_pi_objs = self._create_pi_objects(
+            pr_objs[1], [case3_pr2_pi_name])
+        vpg3_pis = [pi.get_fq_name() for pi in
+                    [case3_pr1_pi_objs[case3_pr1_pi_name],
+                     case3_pr2_pi_objs[case3_pr2_pi_name]]]
+        pi_objs.update(case3_pr1_pi_objs)
+        pi_objs.update(case3_pr2_pi_objs)
+        vmi_info = {
+            'name': 'vmi_vpg3_%s_%s' % (test_id, 99),
+            'vmi_id': 99,
+            'parent_obj': proj_obj,
+            'vn': case3_vn_objs[case3_vn_name],
+            'vpg': vpg3_uuid,
+            'fabric': fabric_name,
+            'pis': vpg3_pis,
+            'vlan': case3_id,
+            'is_untagged': False}
+        case3_vmi_obj = self._create_vmis([vmi_info])
+        vmi_objs.update(case3_vmi_obj)
+
+        # re-read VPG-3
+        vpg_objs[vpg_names[2]] = self.api.virtual_port_group_read(id=vpg3_uuid)
+        # Verifications at VPG-3
+        pi_refs = vpg_objs[vpg_names[2]].get_physical_interface_refs()
+        vpg3_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs]
+        self.assertEqual(len(pi_refs), 2)
+        # verify an AE-ID is allocated
+        self.assertEqual(len(set(vpg3_ae_ids)), 1)
+
+        # verify at ZK Physical Routers
+        # Since a new VPG is added with PIs at Case-3
+        # only two AE-IDs should remain in each prouter
+        pr_ae_ids = get_zk_ae_ids()
+        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 2)
+        self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2)
+        # TO-DO
+        # Verify AE-ID is re-allocated instead of new one
+
+        # Case 4
+        # Add PI1/PR1, PI2/PR1 to VPG-1, so a new AE-ID is allocated
+        vmi_id = 9
+        vmi_name = 'vmi_vpg1_%s_%s' % (test_id, vmi_id)
+        vpg_name = 'vpg_%s_%s' % (test_id, 1)
+        vmi_infos = [
+            {'name': vmi_name,
+             'vmi_uuid': vmi_objs[vmi_name].uuid,
+             'vpg': vpg_objs[vpg_name].uuid,
+             'fabric': fabric_name,
+             'pis': vpg1_pis[0:2],
+             'vlan': vlan_ids[vmi_id - 1],
+             'is_untagged': False}]
+        self._update_vmis(vmi_infos)
+
+        # re-read VPGs
+        for vpg_name, vpg_obj in vpg_objs.items():
+            vpg_objs[vpg_name] = self.api.virtual_port_group_read(
+                id=vpg_obj.uuid)
+
+        # Verifications at VPG-1
+        # check PI1/PR1 and PI2/PR1 are added to VPG-1
+        pi_refs = vpg_objs[vpg_names[0]].get_physical_interface_refs()
+        vpg1_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs]
+        self.assertEqual(len(pi_refs), 2)
+        # verify AE-ID associated with VPG-1
+        # A new AE-ID is allocated
+        self.assertEqual(len(set(vpg1_ae_ids)), 1)
+
+        # Verifications at VPG-2
+        pi_refs = vpg_objs[vpg_names[1]].get_physical_interface_refs()
+        vpg2_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs]
+        self.assertEqual(len(pi_refs), 4)
+        # verify AE-ID associated with VPG-2
+        # AE-IDs of remaining PIs are unaffected
+        self.assertEqual(len(set(vpg2_ae_ids)), 1)
+
+        # verify at ZK Physical Routers
+        pr_ae_ids = get_zk_ae_ids()
+        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 3)
+        self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2)
+        # TO-DO
+        # Verify AE-ID is re-allocated instead of new one
+
+        # Case X1
+        # Create a new VPG with two PIs, both belonging to the same PR
+        caseX1_id = 101
+        vpgX1_uuid = vpg_objs[vpg_names[3]].uuid
+        caseX1_vn_names = ['vn_caseX1_%s_%s' % (test_id, caseX1_id)]
+        caseX1_vn_objs = self._create_vns(proj_obj, caseX1_vn_names)
+        vn_objs.update(caseX1_vn_objs)
+        pi_per_pr = 2
+        caseX1_pr1_pi_names = ['%s_caseX1_pr1_pi%d' % (test_id, caseX1_id),
+                               '%s_caseX1_pr1_pi%d' % (test_id, caseX1_id + 1)]
+        caseX1_pr1_pi_objs = self._create_pi_objects(
+            pr_objs[0], caseX1_pr1_pi_names)
+        vpgX1_pis = [pi.get_fq_name() for pi in
+                     [caseX1_pr1_pi_objs[caseX1_pr1_pi_names[0]],
+                      caseX1_pr1_pi_objs[caseX1_pr1_pi_names[1]]]]
+        pi_objs.update(caseX1_pr1_pi_objs)
+        vmi_info = {
+            'name': 'vmi_vpg4_%s_%s' % (test_id, caseX1_id),
+            'vmi_id': caseX1_id,
+            'parent_obj': proj_obj,
+            'vn': caseX1_vn_objs[caseX1_vn_names[0]],
+            'vpg': vpgX1_uuid,
+            'fabric': fabric_name,
+            'pis': vpgX1_pis,
+            'vlan': caseX1_id,
+            'is_untagged': False}
+        caseX1_vmi_obj = self._create_vmis([vmi_info])
+        vmi_objs.update(caseX1_vmi_obj)
+        # re-read VPG-4
+        vpg_objs[vpg_names[3]] = self.api.virtual_port_group_read(
+            id=vpgX1_uuid)
+        # Verifications at VPG-4
+        pi_refs = vpg_objs[vpg_names[3]].get_physical_interface_refs()
+        vpgX1_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs]
+        self.assertEqual(len(pi_refs), 2)
+        # verify an AE-ID is allocated
+        self.assertEqual(len(set(vpgX1_ae_ids)), 1)
+
+        # verify at ZK Physical Routers
+        # VPG-4 takes both PIs from PR-1, so one more AE-ID
+        # is allocated on PR-1 only
+        pr_ae_ids = get_zk_ae_ids()
+        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 4)
+        self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2)
+        # TO-DO
+        # Verify AE-ID is re-allocated instead of new one
+
+        # Case X2
+        # Create a new VPG with two PIs, both belonging to the same PR
+        caseX2_id = 103
+        vpgX2_uuid = vpg_objs[vpg_names[4]].uuid
+        caseX2_vn_names = ['vn_caseX2_%s_%s' % (test_id, caseX2_id)]
+        caseX2_vn_objs = self._create_vns(proj_obj, caseX2_vn_names)
+        vn_objs.update(caseX2_vn_objs)
+        pi_per_pr = 2
+        caseX2_pr1_pi_names = ['%s_caseX2_pr1_pi%d' % (test_id, caseX2_id),
+                               '%s_caseX2_pr1_pi%d' % (test_id, caseX2_id + 1)]
+        caseX2_pr1_pi_objs = self._create_pi_objects(
+            pr_objs[0], caseX2_pr1_pi_names)
+        vpgX2_pis = [pi.get_fq_name() for pi in
+                     [caseX2_pr1_pi_objs[caseX2_pr1_pi_names[0]],
+                      caseX2_pr1_pi_objs[caseX2_pr1_pi_names[1]]]]
+        pi_objs.update(caseX2_pr1_pi_objs)
+        vmi_info = {
+            'name': 'vmi_vpg5_%s_%s' % (test_id, caseX2_id),
+            'vmi_id': caseX2_id,
+            'parent_obj': proj_obj,
+            'vn': caseX2_vn_objs[caseX2_vn_names[0]],
+            'vpg': vpgX2_uuid,
+            'fabric': fabric_name,
+            'pis': vpgX2_pis,
+            'vlan': caseX2_id,
+            'is_untagged': False}
+        caseX2_vmi_obj = self._create_vmis([vmi_info])
+        vmi_objs.update(caseX2_vmi_obj)
+        # re-read VPG-5
+        vpg_objs[vpg_names[4]] = self.api.virtual_port_group_read(
+            id=vpgX2_uuid)
+        # Verifications at VPG-5
+        pi_refs = vpg_objs[vpg_names[4]].get_physical_interface_refs()
+        vpgX2_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs]
+        self.assertEqual(len(pi_refs), 2)
+        # verify an AE-ID is allocated
+        self.assertEqual(len(set(vpgX2_ae_ids)), 1)
+
+        # verify at ZK Physical Routers
+        # VPG-5 also takes both PIs from PR-1, so one more AE-ID
+        # is allocated on PR-1 only
+        pr_ae_ids = get_zk_ae_ids()
+        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 5)
+        self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2)
+        # TO-DO
+        # Verify AE-ID is re-allocated instead of new one
+
+        # Case X3
+        # Keep only PI2/PR1 in VPG-1, so its AE-ID gets deallocated
+        vmi_id = 10
+        vmi_name = 'vmi_vpg1_%s_%s' % (test_id, vmi_id)
+        vpg_name = 'vpg_%s_%s' % (test_id, 1)
+        vpg1_uuid = vpg_objs[vpg_names[0]].uuid
+        vmi_infos = [
+            {'name': vmi_name,
+             'vmi_uuid': vmi_objs[vmi_name].uuid,
+             'vpg': vpg_objs[vpg_name].uuid,
+             'fabric': fabric_name,
+             'pis': vpg1_pis[1],
+             'vlan': vlan_ids[vmi_id - 1],
+             'is_untagged': False}]
+        self._update_vmis(vmi_infos)
+
+        # re-read VPG1
+        vpg_objs[vpg_names[0]] = self.api.virtual_port_group_read(id=vpg1_uuid)
+
+        # Verifications at VPG-1
+        # check only PI2/PR1 remains in VPG-1
+        pi_refs = vpg_objs[vpg_names[0]].get_physical_interface_refs()
+        vpg1_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs]
+        self.assertEqual(len(pi_refs), 1)
+        # verify AE-ID associated with VPG-1
+        self.assertEqual(len(set(vpg1_ae_ids)), 1)
+
+        # Verifications at VPG-2
+        # pi_refs = vpg_objs[vpg_names[1]].get_physical_interface_refs()
+        # vpg2_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs]
+        # self.assertEqual(len(pi_refs), 4)
+        # verify AE-ID associated with VPG-2
+        # AE-IDs of remaining PIs are unaffected
+        # self.assertEqual(len(set(vpg2_ae_ids)), 1)
+
+        # verify at ZK Physical Routers
+        pr_ae_ids = get_zk_ae_ids()
+        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 4)
+        # self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2)
+        # TO-DO
+        # Verify AE-ID is re-allocated instead of new one
+
+        # Case X4
+        # Create a new VPG with two PIs, both belonging to the same PR
+        caseX4_id = 66
+        vpgX4_uuid = vpg_objs[vpg_names[5]].uuid
+        caseX4_vn_names = ['vn_caseX4_%s_%s' % (test_id, caseX4_id)]
+        caseX4_vn_objs = self._create_vns(proj_obj, caseX4_vn_names)
+        vn_objs.update(caseX4_vn_objs)
+        pi_per_pr = 2
+        caseX4_pr1_pi_names = ['%s_caseX4_pr1_pi%d' % (test_id, caseX4_id),
+                               '%s_caseX4_pr1_pi%d' % (test_id, caseX4_id + 1)]
+        caseX4_pr1_pi_objs = self._create_pi_objects(
+            pr_objs[0], caseX4_pr1_pi_names)
+        vpgX4_pis = [pi.get_fq_name() for pi in
+                     [caseX4_pr1_pi_objs[caseX4_pr1_pi_names[0]],
+                      caseX4_pr1_pi_objs[caseX4_pr1_pi_names[1]]]]
+        pi_objs.update(caseX4_pr1_pi_objs)
+        vmi_info = {
+            'name': 'vmi_vpg6_%s_%s' % (test_id, caseX4_id),
+            'vmi_id': caseX4_id,
+            'parent_obj': proj_obj,
+            'vn': caseX4_vn_objs[caseX4_vn_names[0]],
+            'vpg': vpgX4_uuid,
+            'fabric': fabric_name,
+            'pis': vpgX4_pis,
+            'vlan': caseX4_id,
+            'is_untagged': False}
+        caseX4_vmi_obj = self._create_vmis([vmi_info])
+        vmi_objs.update(caseX4_vmi_obj)
+        # re-read VPG-6
+        vpg_objs[vpg_names[5]] = self.api.virtual_port_group_read(
+            id=vpgX4_uuid)
+        # Verifications at VPG-6
+        pi_refs = vpg_objs[vpg_names[5]].get_physical_interface_refs()
+        vpgX4_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs]
+        self.assertEqual(len(pi_refs), 2)
+        # verify an AE-ID is allocated
+        self.assertEqual(len(set(vpgX4_ae_ids)), 1)
+
+        # verify at ZK Physical Routers
+        # VPG-6 takes both PIs from PR-1, so one more AE-ID is
+        # allocated on PR-1 (back to five); PR-2 is unchanged
+        pr_ae_ids = get_zk_ae_ids()
+        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 5)
+        self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 2)
+        # TO-DO
+        # Verify AE-ID is re-allocated instead of new one
+
+        # Case 5
+        vpg_objs[vpg_names[0]] = self.api.virtual_port_group_read(
+            id=vpg_objs[vpg_names[0]].uuid)
+        curr_pis = [ref['to'] for ref in
+                    vpg_objs[vpg_names[0]].get_physical_interface_refs()]
+        case5_id = 100
+        vpg_id = vpg_objs[vpg_names[0]].uuid
+        case5_vn_name = 'vn_case5_%s_%s' % (test_id, case5_id)
+        case5_vn_objs = self._create_vns(proj_obj, [case5_vn_name])
+        vn_objs.update(case5_vn_objs)
+        pi_per_pr = 1
+        case5_pr2_pi_name = '%s_case5_pr2_pi%d' % (test_id, case5_id)
+        case5_pr2_pi_objs = self._create_pi_objects(
+            pr_objs[1], [case5_pr2_pi_name])
+        vpg1_case5_pis = [pi.get_fq_name() for pi in
+                          [case5_pr2_pi_objs[case5_pr2_pi_name]]]
+        vpg1_case5_pis += curr_pis
+        pi_objs.update(case5_pr2_pi_objs)
+        vmi_info = {
+            'name': 'vmi_vpg1_%s_%s' % (test_id, case5_id),
+            'vmi_id': case5_id,
+            'parent_obj': proj_obj,
+            'vn': case5_vn_objs[case5_vn_name],
+            'vpg': vpg_id,
+            'fabric': fabric_name,
+            'pis': vpg1_case5_pis,
+            'vlan': case5_id,
+            'is_untagged': False}
+        case5_vmi_obj = self._create_vmis([vmi_info])
+        vmi_objs.update(case5_vmi_obj)
+        # re-read VPGs
+        for vpg_name, vpg_obj in vpg_objs.items():
+            vpg_objs[vpg_name] = self.api.virtual_port_group_read(
+                id=vpg_obj.uuid)
+
+        # Verifications at VPG-1
+        # check the new PI from PR-2 is added to VPG-1
+        pi_refs = vpg_objs[vpg_names[0]].get_physical_interface_refs()
+        vpg1_ae_ids = [pi_ref['attr'].ae_num for pi_ref in pi_refs]
+        self.assertEqual(len(pi_refs), 2)
+        # verify AE-ID associated with VPG-1
+        # one AE-ID per PR, so two distinct AE-IDs are expected
+        self.assertEqual(len(set(vpg1_ae_ids)), 2)
+
+        # verify at ZK Physical Routers
+        # a new PI from PR-2 joins the existing PI from PR-1 in VPG-1,
+        # so an AE-ID is allocated on both PRs
+        pr_ae_ids = get_zk_ae_ids()
+        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 6)
+        self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 3)
+        # TO-DO
+        # Verify AE-ID is re-allocated instead of new one
+
+        # cleanup
+        for _, vmi_obj in vmi_objs.items():
+            self.api.virtual_machine_interface_delete(id=vmi_obj.uuid)
+        for _, vpg_obj in vpg_objs.items():
+            self.api.virtual_port_group_delete(id=vpg_obj.uuid)
+        for _, pi_obj in pi_objs.items():
+            self.api.physical_interface_delete(id=pi_obj.uuid)
+        for _, vn_obj in vn_objs.items():
+            self.api.virtual_network_delete(id=vn_obj.uuid)
+        self.api.physical_router_delete(id=pr_objs[0].uuid)
+        self.api.physical_router_delete(id=pr_objs[1].uuid)
+        self.api.fabric_delete(id=fabric_obj.uuid)
+        self.api.project_delete(id=proj_obj.uuid)
+
+    def _multiple_vpg_with_multiple_pi_old(
+            self, proj_obj, fabric_obj, pr_obj, validation):
+        pi_count = 300
+        pi_per_vmi = 3
+        fabric_name = fabric_obj.get_fq_name()
+        test_id = self.id()
+        vlan_vn_count = int(pi_count / 3) + 1
+
+        vlan_ids = range(1, vlan_vn_count)
+        vn_names = ['vn_%s_%s' % (test_id, i)
+                    for i in range(1, vlan_vn_count)]
+        vn_objs = self._create_vns(proj_obj, vn_names)
+
+        pi_names = ['phy_intf_%s_%s' % (test_id, i) for i in range(
+            1, pi_count + 1)]
+        pi_objs = self._create_pi_objects(pr_obj, pi_names)
+        vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(
+            1, vlan_vn_count)]
+        vpg_objs = self._create_vpgs(fabric_obj, vpg_names)
+
+        vmi_infos = []
+        for vmi_id in range(1, vlan_vn_count):
+            pinames = pi_names[(vmi_id - 1) * pi_per_vmi: vmi_id * pi_per_vmi]
+            pis = [pi_objs[pi].get_fq_name() for pi in pinames]
+            info = {
+                'name': 'vmi_%s_%s' % (test_id, vmi_id),
+                'vmi_id': vmi_id,
+                'parent_obj': proj_obj,
+                'vn': vn_objs[vn_names[vmi_id - 1]],
+                'vpg': vpg_objs[vpg_names[vmi_id - 1]].uuid,
+                'fabric': fabric_name,
+                'pis': pis,
+                'vlan': vlan_ids[vmi_id - 1],
+                'is_untagged': False}
+            vmi_infos.append(info)
+
+        vmi_objs = self._create_vmis(vmi_infos)
+        all_ae_nums = []
+        for vpg_name in vpg_names:
+            vpg_uuid = vpg_objs[vpg_name].uuid
+            vpg_obj = self.api.virtual_port_group_read(id=vpg_uuid)
+            ae_nums = [
+                vpg_obj.physical_interface_refs[i]['attr'].ae_num
+                for i in range(0, pi_per_vmi)]
+            # check AE-ID present in each PI ref
+            self.assertEqual(
+                len(ae_nums), len(vpg_obj.physical_interface_refs))
+            # check AE-ID present in each PI is same
+            self.assertEqual(len(set(ae_nums)), 1)
+            all_ae_nums += list(set(ae_nums))
+        # check a unique AE-ID is allocated for each VPG
+        self.assertEqual(len(all_ae_nums), int(pi_count / pi_per_vmi))
+
+        # replace with new PIs on the first VMI
+        extra_pi_start = pi_count + 1
+        extra_pi_end = extra_pi_start + 4
+        extra_pi_names = ['phy_intf_%s_%s' % (test_id, i)
+                          for i in range(extra_pi_start, extra_pi_end)]
+        extra_pi_objs = self._create_pi_objects(pr_obj, extra_pi_names)
+        extra_pi_fq_names = [extra_pi_objs[pi].get_fq_name()
+                             for pi, obj in extra_pi_objs.items()]
+
+        vmi_obj_1 = vmi_objs[vmi_objs.keys()[0]]
+        vpg_obj_1 = vpg_objs[vpg_objs.keys()[0]]
+        vpg_obj_1 = self.api.virtual_port_group_read(
+            id=vpg_obj_1.uuid)
+        ae_nums_org = [
+            vpg_obj_1.physical_interface_refs[i]['attr'].ae_num
+            for i in range(0, pi_per_vmi)]
+        kv_pairs = self._create_kv_pairs(
+            extra_pi_fq_names, fabric_name,
+            vpg_objs[vpg_objs.keys()[0]].get_fq_name())
+        vmi_obj_1.set_virtual_machine_interface_bindings(kv_pairs)
+        self.api.virtual_machine_interface_update(vmi_obj_1)
+        vpg_obj_1 = self.api.virtual_port_group_read(
+            id=vpg_objs[vpg_objs.keys()[0]].uuid)
+        extra_ae_nums = [
+            vpg_obj_1.physical_interface_refs[i]['attr'].ae_num
+            for i in range(0, extra_pi_end - extra_pi_start)]
+
+        # check all AE-IDs are same
+        self.assertEqual(len(set(extra_ae_nums)), 1)
+
+        # ensure AE_ID is same as before
+        self.assertEqual(
+            set(ae_nums_org),
+            set(extra_ae_nums),
+            'AE numbers before (%s) and after (%s) '
+            'replacing PI are not same' % (
+                ae_nums_org, extra_ae_nums))
+
+        # add four more PIs to the first VMI
+        extra_2_pi_start = extra_pi_end + 1
+        extra_2_pi_end = extra_2_pi_start + 4
+        extra_2_pi_names = ['phy_intf_%s_%s' % (test_id, i)
+                            for i in range(extra_2_pi_start, extra_2_pi_end)]
+        extra_2_pi_objs = self._create_pi_objects(pr_obj, extra_2_pi_names)
+        extra_2_pi_fq_names = [extra_2_pi_objs[pi].get_fq_name()
+                               for pi, obj in extra_2_pi_objs.items()]
+        vmi_obj_1 = vmi_objs[vmi_objs.keys()[0]]
+        kv_pairs = self._create_kv_pairs(
+            extra_pi_fq_names + extra_2_pi_fq_names, fabric_name,
+            vpg_objs[vpg_objs.keys()[0]].get_fq_name())
+        vmi_obj_1.set_virtual_machine_interface_bindings(kv_pairs)
+        self.api.virtual_machine_interface_update(vmi_obj_1)
+        vpg_obj_1 = self.api.virtual_port_group_read(
+            id=vpg_objs[vpg_objs.keys()[0]].uuid)
+        curr_pi_obj_count = (
+            len(extra_pi_fq_names) + len(extra_2_pi_fq_names))
+        ae_nums_new = [
+            vpg_obj_1.physical_interface_refs[i]['attr'].ae_num
+            for i in range(0, curr_pi_obj_count)]
+
+        # check AE-ID allocated for all PIs
+        self.assertEqual(len(ae_nums_new), curr_pi_obj_count)
+        # check AE-ID allocated for all PIs are same
+        self.assertEqual(len(set(ae_nums_new)), 1)
+
+        # ensure AE_ID is same as before
+        self.assertEqual(
+            set(ae_nums_org),
+            set(ae_nums_new),
+            'AE numbers before (%s) and after (%s) '
+            'replacing PI are not same' % (
+                ae_nums_org, ae_nums_new))
+
+        # cleanup
+        for vmi_name, vmi_obj in vmi_objs.items():
+            self.api.virtual_machine_interface_delete(id=vmi_obj.uuid)
+        for vpg_name, vpg_obj in vpg_objs.items():
+            self.api.virtual_port_group_delete(id=vpg_obj.uuid)
+        total_pis = {}
+        total_pis.update(pi_objs)
+        total_pis.update(extra_pi_objs)
+        total_pis.update(extra_2_pi_objs)
+        for pi_name, pi_obj in total_pis.items():
+            self.api.physical_interface_delete(id=pi_obj.uuid)
+        for vn_name, vn_obj in vn_objs.items():
+            self.api.virtual_network_delete(id=vn_obj.uuid)
+        self.api.physical_router_delete(id=pr_obj.uuid)
+        self.api.fabric_delete(id=fabric_obj.uuid)
+        self.api.project_delete(id=proj_obj.uuid)
+
+    def test_multiple_vpg_with_multiple_pi(self):
+        """Verify multiple VPGs with multiple PIs in both fabric styles."""
+        proj_obj, fabric_obj, pr_objs = self._create_prerequisites(
+            create_second_pr=True)
+        self._multiple_vpg_with_multiple_pi(
+            proj_obj, fabric_obj, pr_objs, 'enterprise')
+
+        proj_obj, fabric_obj, pr_objs = self._create_prerequisites(
+            create_second_pr=True)
+        self._multiple_vpg_with_multiple_pi(
+            proj_obj, fabric_obj, pr_objs, 'serviceprovider')
+
     def _test_add_new_pi_to_vmi(
             self, proj_obj, fabric_obj, pr_obj, validation):
         vlan_1 = 42
@@ -2375,6 +3799,323 @@ def test_ae_id_deallocated_for_vpg_multihoming_interfaces(self):
         self.api.virtual_network_read(id=vn1_obj.uuid)
         self.api.fabric_delete(id=fabric_obj.uuid)

+    # Using VPG update logic
+    def test_same_pi_can_not_attach_to_different_vpg(self):
+        proj_obj, fabric_obj, pr_obj = self._create_prerequisites()
+        test_id = self.id()
+
+        pi_per_pr = 4
+        pi_names = ['%s_pr1_pi%d' % (test_id, i) for
+                    i in range(1, pi_per_pr + 1)]
+        pi_objs = self._create_pi_objects(pr_obj, pi_names)
+
+        vpg_count = 2
+        vpg_names = ['vpg_%s_%s' % (test_id, i) for i in range(
+            1, vpg_count + 1)]
+        vpg_objs = self._create_vpgs(fabric_obj, vpg_names)
+
+        # Add all PIs to VPG-1
+        vpg_obj = vpg_objs[vpg_names[0]]
+        for pi_name in pi_names:
+            vpg_obj.add_physical_interface(pi_objs[pi_name])
+        self.api.virtual_port_group_update(vpg_obj)
+        vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)
+        pi_refs = vpg_obj.get_physical_interface_refs()
+        self.assertEqual(len(pi_refs), 4)
+
+        # Try to add PI1, PI2 to VPG-2
+        vpg_obj = vpg_objs[vpg_names[1]]
+        vpg_obj.add_physical_interface(pi_objs[pi_names[0]])
+        vpg_obj.add_physical_interface(pi_objs[pi_names[1]])
+        with ExpectedException(BadRequest):
+            self.api.virtual_port_group_update(vpg_obj)
+
+        # Delete PI1, PI2 from VPG-1
+        vpg_obj = vpg_objs[vpg_names[0]]
+        for pi_name in pi_names[:2]:
+            vpg_obj.del_physical_interface(pi_objs[pi_name])
+        self.api.virtual_port_group_update(vpg_obj)
+        vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)
+        pi_refs = vpg_obj.get_physical_interface_refs()
+        self.assertEqual(len(pi_refs), 2)
+
+        # Now try to attach PI1, PI2 to VPG-2;
+        # should work as PI1, PI2 are no longer attached to VPG-1
+        vpg_obj = vpg_objs[vpg_names[1]]
+        vpg_obj.add_physical_interface(pi_objs[pi_names[0]])
+        vpg_obj.add_physical_interface(pi_objs[pi_names[1]])
+        self.api.virtual_port_group_update(vpg_obj)
+        vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)
+        pi_refs = vpg_obj.get_physical_interface_refs()
+        self.assertEqual(len(pi_refs), 2)
+
+        # cleanup
+        for _, vpg_obj in vpg_objs.items():
+            self.api.virtual_port_group_delete(id=vpg_obj.uuid)
+        for _, pi_obj in pi_objs.items():
+            self.api.physical_interface_delete(id=pi_obj.uuid)
+        self.api.physical_router_delete(id=pr_obj.uuid)
+        self.api.fabric_delete(id=fabric_obj.uuid)
+        self.api.project_delete(id=proj_obj.uuid)
+
+    # To verify allocation and deallocation - through VMI
+    def test_ae_id_alloc_dealloc(self):
+        proj_obj, fabric_obj, pr_objs = self._create_prerequisites(
+            create_second_pr=True)
+        test_id = self.id().split('.')[-1]
+        fabric_name = fabric_obj.get_fq_name()
+        vlan_vn_count = 3
+
+        def process_ae_ids(x):
+            return [int(i) for i in sorted(x)]
+
+        def get_zk_ae_ids(prs=None):
+            prefix = os.path.join(
+                self.__class__.__name__,
+                'id', 'aggregated-ethernet')
+            zk_client = self._api_server._db_conn._zk_db._zk_client._zk_client
+            if not prs:
+                prs = [os.path.join(prefix, pr.name) for pr in pr_objs]
+            else:
+                if not isinstance(prs, list):
+                    prs = [prs]
+                prs = [os.path.join(prefix, pr) for pr in prs]
+            ae_ids = {}
+            for pr in prs:
+                pr_org = os.path.split(pr)[-1]
+                ae_ids[pr_org] = zk_client.get_children(pr)
+            return ae_ids
+
+        vlan_ids = range(1, vlan_vn_count + 1)
+        vn_names = ['vn_%s_%s' % (test_id, i)
+                    for i in range(1, vlan_vn_count + 1)]
+        vn_objs = self._create_vns(proj_obj, vn_names)
+
+        vmi_objs = {}
+        # create two PIs, one on each PR
+        pi_objs = {}
+        pi_per_pr = 1
+        pr1_pi_names = ['%s_pr1_pi%d' % (test_id, i) for
+                        i in range(1, pi_per_pr + 1)]
+        pr2_pi_names = ['%s_pr2_pi%d' % (test_id, i) for
+                        i in range(1, pi_per_pr + 1)]
+        pr1_pi_objs = self._create_pi_objects(pr_objs[0], pr1_pi_names)
+        pr2_pi_objs = self._create_pi_objects(pr_objs[1], pr2_pi_names)
+        pi_objs.update(pr1_pi_objs)
+        pi_objs.update(pr2_pi_objs)
+
+        # create VPGs
+        vpg_count = 2
+        vpg_names = ['vpg_%s_%s' % (test_id, i)
+                     for i in range(1, vpg_count + 1)]
+        vpg_objs = self._create_vpgs(fabric_obj, vpg_names)
+
+        # record AE-IDs in ZK before creating any VPG
+        ae_ids = [x for x in get_zk_ae_ids().values() if x]
+        self.assertEqual(len(ae_ids), 0)
+
+        # Case 1: Attach PI-1/PR-1 and PI-1/PR-2 to VPG-1
+        # one AE-ID i.e. 0 to be allocated to VPG-1
+        vpg_index = 0
+        vpg_name = vpg_names[0]
+        vpg_obj = vpg_objs[vpg_name]
+        vpg_pi_fqs = [pi.get_fq_name() for _, pi in pi_objs.items()]
+        vmi_infos = []
+        vmi_vn_vlan_map = defaultdict(list)
+        for vmi_id in range(3):
+            vmi_name = 'vmi_%s_%s' % (test_id, vmi_id)
+            info = {
+                'name': vmi_name,
+                'vmi_id': '%s' % vmi_id,
+                'parent_obj': proj_obj,
+                'vn': vn_objs[vn_names[vmi_id - 1]],
+                'vpg': vpg_obj.uuid,
+                'fabric': fabric_name,
+                'pis': vpg_pi_fqs,
+                'vlan': vlan_ids[vmi_id - 1],
+                'is_untagged': False}
+            vmi_infos.append(info)
+            vmi_vn_vlan_map[vmi_name] = (
+                vn_objs[vn_names[vmi_id - 1]], vlan_ids[vmi_id - 1])
+        vmi_objs = self._create_vmis(vmi_infos)
+        for vpg, vpg_o in vpg_objs.items():
+            vpg_objs[vpg] = self.api.virtual_port_group_read(
+                id=vpg_o.uuid)
+
+        # record AE-IDs allocated for each prouter
+        ae_ids = {}
+        pi_refs = vpg_objs[vpg_name].get_physical_interface_refs()
+        self.assertEqual(len(pi_refs), len(vpg_pi_fqs))
+        ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num
+                            for ref in pi_refs}
+        # verify PI refs are unique and both share AE-ID 0
+        self.assertEqual(len(set(ae_ids[vpg_name].keys())),
+                         len(vpg_pi_fqs))
+        self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)
+        self.assertEqual(list(ae_ids[vpg_name].values()), [0, 0])
+
+        # verification at Physical Routers
+        pr_ae_ids = get_zk_ae_ids()
+        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)
+        self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])
+        self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1)
+        self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0])
+
+        # Case 2: Attach only one PI to VPG-1
+        vpg_index = 0
+        vpg_name = vpg_names[0]
+        vpg_obj = vpg_objs[vpg_name]
+        vpg_pi_fqs = [pr1_pi_objs[pr1_pi_names[0]].get_fq_name()]
+        vmi_infos = []
+        vmi_names = list(vmi_objs.keys())
+        for vmi_id in range(3):
+            info = {
+                'name': vmi_names[vmi_id],
+                'vmi_uuid': '%s' % vmi_objs[vmi_names[vmi_id]].uuid,
+                'vn': vmi_vn_vlan_map[vmi_names[vmi_id]][0],
+                'vpg': vpg_obj.uuid,
+                'fabric': fabric_name,
+                'pis': vpg_pi_fqs,
+                'vlan': vmi_vn_vlan_map[vmi_names[vmi_id]][1],
+                'is_untagged': False}
+            vmi_infos.append(info)
+        vmi_objs = self._update_vmis(vmi_infos)
+        for vpg, vpg_o in vpg_objs.items():
+            vpg_objs[vpg] = self.api.virtual_port_group_read(
+                id=vpg_o.uuid)
+
+        # record AE-IDs allocated for each prouter
+        ae_ids = {}
+        pi_refs = vpg_objs[vpg_name].get_physical_interface_refs()
+        self.assertEqual(len(pi_refs), len(vpg_pi_fqs))
+        ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num
+                            for ref in pi_refs}
+        # verify the single remaining PI has no AE-ID
+        self.assertEqual(len(set(ae_ids[vpg_name].keys())),
+                         len(vpg_pi_fqs))
+        self.assertIsNone(list(ae_ids[vpg_name].values())[0])
+        self.assertEqual(list(ae_ids[vpg_name].values()), [None])
+
+        # verification at Physical Routers
+        pr_ae_ids = get_zk_ae_ids()
+        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)
+        self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])
+        self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)
+        self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])
+
+        # Case 3: Add all PIs back to VPG-1 and update VMIs
+        # ensure AE-IDs are allocated back
+        vpg_index = 0
+        vpg_name = vpg_names[0]
+        vpg_obj = vpg_objs[vpg_name]
+        vpg_pi_fqs = [pi.get_fq_name() for _, pi in pi_objs.items()]
+        vmi_infos = []
+        vmi_names = list(vmi_objs.keys())
+        for vmi_id in range(3):
+            info = {
+                'name': vmi_names[vmi_id],
+                'vmi_uuid': '%s' % vmi_objs[vmi_names[vmi_id]].uuid,
+                'vn': vmi_vn_vlan_map[vmi_names[vmi_id]][0],
+                'vpg': vpg_obj.uuid,
+                'fabric': fabric_name,
+                'pis': vpg_pi_fqs,
+                'vlan': vmi_vn_vlan_map[vmi_names[vmi_id]][1],
+                'is_untagged': False}
+            vmi_infos.append(info)
+        vmi_objs = self._update_vmis(vmi_infos)
+        for vpg, vpg_o in vpg_objs.items():
+            vpg_objs[vpg] = self.api.virtual_port_group_read(
+                id=vpg_o.uuid)
+
+        # record AE-IDs allocated for each prouter
+        ae_ids = {}
+        pi_refs = vpg_objs[vpg_name].get_physical_interface_refs()
+        self.assertEqual(len(pi_refs), len(vpg_pi_fqs))
+        ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num
+                            for ref in pi_refs}
+        # verify PI refs are unique and both share AE-ID 0 again
+        self.assertEqual(len(set(ae_ids[vpg_name].keys())),
+                         len(vpg_pi_fqs))
+        self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)
+        self.assertEqual(list(ae_ids[vpg_name].values()), [0, 0])
+
+        # verification at Physical Routers
+        pr_ae_ids = get_zk_ae_ids()
+        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)
+        self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])
+        self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1)
+        self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0])
+
+        # Case 4: Delete two of the three VMIs
+        # AE-IDs stay allocated because one VMI still refers to both PIs
+        for _, vmi_obj in list(vmi_objs.items())[0:2]:
+            self.api.virtual_machine_interface_delete(id=vmi_obj.uuid)
+
+        # record AE-IDs allocated for each prouter
+        ae_ids = {}
+        pi_refs = vpg_objs[vpg_name].get_physical_interface_refs()
+        self.assertEqual(len(pi_refs), len(vpg_pi_fqs))
+        ae_ids[vpg_name] = {ref['href'].split('/')[-1]: ref['attr'].ae_num
+                            for ref in pi_refs}
+        # verify PI refs are unique and still share AE-ID 0
+        self.assertEqual(len(set(ae_ids[vpg_name].keys())),
+                         len(vpg_pi_fqs))
+        self.assertEqual(len(set(ae_ids[vpg_name].values())), 1)
+        self.assertEqual(list(ae_ids[vpg_name].values()), [0, 0])
+
+        # verification at Physical Routers
+        pr_ae_ids = get_zk_ae_ids()
+        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 1)
+        self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [0])
+        self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 1)
+        self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [0])
+
+        # Case 5: Delete VPG-1
+        vpg_index = 0
+        vpg_obj = vpg_objs[vpg_names[vpg_index]]
+        vpg_obj = self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)
+        for vmi_ref in vpg_obj.get_virtual_machine_interface_refs():
+            self.api.virtual_machine_interface_delete(id=vmi_ref['uuid'])
+        del vpg_objs[vpg_names[0]]
+        # Now delete VPG-1
+        self.api.virtual_port_group_delete(id=vpg_obj.uuid)
+        # Verification at Physical Routers
+        with ExpectedException(NoIdError):
+            self._vnc_lib.virtual_port_group_read(id=vpg_obj.uuid)
+        pr_ae_ids = get_zk_ae_ids()
+        self.assertEqual(len(pr_ae_ids[pr_objs[0].name]), 0)
+        self.assertEqual(len(pr_ae_ids[pr_objs[1].name]), 0)
+        self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[0].name]), [])
+        self.assertEqual(process_ae_ids(pr_ae_ids[pr_objs[1].name]), [])
+
+        # cleanup
+        # remove VMIs
+        # already cleared during Case 5
+        for _, vmi_obj in vmi_objs.items():
+            try:
+                self.api.virtual_machine_interface_delete(id=vmi_obj.uuid)
+            except NoIdError:
+                pass
+        # remove VNs
+        for _, vn_obj in vn_objs.items():
+            self.api.virtual_network_delete(id=vn_obj.uuid)
+        # remove VPGs
+        for _, vpg_obj in vpg_objs.items():
+            try:
+                self.api.virtual_port_group_delete(id=vpg_obj.uuid)
+            except NoIdError:
+                pass
+        # remove PIs
+        for _, pi_obj in pi_objs.items():
+            self.api.physical_interface_delete(id=pi_obj.uuid)
+        # remove PRs
+        for pr_obj in pr_objs:
+            self.api.physical_router_delete(id=pr_obj.uuid)
+        # remove fabric
+        self.api.fabric_delete(id=fabric_obj.uuid)
+        # remove project
+        self.api.project_delete(id=proj_obj.uuid)
+
     def test_two_virtual_port_groups_for_single_pi(self):
         # Similar to e.g. test_same_vn_with_same_vlan_across_vpg_in_enterprise,
         # but one of VPG's has no name and we try to bind one PI to both VPGs
@@ -2428,11 +4169,14 @@ def test_two_virtual_port_groups_for_single_pi(self):
         vpg_obj_1.add_virtual_machine_interface(vmi_obj)
         self.api.virtual_port_group_update(vpg_obj_1)

+        # Create single VN for second VMI
+        vn2 = VirtualNetwork('vn2-%s' % (self.id()), parent_obj=proj_obj)
+        self.api.virtual_network_create(vn2)
         # Create a VMI that's attached to vpg-2 and having reference
-        # to vn1
+        # to vn2
         vmi_obj_2 = VirtualMachineInterface(self.id() + "2",
                                             parent_obj=proj_obj)
-        vmi_obj_2.set_virtual_network(vn1)
+        vmi_obj_2.set_virtual_network(vn2)

         # Create KV_Pairs for this VMI
         kv_pairs = self._create_kv_pairs(pi_fq_name,