diff --git a/src/config/api-server/tests/test_kombu.py b/src/config/api-server/tests/test_kombu.py index df977b38ce4..50f22089e6d 100644 --- a/src/config/api-server/tests/test_kombu.py +++ b/src/config/api-server/tests/test_kombu.py @@ -164,7 +164,7 @@ def _publish(args): self.username, self.password, self.vhost, 0, False) gevent.sleep(0) - kc.dbe_create_publish("network", [], {}) + kc.dbe_create_publish("network", [], {'fq_name': ['vn1']}) _lock.wait() # check if message is not missed out by publish error diff --git a/src/config/api-server/vnc_addr_mgmt.py b/src/config/api-server/vnc_addr_mgmt.py index e1f7c1b8f5c..e7ccd027859 100644 --- a/src/config/api-server/vnc_addr_mgmt.py +++ b/src/config/api-server/vnc_addr_mgmt.py @@ -527,10 +527,10 @@ def _uuid_to_obj_dict(self, req_obj_type, obj_uuid, req_fields=None): (ok, obj_dict) = db_conn.dbe_read( obj_type=req_obj_type, - obj_ids={'uuid':obj_uuid}, + obj_id=obj_uuid, obj_fields=req_fields) return (ok, obj_dict) - #end _uuid_to_obj_dict + # end _uuid_to_obj_dict def _fq_name_to_obj_dict(self, req_obj_type, fq_name, req_fields=None): db_conn = self._get_db_conn() @@ -689,12 +689,12 @@ def net_create_req(self, obj_dict): obj_dict, should_persist=True) # end net_create_req - def net_create_notify(self, obj_ids, obj_dict): + def net_create_notify(self, obj_id, obj_dict): db_conn = self._get_db_conn() try: (ok, result) = db_conn.dbe_read( 'virtual_network', - obj_ids={'uuid': obj_ids['uuid']}, + obj_id=obj_id, obj_fields=['fq_name', 'network_ipam_refs']) except cfgm_common.exceptions.NoIdError: return @@ -705,9 +705,8 @@ def net_create_notify(self, obj_ids, obj_dict): return vn_dict = result - vn_uuid = obj_ids['uuid'] vn_fq_name_str = ':'.join(vn_dict['fq_name']) - self._create_net_subnet_objs(vn_fq_name_str, vn_uuid, vn_dict, + self._create_net_subnet_objs(vn_fq_name_str, obj_id, vn_dict, should_persist=False) # end net_create_notify @@ -757,12 +756,12 @@ def net_update_req(self, vn_fq_name, db_vn_dict, req_vn_dict, obj_uuid=None): should_persist=True) # end net_update_req - def net_update_notify(self, obj_ids): + def net_update_notify(self, obj_id): db_conn = self._get_db_conn() try: (ok, result) = db_conn.dbe_read( obj_type='virtual_network', - obj_ids={'uuid': obj_ids['uuid']}, + obj_id=obj_id, obj_fields=['fq_name', 'network_ipam_refs']) except cfgm_common.exceptions.NoIdError: return @@ -774,8 +773,7 @@ def net_update_notify(self, obj_ids): vn_dict = result vn_fq_name_str = ':'.join(vn_dict['fq_name']) - vn_uuid = obj_ids['uuid'] - self._create_net_subnet_objs(vn_fq_name_str, vn_uuid, vn_dict, + self._create_net_subnet_objs(vn_fq_name_str, obj_id, vn_dict, should_persist=False) # end net_update_notify @@ -795,10 +793,9 @@ def net_delete_req(self, obj_dict): pass # end net_delete_req - def net_delete_notify(self, obj_ids, obj_dict): + def net_delete_notify(self, obj_id, obj_dict): try: - vn_uuid = obj_dict['uuid'] - del self._subnet_objs[vn_uuid] + del self._subnet_objs[obj_id] except KeyError: pass # end net_delete_notify @@ -1000,8 +997,7 @@ def _check_subnet_delete(self, subnets_set, vn_dict): instip_refs = vn_dict.get('instance_ip_back_refs') or [] for ref in instip_refs: try: - (ok, result) = db_conn.dbe_read( - 'instance_ip', {'uuid': ref['uuid']}) + (ok, result) = db_conn.dbe_read('instance_ip', ref['uuid']) except cfgm_common.exceptions.NoIdError: continue if not ok: @@ -1024,8 +1020,7 @@ def _check_subnet_delete(self, subnets_set, vn_dict): fip_pool_refs = vn_dict.get('floating_ip_pools') or [] for ref in fip_pool_refs: try: - 
(ok, result) = db_conn.dbe_read( - 'floating_ip_pool', {'uuid': ref['uuid']}) + (ok, result) = db_conn.dbe_read('floating_ip_pool', ref['uuid']) except cfgm_common.exceptions.NoIdError: continue if not ok: @@ -1039,7 +1034,7 @@ def _check_subnet_delete(self, subnets_set, vn_dict): for floating_ip in floating_ips: try: (read_ok, read_result) = db_conn.dbe_read( - 'floating_ip', {'uuid': floating_ip['uuid']}) + 'floating_ip', floating_ip['uuid']) except cfgm_common.exceptions.NoIdError: continue if not read_ok: @@ -1064,8 +1059,7 @@ def _check_subnet_delete(self, subnets_set, vn_dict): aip_pool_refs = vn_dict.get('alias_ip_pools') or [] for ref in aip_pool_refs: try: - (ok, result) = db_conn.dbe_read( - 'alias_ip_pool', {'uuid': ref['uuid']}) + (ok, result) = db_conn.dbe_read('alias_ip_pool', ref['uuid']) except cfgm_common.exceptions.NoIdError: continue if not ok: @@ -1081,7 +1075,7 @@ def _check_subnet_delete(self, subnets_set, vn_dict): # new subnet_list try: (read_ok, read_result) = db_conn.dbe_read( - 'alias_ip', {'uuid': floating_ip['uuid']}) + 'alias_ip', floating_ip['uuid']) except cfgm_common.exceptions.NoIdError: continue if not read_ok: @@ -1136,8 +1130,7 @@ def ipam_check_subnet_delete(self, db_ipam_dict, req_ipam_dict): for ref in vn_refs: vn_id = ref.get('uuid') try: - (ok, read_result) = db_conn.dbe_read('virtual_network', - {'uuid':vn_id}) + (ok, read_result) = db_conn.dbe_read('virtual_network', vn_id) except cfgm_common.exceptions.NoIdError: continue if not ok: @@ -1668,9 +1661,9 @@ def ipam_create_req(self, obj_dict): should_persist=True) # end ipam_create_req - def ipam_create_notify(self, obj_ids, obj_dict): + def ipam_create_notify(self, obj_id, obj_dict): if obj_dict.get('ipam_subnet_method') == 'flat-subnet': - self._create_ipam_subnet_objs(obj_ids['uuid'], obj_dict, + self._create_ipam_subnet_objs(obj_id, obj_dict, should_persist=False) # end ipam_create_notify @@ -1695,10 +1688,9 @@ def ipam_delete_req(self, obj_dict): pass # end ipam_delete_req - def ipam_delete_notify(self, obj_ids, obj_dict): + def ipam_delete_notify(self, obj_id, obj_dict): try: - ipam_uuid = obj_dict['uuid'] - del self._subnet_objs[ipam_uuid] + del self._subnet_objs[obj_id] except KeyError: pass # end ipam_delete_notify @@ -1751,12 +1743,10 @@ def ipam_update_req(self, ipam_fq_name, db_ipam_dict, req_ipam_dict, should_persist=True) # end ipam_update_req - def ipam_update_notify(self, obj_ids): + def ipam_update_notify(self, obj_id): db_conn = self._get_db_conn() try: - (ok, result) = db_conn.dbe_read( - 'network_ipam', - obj_ids={'uuid': obj_ids['uuid']}) + (ok, result) = db_conn.dbe_read('network_ipam', obj_id=obj_id) except cfgm_common.exceptions.NoIdError: return @@ -1766,8 +1756,7 @@ def ipam_update_notify(self, obj_ids): return ipam_dict = result - ipam_uuid = obj_ids['uuid'] - self._create_ipam_subnet_objs(ipam_uuid, ipam_dict, + self._create_ipam_subnet_objs(obj_id, ipam_dict, should_persist=False) # end ipam_update_notify diff --git a/src/config/api-server/vnc_cfg_api_server.py b/src/config/api-server/vnc_cfg_api_server.py index f7622a47090..d9e580e8401 100644 --- a/src/config/api-server/vnc_cfg_api_server.py +++ b/src/config/api-server/vnc_cfg_api_server.py @@ -506,7 +506,6 @@ def http_resource_create(self, obj_type): # State modification starts from here. Ensure that cleanup is done for all state changes cleanup_on_failure = [] - obj_ids = {} def stateful_create(): # Alloc and Store id-mappings before creating entry on pubsub store. 
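Note for readers skimming the hunks above: every call site migrates from wrapping the UUID in an obj_ids dict to passing the bare string as obj_id. A minimal before/after sketch of the calling convention, using a FakeDbConn stub as a stand-in for the API server's real db_conn so the snippet runs on its own:

class FakeDbConn(object):
    # Stand-in only: mimics the dbe_read signature, returns canned data.
    def dbe_read(self, obj_type, obj_id=None, obj_fields=None, obj_ids=None):
        uid = obj_id or (obj_ids or {}).get('uuid')
        return True, {'uuid': uid, 'fq_name': ['default-domain', 'proj', 'vn1']}

db_conn = FakeDbConn()

# Old convention: the uuid travelled inside an obj_ids dict.
ok, vn = db_conn.dbe_read('virtual_network',
                          obj_ids={'uuid': 'vn-uuid-1'},
                          obj_fields=['fq_name', 'network_ipam_refs'])

# New convention: the bare uuid string is passed as obj_id.
ok, vn = db_conn.dbe_read('virtual_network',
                          obj_id='vn-uuid-1',
                          obj_fields=['fq_name', 'network_ipam_refs'])

The same substitution applies to dbe_update, dbe_delete, dbe_is_latest and the type-specific *_notification hooks; only the wrapping dict goes away, the field lists and return values are unchanged.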
@@ -517,8 +516,7 @@ def stateful_create(): return (ok, result) get_context().push_undo(db_conn.dbe_release, obj_type, fq_name) - obj_ids.update(result) - + obj_id = result env = get_request().headers.environ tenant_name = env.get(hdr_server_tenant()) or 'default-project' @@ -550,7 +548,7 @@ def _create(): ret['ok'] = ok ret['result'] = result return - (_ok, _result) = db_conn.dbe_create(obj_type, obj_ids, + (_ok, _result) = db_conn.dbe_create(obj_type, obj_id, obj_dict) ret['ok'] = _ok ret['result'] = _result @@ -561,8 +559,7 @@ def _create(): return ret['ok'], ret['result'] else: #normal execution - (ok, result) = db_conn.dbe_create(obj_type, obj_ids, - obj_dict) + (ok, result) = db_conn.dbe_create(obj_type, obj_id, obj_dict) if not ok: return (ok, result) @@ -573,14 +570,14 @@ def _create(): except Exception as e: ok = False err_msg = '%s:%s post_dbe_create had an exception: %s' %( - obj_type, obj_ids['uuid'], str(e)) + obj_type, obj_id, str(e)) err_msg += cfgm_common.utils.detailed_traceback() if not ok: # Create is done, log to system, no point in informing user self.config_log(err_msg, level=SandeshLevel.SYS_ERR) - return True, '' + return True, obj_id # end stateful_create try: @@ -598,8 +595,8 @@ def _create(): rsp_body = {} rsp_body['name'] = name rsp_body['fq_name'] = fq_name - rsp_body['uuid'] = obj_ids['uuid'] - rsp_body['href'] = self.generate_url(resource_type, obj_ids['uuid']) + rsp_body['uuid'] = result + rsp_body['href'] = self.generate_url(resource_type, result) if parent_class: # non config-root child, send back parent uuid/href rsp_body['parent_uuid'] = parent_uuid @@ -651,8 +648,7 @@ def http_resource_read(self, obj_type, id): db_conn = self._db_conn if etag: - obj_ids = {'uuid': id} - (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.strip('"')) + (ok, result) = db_conn.dbe_is_latest(id, etag.strip('"')) if not ok: # Not present in DB self.config_object_error( @@ -664,9 +660,7 @@ def http_resource_read(self, obj_type, id): # send Not-Modified, caches use this for read optimization bottle.response.status = 304 return - #end if etag - - obj_ids = {'uuid': id} + # end if etag # Generate field list for db layer obj_fields = r_class.prop_fields | r_class.ref_fields @@ -678,13 +672,13 @@ def http_resource_read(self, obj_type, id): if 'exclude_children' not in get_request().query: obj_fields |= r_class.children_fields - (ok, result) = r_class.pre_dbe_read(obj_ids['uuid'], db_conn) + (ok, result) = r_class.pre_dbe_read(id, db_conn) if not ok: (code, msg) = result raise cfgm_common.exceptions.HttpError(code, msg) try: - (ok, result) = db_conn.dbe_read(obj_type, obj_ids, + (ok, result) = db_conn.dbe_read(obj_type, id, list(obj_fields), ret_readonly=True) if not ok: self.config_object_error(id, None, obj_type, 'http_get', result) @@ -776,8 +770,7 @@ def http_resource_update(self, obj_type, id): if req_obj_type != obj_type: raise cfgm_common.exceptions.HttpError( 404, 'No %s object found for id %s' %(resource_type, id)) - obj_ids = {'uuid': id} - (read_ok, read_result) = db_conn.dbe_read(obj_type, obj_ids) + (read_ok, read_result) = db_conn.dbe_read(obj_type, id) if not read_ok: bottle.abort( 404, 'No %s object found for id %s' %(resource_type, id)) @@ -822,9 +815,7 @@ def http_resource_update(self, obj_type, id): # State modification starts from here. 
Ensure that cleanup is done for all state changes cleanup_on_failure = [] - obj_ids = {'uuid': id} - if 'uuid' not in obj_dict: - obj_dict['uuid'] = id + obj_dict['uuid'] = id def stateful_update(): get_context().set_state('PRE_DBE_UPDATE') @@ -835,8 +826,7 @@ def stateful_update(): return (ok, result) get_context().set_state('DBE_UPDATE') - (ok, result) = db_conn.dbe_update(obj_type, obj_ids, - obj_dict) + (ok, result) = db_conn.dbe_update(obj_type, id, obj_dict) if not ok: return (ok, result) @@ -908,9 +898,8 @@ def http_resource_delete(self, obj_type, id): self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE) # read in obj from db (accepting error) to get details of it - obj_ids = {'uuid': id} try: - (read_ok, read_result) = db_conn.dbe_read(obj_type, obj_ids) + (read_ok, read_result) = db_conn.dbe_read(obj_type, id) except NoIdError as e: raise cfgm_common.exceptions.HttpError(404, str(e)) if not read_ok: @@ -996,8 +985,7 @@ def stateful_delete(): cleanup_on_failure.append((callable, [id, read_result, db_conn])) get_context().set_state('DBE_DELETE') - (ok, del_result) = db_conn.dbe_delete( - obj_type, obj_ids, read_result) + (ok, del_result) = db_conn.dbe_delete(obj_type, id, read_result) if not ok: return (ok, del_result) @@ -1225,14 +1213,14 @@ def create_default_children(self, object_type, parent_obj): (ok, result) = self._db_conn.dbe_alloc(child_obj_type, child_dict) if not ok: return (ok, result) - obj_ids = result + obj_id = result # For virtual networks, allocate an ID if child_obj_type == 'virtual_network': - child_dict['virtual_network_network_id'] =\ - self.alloc_vn_id(child_obj.get_fq_name_str()) + child_dict['virtual_network_network_id'] = self.alloc_vn_id( + child_obj.get_fq_name_str()) - (ok, result) = self._db_conn.dbe_create(child_obj_type, obj_ids, + (ok, result) = self._db_conn.dbe_create(child_obj_type, obj_id, child_dict) if not ok: # DB Create failed, log and stop further child creation. 
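The create path follows the same pattern: dbe_alloc now returns the bare uuid on success instead of an {'uuid': ...} dict, and that value flows straight into dbe_create and the response body. A condensed, self-contained illustration of that contract (FakeDb is a hypothetical in-memory stand-in, not the real db client in vnc_db.py):

import uuid

class FakeDb(object):
    # Hypothetical stand-in that only models the alloc/create hand-off.
    def __init__(self):
        self._store = {}

    def dbe_alloc(self, obj_type, obj_dict, uuid_requested=None):
        obj_dict['uuid'] = uuid_requested or str(uuid.uuid4())
        # Pre-patch this returned (True, {'uuid': obj_dict['uuid']});
        # post-patch the bare uuid string is the success payload.
        return True, obj_dict['uuid']

    def dbe_create(self, obj_type, obj_id, obj_dict):
        self._store[(obj_type, obj_id)] = obj_dict
        return True, ''

db = FakeDb()
obj_dict = {'fq_name': ['default-domain', 'proj', 'vn1']}
ok, obj_id = db.dbe_alloc('virtual_network', obj_dict)
ok, _ = db.dbe_create('virtual_network', obj_id, obj_dict)
print(obj_id)

In the patch, stateful_create() likewise returns this uuid as its success result, which is how http_resource_create fills rsp_body['uuid'] and the generated href without the old obj_ids dict.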
@@ -1835,10 +1823,10 @@ def obj_chown_http_post(self): if not 'RW' in perms: raise cfgm_common.exceptions.HttpError(403, " Permission denied") - (ok, obj_dict) = self._db_conn.dbe_read(obj_type, {'uuid':obj_uuid}, - obj_fields=['perms2']) + (ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid, + obj_fields=['perms2']) obj_dict['perms2']['owner'] = owner - self._db_conn.dbe_update(obj_type, {'uuid': obj_uuid}, obj_dict) + self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict) msg = "chown: %s owner set to %s" % (obj_uuid, owner) self.config_log(msg, level=SandeshLevel.SYS_NOTICE) @@ -1874,7 +1862,7 @@ def obj_chmod_http_post(self): owner_access = request_params.get('owner_access') global_access = request_params.get('global_access') - (ok, obj_dict) = self._db_conn.dbe_read(obj_type, {'uuid':obj_uuid}, + (ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid, obj_fields=['perms2', 'is_shared']) obj_perms = obj_dict['perms2'] old_perms = '%s/%d %d %s' % (obj_perms['owner'], @@ -1916,12 +1904,12 @@ def obj_chmod_http_post(self): obj_perms['owner_access'], obj_perms['global_access'], ['%s:%d' % (item['tenant'], item['tenant_access']) for item in obj_perms['share']]) - self._db_conn.dbe_update(obj_type, {'uuid': obj_uuid}, obj_dict) + self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict) msg = "chmod: %s perms old=%s, new=%s" % (obj_uuid, old_perms, new_perms) self.config_log(msg, level=SandeshLevel.SYS_NOTICE) return {} - #end obj_chmod_http_post + # end obj_chmod_http_post def prop_collection_http_get(self): if 'uuid' not in get_request().query: @@ -2061,8 +2049,7 @@ def prop_collection_http_post(self): # Validations over. Invoke type specific hook and extension manager try: fq_name = self._db_conn.uuid_to_fq_name(obj_uuid) - (read_ok, read_result) = self._db_conn.dbe_read( - obj_type, {'uuid':obj_uuid}) + (read_ok, read_result) = self._db_conn.dbe_read(obj_type, obj_uuid) except NoIdError: raise cfgm_common.exceptions.HttpError( 404, 'Object Not Found: '+obj_uuid) @@ -2200,7 +2187,7 @@ def ref_update_http_post(self): if operation == 'ADD': try: (read_ok, read_result) = self._db_conn.dbe_read( - ref_obj_type, {'uuid': ref_uuid}, obj_fields=['fq_name']) + ref_obj_type, ref_uuid, obj_fields=['fq_name']) except NoIdError: raise cfgm_common.exceptions.HttpError( 404, 'Object Not Found: ' + ref_uuid) @@ -2211,7 +2198,7 @@ def ref_update_http_post(self): # To invoke type specific hook and extension manager try: (read_ok, read_result) = self._db_conn.dbe_read( - obj_type, get_request().json) + obj_type, get_request().json['uuid']) except NoIdError: raise cfgm_common.exceptions.HttpError( 404, 'Object Not Found: '+obj_uuid) @@ -2816,10 +2803,10 @@ def _db_init_entries(self): obj_type = 'network_ipam' fq_name = ['default-domain', 'default-project', 'default-network-ipam'] obj_uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name) - (ok, obj_dict) = self._db_conn.dbe_read(obj_type, {'uuid':obj_uuid}, - obj_fields=['perms2']) + (ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid, + obj_fields=['perms2']) obj_dict['perms2']['global_access'] = PERMS_RX - self._db_conn.dbe_update(obj_type, {'uuid': obj_uuid}, obj_dict) + self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict) # end _db_init_entries # generate default rbac group rule @@ -2914,12 +2901,12 @@ def _create_singleton_entry(self, singleton_obj): obj_dict['id_perms'] = self._get_default_id_perms() obj_dict['perms2'] = self._get_default_perms2() (ok, result) = self._db_conn.dbe_alloc(obj_type, obj_dict) - obj_ids = result + obj_id 
= result # For virtual networks, allocate an ID if obj_type == 'virtual_network': vn_id = self.alloc_vn_id(s_obj.get_fq_name_str()) obj_dict['virtual_network_network_id'] = vn_id - self._db_conn.dbe_create(obj_type, obj_ids, obj_dict) + self._db_conn.dbe_create(obj_type, obj_id, obj_dict) self.create_default_children(obj_type, s_obj) return s_obj @@ -3395,7 +3382,7 @@ def vn_subnet_ip_count_http_post(self, id): # expected format {"subnet_list" : ["2.1.1.0/24", "1.1.1.0/24"] req_dict = get_request().json try: - (ok, result) = self._db_conn.dbe_read('virtual_network', {'uuid': id}) + (ok, result) = self._db_conn.dbe_read('virtual_network', id) except NoIdError as e: raise cfgm_common.exceptions.HttpError(404, str(e)) except Exception as e: diff --git a/src/config/api-server/vnc_cfg_types.py b/src/config/api-server/vnc_cfg_types.py index 772a831bc26..3c4d8036cd7 100644 --- a/src/config/api-server/vnc_cfg_types.py +++ b/src/config/api-server/vnc_cfg_types.py @@ -105,17 +105,17 @@ def post_dbe_delete(cls, id, obj_dict, db_conn): return True, '' @classmethod - def dbe_create_notification(cls, obj_ids, obj_dict): + def dbe_create_notification(cls, obj_id, obj_dict): pass #end dbe_create_notification @classmethod - def dbe_update_notification(cls, obj_ids): + def dbe_update_notification(cls, obj_id): pass #end dbe_update_notification @classmethod - def dbe_delete_notification(cls, obj_ids, obj_dict): + def dbe_delete_notification(cls, obj_id, obj_dict): pass #end dbe_delete_notification @@ -135,8 +135,7 @@ class Resource(ResourceDbMixin): @classmethod def dbe_read(cls, db_conn, res_type, obj_uuid, obj_fields=None): try: - ok, result = db_conn.dbe_read(res_type, - {'uuid': obj_uuid}, obj_fields) + ok, result = db_conn.dbe_read(res_type, obj_uuid, obj_fields) except cfgm_common.exceptions.NoIdError: return (False, (404, 'No %s: %s' %(res_type, obj_uuid))) if not ok: @@ -227,8 +226,7 @@ def _get_fip_pool_subnets(cls, fip_obj_dict, db_conn): fip_pool_fq_name = fip_obj_dict['fq_name'][:-1] fip_pool_uuid = db_conn.fq_name_to_uuid('floating_ip_pool', fip_pool_fq_name) - ok, res = cls.dbe_read(db_conn, 'floating_ip_pool', - fip_pool_uuid) + ok, res = cls.dbe_read(db_conn, 'floating_ip_pool', fip_pool_uuid) if ok: # Successful read returns fip pool. 
fip_pool_dict = res @@ -338,7 +336,7 @@ def post_dbe_delete(cls, id, obj_dict, db_conn): @classmethod - def dbe_create_notification(cls, obj_ids, obj_dict): + def dbe_create_notification(cls, obj_id, obj_dict): if obj_dict['parent_type'] == 'instance-ip': return @@ -348,7 +346,7 @@ def dbe_create_notification(cls, obj_ids, obj_dict): # end dbe_create_notification @classmethod - def dbe_delete_notification(cls, obj_ids, obj_dict): + def dbe_delete_notification(cls, obj_id, obj_dict): if obj_dict['parent_type'] == 'instance-ip': return @@ -406,14 +404,14 @@ def post_dbe_delete(cls, id, obj_dict, db_conn): @classmethod - def dbe_create_notification(cls, obj_ids, obj_dict): + def dbe_create_notification(cls, obj_id, obj_dict): aip_addr = obj_dict['alias_ip_address'] vn_fq_name = obj_dict['fq_name'][:-2] cls.addr_mgmt.ip_alloc_notify(aip_addr, vn_fq_name) # end dbe_create_notification @classmethod - def dbe_delete_notification(cls, obj_ids, obj_dict): + def dbe_delete_notification(cls, obj_id, obj_dict): aip_addr = obj_dict['alias_ip_address'] vn_fq_name = obj_dict['fq_name'][:-2] cls.addr_mgmt.ip_free_notify(aip_addr, vn_fq_name) @@ -554,8 +552,7 @@ def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, if req_ip_addr and req_ip_addr != db_ip_addr: return (False, (400, 'Instance IP Address can not be changed')) - ok, result = cls.dbe_read(db_conn, 'virtual_network', - vn_uuid, + ok, result = cls.dbe_read(db_conn, 'virtual_network', vn_uuid, obj_fields=['network_ipam_refs']) if not ok: return ok, result @@ -594,14 +591,14 @@ def post_dbe_delete(cls, id, obj_dict, db_conn): # end post_dbe_delete @classmethod - def dbe_create_notification(cls, obj_ids, obj_dict): + def dbe_create_notification(cls, obj_id, obj_dict): ip_addr = obj_dict['instance_ip_address'] vn_fq_name = obj_dict['virtual_network_refs'][0]['to'] cls.addr_mgmt.ip_alloc_notify(ip_addr, vn_fq_name) # end dbe_create_notification @classmethod - def dbe_delete_notification(cls, obj_ids, obj_dict): + def dbe_delete_notification(cls, obj_id, obj_dict): try: ip_addr = obj_dict['instance_ip_address'] except KeyError: @@ -661,9 +658,7 @@ def check_port_gateway_not_in_same_network(cls, db_conn, if lr_id: if ('virtual_network_refs' in obj_dict or 'virtual_machine_interface_refs' in obj_dict): - ok, read_result = cls.dbe_read(db_conn, - 'logical_router', - lr_id) + ok, read_result = cls.dbe_read(db_conn, 'logical_router', lr_id) if not ok: return ok, read_result if 'virtual_network_refs' in obj_dict: @@ -923,8 +918,7 @@ def post_dbe_create(cls, tenant_name, obj_dict, db_conn): def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, prop_collection_updates=None, **kwargs): - ok, read_result = cls.dbe_read( - db_conn, 'virtual_machine_interface', id) + ok, read_result = cls.dbe_read(db_conn, 'virtual_machine_interface', id) if not ok: return ok, read_result @@ -1032,7 +1026,8 @@ def pre_dbe_create(cls, tenant_name, obj_dict, db_conn): vn_uuid = obj_dict.get('parent_uuid') if vn_uuid is None: vn_uuid = db_conn.fq_name_to_uuid('virtual_network', obj_dict['fq_name'][0:3]) - ok, result = cls.dbe_read(db_conn, 'virtual_network', vn_uuid, obj_fields=['bridge_domains']) + ok, result = cls.dbe_read(db_conn, 'virtual_network', vn_uuid, + obj_fields=['bridge_domains']) if not ok: return ok, result if 'bridge_domains' in result and len(result['bridge_domains']) == 1: @@ -1156,9 +1151,8 @@ def _check_ipam_network_subnets(cls, obj_dict, db_conn, vn_uuid, ipam_uuid = db_conn.fq_name_to_uuid('network_ipam', ipam_fq_name) - (ok, ipam_dict) = db_conn.dbe_read( - 
obj_type='network_ipam', - obj_ids={'uuid': ipam_uuid}) + (ok, ipam_dict) = db_conn.dbe_read(obj_type='network_ipam', + obj_id=ipam_uuid) if not ok: return (ok, 400, ipam_dict) @@ -1286,9 +1280,8 @@ def undo_vn_id(): if not ipam_uuid: ipam_uuid = db_conn.fq_name_to_uuid('network_ipam', ipam_fq_name) - (ok, ipam_dict) = db_conn.dbe_read( - obj_type='network_ipam', - obj_ids={'uuid': ipam_uuid}) + (ok, ipam_dict) = db_conn.dbe_read(obj_type='network_ipam', + obj_id=ipam_uuid) if not ok: return (ok, (400, ipam_dict)) @@ -1420,9 +1413,8 @@ def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs): ipam_fq_name = ipam['to'] ipam_uuid = db_conn.fq_name_to_uuid('network_ipam', ipam_fq_name) - (ok, ipam_dict) = db_conn.dbe_read( - obj_type='network_ipam', - obj_ids={'uuid': ipam_uuid}) + (ok, ipam_dict) = db_conn.dbe_read(obj_type='network_ipam', + obj_id=ipam_uuid) if not ok: return (ok, (409, ipam_dict)) @@ -1472,8 +1464,7 @@ def post_dbe_delete(cls, id, obj_dict, db_conn): backref_fields = RoutingInstance.backref_fields children_fields = RoutingInstance.children_fields - ok, result = cls.dbe_read(db_conn, - 'routing_instance', ri_uuid, + ok, result = cls.dbe_read(db_conn, 'routing_instance', ri_uuid, obj_fields=backref_fields|children_fields) if not ok: return ok, result @@ -1541,18 +1532,18 @@ def subnet_ip_count(cls, vn_fq_name, subnet_list): # end subnet_ip_count @classmethod - def dbe_create_notification(cls, obj_ids, obj_dict): - cls.addr_mgmt.net_create_notify(obj_ids, obj_dict) + def dbe_create_notification(cls, obj_id, obj_dict): + cls.addr_mgmt.net_create_notify(obj_id, obj_dict) # end dbe_create_notification @classmethod - def dbe_update_notification(cls, obj_ids): - cls.addr_mgmt.net_update_notify(obj_ids) + def dbe_update_notification(cls, obj_id): + cls.addr_mgmt.net_update_notify(obj_id) # end dbe_update_notification @classmethod - def dbe_delete_notification(cls, obj_ids, obj_dict): - cls.addr_mgmt.net_delete_notify(obj_ids, obj_dict) + def dbe_delete_notification(cls, obj_id, obj_dict): + cls.addr_mgmt.net_delete_notify(obj_id, obj_dict) # end dbe_delete_notification # end class VirtualNetworkServer @@ -1653,8 +1644,7 @@ def ipam_mgmt_check(): for ref in vn_refs: vn_id = ref.get('uuid') try: - (ok, vn_dict) = db_conn.dbe_read('virtual_network', - {'uuid':vn_id}) + (ok, vn_dict) = db_conn.dbe_read('virtual_network', vn_id) except cfgm_common.exceptions.NoIdError: continue if not ok: @@ -1755,18 +1745,18 @@ def undo(): # end pre_dbe_delete @classmethod - def dbe_create_notification(cls, obj_ids, obj_dict): - cls.addr_mgmt.ipam_create_notify(obj_ids, obj_dict) + def dbe_create_notification(cls, obj_id, obj_dict): + cls.addr_mgmt.ipam_create_notify(obj_id, obj_dict) # end dbe_create_notification @classmethod - def dbe_update_notification(cls, obj_ids): - cls.addr_mgmt.ipam_update_notify(obj_ids) + def dbe_update_notification(cls, obj_id): + cls.addr_mgmt.ipam_update_notify(obj_id) # end dbe_update_notification @classmethod - def dbe_delete_notification(cls, obj_ids, obj_dict): - cls.addr_mgmt.ipam_delete_notify(obj_ids, obj_dict) + def dbe_delete_notification(cls, obj_id, obj_dict): + cls.addr_mgmt.ipam_delete_notify(obj_id, obj_dict) # end dbe_update_notification @classmethod @@ -2457,7 +2447,7 @@ def pre_dbe_create(cls, tenant_name, obj_dict, db_conn): for pool in lb_pools: ok, result = cls.dbe_read(db_conn, 'loadbalancer_pool', - pool['uuid']) + pool['uuid']) if not ok: code, msg = result if code == 404: diff --git a/src/config/api-server/vnc_db.py 
b/src/config/api-server/vnc_db.py index 95016ae2121..386c4e91c73 100644 --- a/src/config/api-server/vnc_db.py +++ b/src/config/api-server/vnc_db.py @@ -6,7 +6,7 @@ Layer that transforms VNC config objects to database representation """ from cfgm_common.zkclient import ZookeeperClient, IndexAllocator -from gevent import ssl, monkey +from gevent import monkey monkey.patch_all() import gevent import gevent.event @@ -25,6 +25,7 @@ from vnc_rdbms import VncServerRDBMSClient from cfgm_common.vnc_kombu import VncKombuClient from cfgm_common.utils import cgitb_hook +from cfgm_common.utils import shareinfo_from_perms2 from cfgm_common import vnc_greenlets from cfgm_common import SGID_MIN_ALLOC @@ -263,7 +264,7 @@ def __init__(self, db_client_mgr, rabbit_ip, rabbit_port, self._db_client_mgr = db_client_mgr self._sandesh = db_client_mgr._sandesh listen_port = db_client_mgr.get_server_port() - q_name = 'vnc_config.%s-%s' %(socket.gethostname(), listen_port) + q_name = 'vnc_config.%s-%s' % (socket.gethostname(), listen_port) super(VncServerKombuClient, self).__init__( rabbit_ip, rabbit_port, rabbit_user, rabbit_password, rabbit_vhost, rabbit_ha_mode, q_name, self._dbe_subscribe_callback, @@ -330,19 +331,21 @@ def _dbe_subscribe_callback(self, oper_info): sandesh=self._sandesh, error_msg=errmsg) #end _dbe_subscribe_callback - def dbe_create_publish(self, obj_type, obj_ids, obj_dict): + def dbe_create_publish(self, obj_type, obj_id, obj_dict): req_id = get_trace_id() oper_info = {'request-id': req_id, 'oper': 'CREATE', 'type': obj_type, + 'uuid': obj_id, + 'fq_name': obj_dict['fq_name'], 'obj_dict': obj_dict} - oper_info.update(obj_ids) self.publish(oper_info) # end dbe_create_publish def _dbe_create_notification(self, obj_info): try: - (ok, result) = self._db_client_mgr.dbe_read(obj_info['type'], obj_info) + (ok, result) = self._db_client_mgr.dbe_read(obj_info['type'], + obj_info['uuid']) if not ok: raise Exception(result) obj_dict = result @@ -355,7 +358,7 @@ def _dbe_create_notification(self, obj_info): try: r_class = self._db_client_mgr.get_resource_class(obj_info['type']) if r_class: - r_class.dbe_create_notification(obj_info, obj_dict) + r_class.dbe_create_notification(obj_info['uuid'], obj_dict) except Exception as e: err_msg = ("Failed in type specific dbe_create_notification " + str(e)) @@ -363,36 +366,34 @@ def _dbe_create_notification(self, obj_info): raise # end _dbe_create_notification - def dbe_update_publish(self, obj_type, obj_ids): - oper_info = {'oper': 'UPDATE', 'type': obj_type} - oper_info.update(obj_ids) + def dbe_update_publish(self, obj_type, obj_id): + oper_info = {'oper': 'UPDATE', 'type': obj_type, 'uuid': obj_id} self.publish(oper_info) # end dbe_update_publish def _dbe_update_notification(self, obj_info): try: - (ok, result) = self._db_client_mgr.dbe_read(obj_info['type'], obj_info) + (ok, result) = self._db_client_mgr.dbe_read(obj_info['type'], + obj_info['uuid']) except NoIdError as e: # No error, we will hear a delete shortly return - new_obj_dict = result - - self.dbe_uve_trace("UPDATE", obj_info['type'], obj_info['uuid'], new_obj_dict) + self.dbe_uve_trace("UPDATE", obj_info['type'], obj_info['uuid'], result) try: r_class = self._db_client_mgr.get_resource_class(obj_info['type']) if r_class: - r_class.dbe_update_notification(obj_info) + r_class.dbe_update_notification(obj_info['uuid']) except: msg = "Failed to invoke type specific dbe_update_notification" self.config_log(msg, level=SandeshLevel.SYS_ERR) raise # end _dbe_update_notification - def dbe_delete_publish(self, 
obj_type, obj_ids, obj_dict): - oper_info = {'oper': 'DELETE', 'type': obj_type, 'obj_dict': obj_dict} - oper_info.update(obj_ids) + def dbe_delete_publish(self, obj_type, obj_id, obj_dict): + oper_info = {'oper': 'DELETE', 'type': obj_type, 'uuid': obj_id, + 'fq_name': obj_dict['fq_name'], 'obj_dict': obj_dict} self.publish(oper_info) # end dbe_delete_publish @@ -408,7 +409,7 @@ def _dbe_delete_notification(self, obj_info): try: r_class = self._db_client_mgr.get_resource_class(obj_info['type']) if r_class: - r_class.dbe_delete_notification(obj_info, obj_dict) + r_class.dbe_delete_notification(obj_info['uuid'], obj_dict) except: msg = "Failed to invoke type specific dbe_delete_notification" self.config_log(msg, level=SandeshLevel.SYS_ERR) @@ -632,14 +633,14 @@ def __init__(self, api_svr_mgr, db_srv_list, rabbit_servers, rabbit_port, self._sandesh = api_svr_mgr._sandesh self._UVEMAP = { - "virtual_network" : ("ObjectVNTable", False), - "service_instance" : ("ObjectSITable", False), - "virtual_router" : ("ObjectVRouter", True), - "analytics_node" : ("ObjectCollectorInfo", True), - "database_node" : ("ObjectDatabaseInfo", True), - "config_node" : ("ObjectConfigNode", True), - "service_chain" : ("ServiceChain", False), - "physical_router" : ("ObjectPRouter", True), + "virtual_network": ("ObjectVNTable", False), + "service_instance": ("ObjectSITable", False), + "virtual_router": ("ObjectVRouter", True), + "analytics_node": ("ObjectCollectorInfo", True), + "database_node": ("ObjectDatabaseInfo", True), + "config_node": ("ObjectConfigNode", True), + "service_chain": ("ServiceChain", False), + "physical_router": ("ObjectPRouter", True), "bgp_router": ("ObjectBgpRouter", True), } @@ -691,7 +692,7 @@ def _update_default_quota(self): proj_id = self.fq_name_to_uuid('project', ['default-domain', 'default-project']) try: - (ok, result) = self.dbe_read('project', {'uuid':proj_id}) + (ok, result) = self.dbe_read('project', proj_id) except NoIdError as e: ok = False result = 'Project Not Found: %s' %(proj_id) @@ -701,10 +702,8 @@ def _update_default_quota(self): return proj_dict = result - quota = QuotaType() - proj_dict['quota'] = default_quota - self.dbe_update('project', {'uuid':proj_id}, proj_dict) + self.dbe_update('project', proj_id, proj_dict) # end _update_default_quota def get_api_server(self): @@ -960,12 +959,12 @@ def _dbe_read(self, obj_type, obj_uuids): # end _dbe_read @ignore_exceptions - def _generate_db_request_trace(self, oper, obj_type, obj_ids, obj_dict): + def _generate_db_request_trace(self, oper, obj_type, obj_id, obj_dict): req_id = get_trace_id() body = dict(obj_dict) body['type'] = obj_type - body.update(obj_ids) + body['uuid'] = obj_id db_trace = DBRequestTrace(request_id=req_id) db_trace.operation = oper db_trace.body = json.dumps(body) @@ -985,9 +984,7 @@ def dbe_alloc(self, obj_type, obj_dict, uuid_requested=None): except ResourceExistsError as e: return (False, (409, str(e))) - obj_ids = { 'uuid': obj_dict['uuid'] } - - return (True, obj_ids) + return (True, obj_dict['uuid']) # end dbe_alloc def dbe_uve_trace(self, oper, type, uuid, obj_dict): @@ -1027,11 +1024,11 @@ def dbe_uve_trace(self, oper, type, uuid, obj_dict): def dbe_trace(oper): def wrapper1(func): - def wrapper2(self, obj_type, obj_ids, obj_dict): + def wrapper2(self, obj_type, obj_id, obj_dict): trace = self._generate_db_request_trace(oper, obj_type, - obj_ids, obj_dict) + obj_id, obj_dict) try: - ret = func(self, obj_type, obj_ids, obj_dict) + ret = func(self, obj_type, obj_id, obj_dict) trace_msg([trace], 
'DBRequestTraceBuf', self._sandesh) return ret @@ -1047,18 +1044,16 @@ def wrapper2(self, obj_type, obj_ids, obj_dict): # create/update indexes if object is shared def build_shared_index(oper): def wrapper1(func): - def wrapper2(self, obj_type, obj_ids, obj_dict): + def wrapper2(self, obj_type, obj_id, obj_dict): - obj_uuid = obj_ids['uuid'] # fetch current share information to identify what might have changed try: - cur_perms2 = self.uuid_to_obj_perms2(obj_uuid) + cur_perms2 = self.uuid_to_obj_perms2(obj_id) except Exception as e: cur_perms2 = self.get_default_perms2() - pass # don't build sharing indexes if operation (create/update) failed - (ok, result) = func(self, obj_type, obj_ids, obj_dict) + (ok, result) = func(self, obj_type, obj_id, obj_dict) if not ok: return (ok, result) @@ -1076,9 +1071,9 @@ def wrapper2(self, obj_type, obj_ids, obj_dict): # change in global access? if cur_perms2['global_access'] != global_access: if global_access: - self._object_db.set_shared(obj_type, obj_uuid, rwx = global_access) + self._object_db.set_shared(obj_type, obj_id, rwx = global_access) else: - self._object_db.del_shared(obj_type, obj_uuid) + self._object_db.del_shared(obj_type, obj_id) # change in shared list? Construct temporary sets to compare cur_shared_list = set(item['tenant']+':'+str(item['tenant_access']) for item in cur_perms2['share']) @@ -1089,15 +1084,15 @@ def wrapper2(self, obj_type, obj_ids, obj_dict): # delete sharing if no longer in shared list for share_info in cur_shared_list - new_shared_list: # sharing information => [share-type, uuid, rwx bits] - (share_type, share_id, share_perms) = cfgm_common.utils.shareinfo_from_perms2(share_info) - self._object_db.del_shared(obj_type, obj_uuid, - share_id = share_id, share_type = share_type) + (share_type, share_id, share_perms) = shareinfo_from_perms2(share_info) + self._object_db.del_shared(obj_type, obj_id, + share_id=share_id, share_type=share_type) # share this object with specified tenants for share_info in new_shared_list - cur_shared_list: # sharing information => [share-type, uuid, rwx bits] - (share_type, share_id, share_perms) = cfgm_common.utils.shareinfo_from_perms2(share_info) - self._object_db.set_shared(obj_type, obj_uuid, + (share_type, share_id, share_perms) = shareinfo_from_perms2(share_info) + self._object_db.set_shared(obj_type, obj_id, share_id = share_id, share_type = share_type, rwx = int(share_perms)) return (ok, result) @@ -1106,29 +1101,27 @@ def wrapper2(self, obj_type, obj_ids, obj_dict): @dbe_trace('create') @build_shared_index('create') - def dbe_create(self, obj_type, obj_ids, obj_dict): - (ok, result) = self._object_db.object_create( - obj_type, obj_ids['uuid'], obj_dict) + def dbe_create(self, obj_type, obj_id, obj_dict): + (ok, result) = self._object_db.object_create(obj_type, obj_id, obj_dict) if ok: # publish to msgbus - self._msgbus.dbe_create_publish(obj_type, obj_ids, obj_dict) + self._msgbus.dbe_create_publish(obj_type, obj_id, obj_dict) return (ok, result) # end dbe_create # input id is uuid - def dbe_read(self, obj_type, obj_ids, obj_fields=None, + def dbe_read(self, obj_type, obj_id, obj_fields=None, ret_readonly=False): try: (ok, cassandra_result) = self._object_db.object_read( - obj_type, [obj_ids['uuid']], obj_fields, - ret_readonly=ret_readonly) + obj_type, [obj_id], obj_fields, ret_readonly=ret_readonly) except NoIdError as e: # if NoIdError is for obj itself (as opposed to say for parent # or ref), let caller decide if this can be handled gracefully # by re-raising - if e._unknown_id == 
obj_ids['uuid']: + if e._unknown_id == obj_id: raise return (False, str(e)) @@ -1150,9 +1143,9 @@ def dbe_get_relaxed_refs(self, obj_id): return self._object_db.get_relaxed_refs(obj_id) # end dbe_get_relaxed_refs - def dbe_is_latest(self, obj_ids, tstamp): + def dbe_is_latest(self, obj_id, tstamp): try: - is_latest = self._object_db.is_latest(obj_ids['uuid'], tstamp) + is_latest = self._object_db.is_latest(obj_id, tstamp) return (True, is_latest) except Exception as e: return (False, str(e)) @@ -1160,12 +1153,12 @@ def dbe_is_latest(self, obj_ids, tstamp): @dbe_trace('update') @build_shared_index('update') - def dbe_update(self, obj_type, obj_ids, new_obj_dict): + def dbe_update(self, obj_type, obj_id, new_obj_dict): (ok, cassandra_result) = self._object_db.object_update( - obj_type, obj_ids['uuid'], new_obj_dict) + obj_type, obj_id, new_obj_dict) # publish to message bus (rabbitmq) - self._msgbus.dbe_update_publish(obj_type, obj_ids) + self._msgbus.dbe_update_publish(obj_type, obj_id) return (ok, cassandra_result) # end dbe_update @@ -1183,7 +1176,7 @@ def _owner_id(self): domain = 'default-domain' domain = self._db_conn.fq_name_to_uuid('domain', [domain]) if domain: - domain = domain.replace('-','') + domain = domain.replace('-', '') return domain, tenant_uuid def dbe_list_rdbms(self, obj_type, parent_uuids=None, back_ref_uuids=None, @@ -1198,7 +1191,8 @@ def dbe_list_rdbms(self, obj_type, parent_uuids=None, back_ref_uuids=None, return self._object_db.object_list( obj_type, parent_uuids=parent_uuids, back_ref_uuids=back_ref_uuids, obj_uuids=obj_uuids, - count=is_count, filters=filters, is_detail=is_detail, field_names=field_names, tenant_id=tenant_id, domain=domain) + count=is_count, filters=filters, is_detail=is_detail, + field_names=field_names, tenant_id=tenant_id, domain=domain) def dbe_list(self, obj_type, parent_uuids=None, back_ref_uuids=None, obj_uuids=None, is_count=False, filters=None, @@ -1255,12 +1249,12 @@ def dbe_list(self, obj_type, parent_uuids=None, back_ref_uuids=None, # end dbe_list @dbe_trace('delete') - def dbe_delete(self, obj_type, obj_ids, obj_dict): + def dbe_delete(self, obj_type, obj_id, obj_dict): (ok, cassandra_result) = self._object_db.object_delete( - obj_type, obj_ids['uuid']) + obj_type, obj_id) # publish to message bus (rabbitmq) - self._msgbus.dbe_delete_publish(obj_type, obj_ids, obj_dict) + self._msgbus.dbe_delete_publish(obj_type, obj_id, obj_dict) # finally remove mapping in zk self.dbe_release(obj_type, obj_dict['fq_name']) @@ -1364,7 +1358,7 @@ def prop_collection_update(self, obj_type, obj_uuid, updates): return self._object_db.prop_collection_update(obj_type, obj_uuid, updates) - self._msgbus.dbe_update_publish(obj_type, {'uuid': obj_uuid}) + self._msgbus.dbe_update_publish(obj_type, obj_uuid) return True, '' # end prop_collection_update @@ -1372,7 +1366,7 @@ def ref_update(self, obj_type, obj_uuid, ref_obj_type, ref_uuid, ref_data, operation): self._object_db.ref_update(obj_type, obj_uuid, ref_obj_type, ref_uuid, ref_data, operation) - self._msgbus.dbe_update_publish(obj_type, {'uuid': obj_uuid}) + self._msgbus.dbe_update_publish(obj_type, obj_uuid) # ref_update def ref_relax_for_delete(self, obj_uuid, ref_uuid): diff --git a/src/config/api-server/vnc_perms.py b/src/config/api-server/vnc_perms.py index 5f854ca0786..2ddd9c532e7 100644 --- a/src/config/api-server/vnc_perms.py +++ b/src/config/api-server/vnc_perms.py @@ -237,7 +237,7 @@ def check_perms_delete(self, request, obj_type, obj_uuid, parent_uuid): if self._rbac: # delete only allowed for 
owner (ok, obj_dict) = self._server_mgr._db_conn.dbe_read(obj_type, - {'uuid':obj_uuid}, obj_fields=['perms2']) + obj_uuid, obj_fields=['perms2']) obj_owner=obj_dict['perms2']['owner'] return self.validate_perms_rbac(request, parent_uuid, PERMS_W, obj_owner_for_delete = obj_owner) elif self._multi_tenancy: diff --git a/src/config/api-server/vnc_quota.py b/src/config/api-server/vnc_quota.py index 5fdb53321bc..9eb8a7a784d 100644 --- a/src/config/api-server/vnc_quota.py +++ b/src/config/api-server/vnc_quota.py @@ -14,8 +14,8 @@ class QuotaHelper(object): @classmethod def get_project_dict_for_quota(cls, proj_uuid, db_conn): try: - (ok, proj_dict) = db_conn.dbe_read('project', {'uuid': proj_uuid}, - obj_fields=['quota']) + (ok, proj_dict) = db_conn.dbe_read('project', proj_uuid, + obj_fields=['quota']) except cfgm_common.exceptions.NoIdError as e: return (False, str(e)) diff --git a/src/config/api-server/vnc_rbac.py b/src/config/api-server/vnc_rbac.py index c202bccb38f..fb9e5d6413c 100644 --- a/src/config/api-server/vnc_rbac.py +++ b/src/config/api-server/vnc_rbac.py @@ -75,10 +75,9 @@ def read_default_rbac_rules(self, conf_file): return rbac_rules def get_rbac_rules_object(self, obj_type, obj_uuid): - obj_ids = {'uuid' : obj_uuid} obj_fields = ['api_access_lists'] try: - (ok, result) = self._db_conn.dbe_read(obj_type, obj_ids, obj_fields) + (ok, result) = self._db_conn.dbe_read(obj_type, obj_uuid, obj_fields) except NoIdError: ok = False if not ok or 'api_access_lists' not in result: @@ -86,8 +85,8 @@ def get_rbac_rules_object(self, obj_type, obj_uuid): api_access_lists = result['api_access_lists'] obj_fields = ['api_access_list_entries'] - obj_ids = {'uuid' : api_access_lists[0]['uuid']} - (ok, result) = self._db_conn.dbe_read('api_access_list', obj_ids, obj_fields) + (ok, result) = self._db_conn.dbe_read( + 'api_access_list', api_access_lists[0]['uuid'], obj_fields) if not ok or 'api_access_list_entries' not in result: return [] # {u'rbac_rule': [{u'rule_object': u'*', u'rule_perms': [{u'role_crud': u'CRUD', u'role_name': u'admin'}], u'rule_field': None}]} @@ -106,7 +105,7 @@ def get_rbac_rules(self, request): if domain_id is None: ok = False try: - (ok, result) = self._db_conn.dbe_read('project', {'uuid' : project_id}, ['fq_name']) + (ok, result) = self._db_conn.dbe_read('project', project_id, ['fq_name']) except Exception as e: ok = False pass diff --git a/src/config/common/tests/test_utils.py b/src/config/common/tests/test_utils.py index 9e441e9feeb..9f99708b7e6 100644 --- a/src/config/common/tests/test_utils.py +++ b/src/config/common/tests/test_utils.py @@ -271,8 +271,7 @@ def _column_within_range(self, column_name, column_start, column_finish): return True # end _column_within_range - def get( - self, key, columns=None, column_start=None, column_finish=None, + def get(self, key, columns=None, column_start=None, column_finish=None, column_count=0, include_timestamp=False, include_ttl=False): if not key in self._rows: raise pycassa.NotFoundException diff --git a/src/config/common/vnc_amqp.py b/src/config/common/vnc_amqp.py index b52ab9c0838..8c58addff4e 100644 --- a/src/config/common/vnc_amqp.py +++ b/src/config/common/vnc_amqp.py @@ -103,11 +103,15 @@ def vnc_subscribe_actions(self): return self.evaluate_dependency() + def _get_key_from_oper_info(self): + if self.db_cls._indexed_by_name: + return ':'.join(self.oper_info['fq_name']) + return self.oper_info['uuid'] + def handle_create(self): - obj_dict = self.oper_info['obj_dict'] - obj_key = self.db_cls.get_key_from_dict(obj_dict) + 
obj_key = self._get_key_from_oper_info() obj_id = self.oper_info['uuid'] - obj_fq_name = obj_dict['fq_name'] + obj_fq_name = self.oper_info['fq_name'] self.db_cls._object_db.cache_uuid_to_fq_name_add( obj_id, obj_fq_name, self.obj_type) self.obj = self.obj_class.locate(obj_key) @@ -161,7 +165,7 @@ def handle_delete(self): self.dependency_tracker = DependencyTracker( self.db_cls.get_obj_type_map(), self.reaction_map) self.dependency_tracker.evaluate(self.obj_type, self.obj) - obj_key = self.db_cls.get_key_from_dict(self.oper_info['obj_dict']) + obj_key = self._get_key_from_oper_info() self.obj_class.delete(obj_key) def handle_unknown(self): diff --git a/src/config/common/vnc_db.py b/src/config/common/vnc_db.py index 19f9bc2e9c5..7be18c20ea9 100644 --- a/src/config/common/vnc_db.py +++ b/src/config/common/vnc_db.py @@ -352,14 +352,6 @@ def get_obj_type_map(cls): if cls.__module__ == x.obj_type] return dict((x.obj_type, x) for x in module_base[0].__subclasses__()) - @classmethod - def get_key_from_dict(cls, obj_dict): - if cls._indexed_by_name: - obj_key = ':'.join(obj_dict['fq_name']) - else: - obj_key = obj_dict['uuid'] - return obj_key - @classmethod def get_by_uuid(cls, uuid, *args): name_or_uuid = uuid diff --git a/src/config/svc-monitor/svc_monitor/tests/test_svc_monitor.py b/src/config/svc-monitor/svc_monitor/tests/test_svc_monitor.py index e420e290b5b..c60589bdede 100644 --- a/src/config/svc-monitor/svc_monitor/tests/test_svc_monitor.py +++ b/src/config/svc-monitor/svc_monitor/tests/test_svc_monitor.py @@ -15,24 +15,12 @@ si_add_info = { u'oper': u'CREATE', u'uuid': u'fake-instance', u'type': u'service-instance', - u'obj_dict': { - u'fq_name': [u'fake-domain', u'fake-project', u'fake-instance'], - u'uuid': u'fake-instance', - u'parent_uuid': u'fake-domain:fake-project', - u'service_instance_properties': { - u'scale_out': {u'max_instances': 2}, - u'interface_list': [ - {u'virtual_network': u''}, - {u'virtual_network': u'fake-domain:fake-project:left_vn'}, - {u'virtual_network': u'fake-domain:fake-project:right_vn'} - ] - }, - u'parent_type': u'project' - } + u'fq_name': [u'fake-domain', u'fake-project', u'fake-instance'] } si_del_info = { u'oper': u'DELETE', u'uuid': u'fake-instance', u'type': u'service-instance', + u'fq_name': [u'fake-domain', u'fake-project', u'fake-instance'], u'obj_dict': { u'virtual_machine_back_refs': [{u'to': [u'fake-vm'], u'uuid': u'fake-vm'}], u'fq_name': [u'fake-domain', u'fake-project', u'fake-instance'], @@ -54,26 +42,17 @@ vn_add_info = { u'oper': u'CREATE', u'uuid': u'left-vn', u'type': u'virtual-network', - u'obj_dict': { - u'uuid': u'left-vn', - u'fq_name': [u'fake-domain', u'fake-project', u'left-vn'], - u'parent_uuid': u'fake-domain:fake-project', - u'parent_type': u'project' - } + u'fq_name': [u'fake-domain', u'fake-project', u'left-vn'], } vmi_add_info = { u'oper': u'CREATE', u'uuid': u'left-vmi', u'type': u'virtual-machine-interface', - u'obj_dict': { - u'fq_name': [u'fake-domain', u'fake-project', u'fake-domain__fake-project__fake-instance__1__left__0'], - u'uuid': u'left-vmi', - u'parent_uuid': u'fake-domain:fake-project', - u'parent_type': u'project' - } + u'fq_name': [u'fake-domain', u'fake-project', u'fake-domain__fake-project__fake-instance__1__left__0'], } vmi_del_info = { u'oper': u'DELETE', u'uuid': u'left-vmi', u'type': u'virtual-machine-interface', + u'fq_name': [u'fake-domain', u'fake-project', u'left-vmi'], u'obj_dict': { u'fq_name': [u'fake-domain', u'fake-project', u'left-vmi'], u'uuid': u'left-vmi', @@ -83,44 +62,15 @@ } 
sas_add_info = { - u'imid': u'contrail:service-appliance-set:default-global-system-config:Test-SAS', - u'obj_dict': { - u'display_name': u'Test-SAS', - u'fq_name': [u'default-global-system-config', u'Test-SAS'], - u'id_perms': {u'created': u'2015-09-23T10:24:56.464362', - u'creator': None, - u'description': None, - u'enable': True, - u'last_modified': u'2015-09-23T10:24:56.464362', - u'permissions': {u'group': u'admin', - u'group_access': 7, - u'other_access': 7, - u'owner': u'admin', - u'owner_access': 7}, - u'user_visible': True, - u'uuid': {u'uuid_lslong': 11604282682608356844L, - u'uuid_mslong': 11461005920023169084L}}, - u'parent_type': u'global-system-config', - u'service_appliance_driver': u'svc_monitor.tests.fake_lb_driver.OpencontrailFakeLoadbalancerDriver', - u'service_appliance_set_properties': {u'key_value_pair': [{u'key': u'sync_mode', - u'value': u'replication'}, - {u'key': u'num_snat', - u'value': u'1'}, - {u'key': u'use_snat', - u'value': u'True'}, - {u'key': u'global_routed_mode', - u'value': u'True'}]}, - u'uuid': u'sas' - }, + u'fq_name': [u'default-global-system-config', u'Test-SAS'], u'oper': u'CREATE', - u'parent_imid': u'contrail:global-system-config:default-global-system-config', u'request-id': u'req-9977e0e7-910e-41e5-9378-974d2a1820ef', u'type': u'service-appliance-set', u'uuid': u'sas' } sas_del_info = { - u'imid': u'contrail:service-appliance-set:default-global-system-config:Test-SAS', + u'fq_name': [u'default-global-system-config', u'Test-SAS'], u'obj_dict': { u'display_name': u'Test-SAS', u'fq_name': [u'default-global-system-config', u'Test-SAS'], @@ -150,46 +100,21 @@ u'uuid': u'sas' }, u'oper': u'DELETE', - u'parent_imid': u'contrail:global-system-config:default-global-system-config', u'request-id': u'req-9977e0e7-910e-41e5-9378-974d2a1820ef', u'type': u'service-appliance-set', u'uuid': u'sas' } sa_add_info = { - u'imid': u'contrail:service-appliance:default-global-system-config:Test-SAS:Test-SA', - u'obj_dict': { - u'display_name': u'Test-SA', - u'fq_name': [u'default-global-system-config', u'Test-SAS', u'Test-SA'], - u'id_perms': {u'created': u'2015-09-23T10:24:59.261198', - u'creator': None, - u'description': None, - u'enable': True, - u'last_modified': u'2015-09-23T10:24:59.261198', - u'permissions': {u'group': u'admin', - u'group_access': 7, - u'other_access': 7, - u'owner': u'admin', - u'owner_access': 7}, - u'user_visible': True, - u'uuid': {u'uuid_lslong': 10774623880662702549L, - u'uuid_mslong': 1841697908979158050}}, - u'parent_type': u'service-appliance-set', - u'service_appliance_ip_address': u'10.102.44.30', - u'service_appliance_properties': {u'key_value_pair': []}, - u'service_appliance_user_credentials': {u'password': u'Bond', - u'username': u'James'}, - u'uuid': u'sa' - }, + u'fq_name': [u'default-global-system-config', u'Test-SAS', u'Test-SA'], u'oper': u'CREATE', - u'parent_imid': u'contrail:service-appliance-set:default-global-system-config:Test-SAS', u'request-id': u'req-3cd178f7-9662-48ad-8cb5-984c02d4d981', u'type': u'service-appliance', u'uuid': u'sa' } sa_del_info = { - u'imid': u'contrail:service-appliance:default-global-system-config:Test-SAS:Test-SA', + u'fq_name': [u'default-global-system-config', u'Test-SAS', u'Test-SA'], u'obj_dict': { u'display_name': u'Test-SA', u'fq_name': [u'default-global-system-config', u'Test-SAS', u'Test-SA'], @@ -214,46 +139,14 @@ u'uuid': u'sa' }, u'oper': u'DELETE', - u'parent_imid': u'contrail:service-appliance-set:default-global-system-config:Test-SAS', u'request-id': 
u'req-3cd178f7-9662-48ad-8cb5-984c02d4d981', u'type': u'service-appliance', u'uuid': u'sa' } pool_add_info = { - u'imid': u'contrail:loadbalancer-pool:default-domain:admin:mypool', - u'obj_dict': { - u'display_name': u'Test-pool', - u'fq_name': [u'default-domain', u'admin', u'Test-pool'], - u'id_perms': {u'created': u'2015-09-23T10:17:26.193693', - u'creator': None, - u'description': u'Test pool', - u'enable': True, - u'last_modified': u'2015-09-23T10:17:26.193693', - u'permissions': {u'group': u'admin', - u'group_access': 7, - u'other_access': 7, - u'owner': u'neutron', - u'owner_access': 7}, - u'user_visible': True, - u'uuid': {u'uuid_lslong': 12634730708897037914L, - u'uuid_mslong': 8496742968641014440}}, - u'loadbalancer_pool_properties': {u'admin_state': True, - u'loadbalancer_method': u'ROUND_ROBIN', - u'protocol': u'TCP', - u'status': None, - u'status_description': None, - u'subnet_id': u'subnet-id'}, - u'loadbalancer_pool_provider': u'Test-SAS', - u'parent_type': u'project', - u'parent_uuid': u'fakeproject', - u'service_appliance_set_refs': [{u'to': [u'default-global-system-config', - u'Test-SAS'], - u'uuid': u'sas'}], - u'uuid': u'pool' - }, + u'fq_name': [u'default-domain', u'admin', u'Test-pool'], u'oper': u'CREATE', - u'parent_imid': u'contrail:project:default-domain:fakeproject', u'request-id': u'req-fad2a313-ed58-48cc-a2b1-3f03a6ca8ca7', u'type': u'loadbalancer-pool', u'uuid': u'pool' @@ -266,7 +159,7 @@ } pool_del_info = { - u'imid': u'contrail:loadbalancer-pool:default-domain:admin:mypool', + u'fq_name': [u'default-domain', u'admin', u'Test-pool'], u'obj_dict': { u'display_name': u'Test-pool', u'fq_name': [u'default-domain', u'admin', u'Test-pool'], @@ -301,43 +194,14 @@ u'uuid': u'pool' }, u'oper': u'DELETE', - u'parent_imid': u'contrail:project:default-domain:fakeproject', u'type': u'loadbalancer-pool', u'uuid': u'pool' } member_add_info = { - u'imid': u'contrail:loadbalancer-member:default-domain:admin:mypool:058f2511-08af-4330-9ea3-119e09408969', - u'obj_dict': { - u'display_name': u'058f2511-08af-4330-9ea3-119e09408969', - u'fq_name': [u'default-domain', - u'admin', - u'mypool', - u'058f2511-08af-4330-9ea3-119e09408969'], - u'id_perms': {u'created': u'2015-09-23T10:29:24.359873', - u'creator': None, - u'description': u'Test Pool member', - u'enable': True, - u'last_modified': u'2015-09-23T10:29:24.359873', - u'permissions': {u'group': u'admin', - u'group_access': 7, - u'other_access': 7, - u'owner': u'neutron', - u'owner_access': 7}, - u'user_visible': True, - u'uuid': {u'uuid_lslong': 11430999649654180201L, - u'uuid_mslong': 400579646949638960}}, - u'loadbalancer_member_properties': {u'address': u'1.1.4.5', - u'admin_state': True, - u'protocol_port': 91, - u'status': None, - u'status_description': None, - u'weight': 1}, - u'parent_type': u'loadbalancer-pool', - u'uuid': u'member' - }, + u'fq_name': [u'default-domain', u'admin', u'mypool', + u'058f2511-08af-4330-9ea3-119e09408969'], u'oper': u'CREATE', - u'parent_imid': u'contrail:loadbalancer-pool:default-domain:admin:Test-pool', u'request-id': u'req-5f243860-8512-4ae0-9ff3-55c4fe8844d9', u'type': u'loadbalancer-member', u'uuid': u'member' @@ -350,7 +214,8 @@ } member_del_info = { - u'imid': u'contrail:loadbalancer-member:default-domain:admin:mypool:058f2511-08af-4330-9ea3-119e09408969', + u'fq_name': [u'default-domain', u'admin', u'mypool', + u'058f2511-08af-4330-9ea3-119e09408969'], u'obj_dict': { u'display_name': u'058f2511-08af-4330-9ea3-119e09408969', u'fq_name': [u'default-domain', @@ -380,53 +245,14 @@ u'uuid': 
u'member' }, u'oper': u'DELETE', - u'parent_imid': u'contrail:loadbalancer-pool:default-domain:admin:Test-pool', u'request-id': u'req-5f243860-8512-4ae0-9ff3-55c4fe8844d9', u'type': u'loadbalancer-member', u'uuid': u'member' } vip_add_info = { - u'imid': u'contrail:virtual-ip:default-domain:admin:myvip1_ftp', - u'obj_dict': { - u'display_name': u'Test-vip', - u'fq_name': [u'default-domain', u'admin', u'Test-vip'], - u'id_perms': {u'created': u'2015-09-23T10:32:33.447634', - u'creator': None, - u'description': u'Test vip', - u'enable': True, - u'last_modified': u'2015-09-23T10:32:33.447634', - u'permissions': {u'group': u'admin', - u'group_access': 7, - u'other_access': 7, - u'owner': u'neutron', - u'owner_access': 7}, - u'user_visible': True, - u'uuid': {u'uuid_lslong': 9959603007601504245L, - u'uuid_mslong': 16499857160913372641L}}, - u'loadbalancer_pool_refs': [{u'to': [u'default-domain', - u'admin', - u'Test-pool'], - u'uuid': u'pool'}], - u'parent_type': u'project', - u'uuid': u'vip', - u'virtual_ip_properties': {u'address': u'4.4.4.3', - u'admin_state': True, - u'connection_limit': -1, - u'persistence_cookie_name': None, - u'persistence_type': None, - u'protocol': u'TCP', - u'protocol_port': 91, - u'status': None, - u'status_description': None, - u'subnet_id': u'subnet_id'}, - u'virtual_machine_interface_refs': [{u'to': [u'default-domain', - u'admin', - u'e4fb44aa-f8ed-45e1-8a37-9e2acbffeff5'], - u'uuid': u'dda69314-cb20-486f-a108-5f1067c60c6a'}] - }, + u'fq_name': [u'default-domain', u'admin', u'Test-vip'], u'oper': u'CREATE', - u'parent_imid': u'contrail:project:default-domain:fakeproject', u'request-id': u'req-eee836f8-9fd4-4d52-aa73-579afe8c830a', u'type': u'virtual-ip', u'uuid': u'vip' @@ -439,7 +265,7 @@ } vip_del_info = { - u'imid': u'contrail:virtual-ip:default-domain:admin:Test-vip', + u'fq_name': [u'default-domain', u'admin', u'Test-vip'], u'obj_dict': { u'display_name': u'Test-vip', u'fq_name': [u'default-domain', u'admin', u'Test-vip'], @@ -478,7 +304,6 @@ u'uuid': u'dda69314-cb20-486f-a108-5f1067c60c6a'}] }, u'oper': u'DELETE', - u'parent_imid': u'contrail:project:default-domain:admin', u'request-id': u'req-eee836f8-9fd4-4d52-aa73-579afe8c830a', u'type': u'virtual-ip', u'uuid': u'vip'
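Taken together, the message-bus payloads change shape as well: uuid and fq_name become explicit top-level keys instead of being merged in from an obj_ids dict. That is also why the svc-monitor fixtures above drop obj_dict for CREATE events (handle_create now reads uuid and fq_name from the top level) while DELETE events keep obj_dict for back-ref cleanup. A sketch of the CREATE payload as dbe_create_publish now builds it; the helper function below is illustrative, not part of the patch:

def build_create_oper_info(obj_type, obj_id, obj_dict, req_id=None):
    # Illustrative only: mirrors the dict dbe_create_publish() publishes.
    return {
        'request-id': req_id,
        'oper': 'CREATE',
        'type': obj_type,
        'uuid': obj_id,
        'fq_name': obj_dict['fq_name'],
        'obj_dict': obj_dict,
    }

print(build_create_oper_info('virtual_network', 'vn-uuid-1',
                             {'fq_name': ['default-domain', 'proj', 'vn1']}))

On the consumer side, vnc_amqp.py derives its cache key from these top-level fields via _get_key_from_oper_info() rather than get_key_from_dict(obj_dict), so handlers no longer need the full obj_dict just to locate the object.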