Merge "Add check service for the PNF service instance, Fix the bug 15…
Browse files Browse the repository at this point in the history
…78636" into R3.0
  • Loading branch information
Zuul authored and opencontrail-ci-admin committed May 7, 2016
2 parents 8862eed + 07edad0 commit 9ff8add
Showing 3 changed files with 84 additions and 36 deletions.
1 change: 1 addition & 0 deletions src/config/svc-monitor/svc_monitor/config_db.py
@@ -1059,6 +1059,7 @@ def update(self, obj=None):
'service_appliance_user_credentials', None)
self.ip_address = obj.get('service_appliance_ip_address', None)
self.service_appliance_set = self.get_parent_uuid(obj)
self.physical_interfaces = {}
ref_objs = obj.get("physical_interface_refs",[])
for ref in ref_objs:
self.physical_interfaces[ref[
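
A note on the hunk above: update() re-reads the object from the API server and rebuilds physical_interfaces from the current physical_interface_refs, so the added reset of the dict is what keeps interfaces from deleted refs from lingering across updates. A minimal standalone sketch of the rebuild-from-refs pattern (ApplianceCache and the ref layout are illustrative, not the real ServiceApplianceSM):

class ApplianceCache(object):
    def __init__(self):
        self.physical_interfaces = {}

    def update(self, obj):
        # Reset before rebuilding: without this, a ref removed upstream
        # would survive here as a stale entry.
        self.physical_interfaces = {}
        for ref in obj.get('physical_interface_refs', []):
            self.physical_interfaces[ref['uuid']] = ref['attr']

cache = ApplianceCache()
cache.update({'physical_interface_refs': [{'uuid': 'pi-1', 'attr': 'left'}]})
cache.update({'physical_interface_refs': []})
assert cache.physical_interfaces == {}  # the stale 'pi-1' entry is gone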
114 changes: 80 additions & 34 deletions src/config/svc-monitor/svc_monitor/physical_service_manager.py
@@ -8,6 +8,7 @@
ServiceApplianceSM,
PhysicalInterfaceSM,
ServiceInstanceSM,
ServiceTemplateSM,
PortTupleSM)
from cfgm_common import svc_info

@@ -17,46 +18,35 @@ class PhysicalServiceManager(InstanceManager):
def create_service(self, st, si):
if not self.validate_network_config(st, si):
return
# if the service is already initialized, do nothing
if self.check_service(si):
return

# get service appliances from service template
sa_set = st.service_appliance_set
if not sa_set:
if not st.service_appliance_set:
self.logger.error("Can't find service appliances set")
return
service_appliance_set = ServiceApplianceSetSM.get(sa_set)
service_appliances = service_appliance_set.service_appliances
sa_set_obj = ServiceApplianceSetSM.get(st.service_appliance_set)
sa_list = list(sa_set_obj.service_appliances)

# validation
if not service_appliances:
if not sa_list:
self.logger.error("Can't find service appliances")
return

service_appliances = list(service_appliances)
si_obj = ServiceInstanceSM.get(si.uuid)

# clean up all existing state before creating the new service
self.clean_service(si)
# create a fake VM for the schema transformer to use
vm_uuid_list = list(si_obj.virtual_machines)
vm_list = [None]*si.max_instances
for vm_uuid in vm_uuid_list:
vm = VirtualMachineSM.get(vm_uuid)
if not vm:
continue
if (vm.index + 1) > si.max_instances:
self.delete_service(vm)
continue
vm_list[vm.index] = vm_uuid

# get the port-tuple
pt_list = [None]*si.max_instances
pts = list(si.port_tuples)
for i in range(0, len(pts)):
pt_list[i] = pts[i]

if si.max_instances > len(service_appliances):
if si.max_instances > len(sa_list):
self.logger.info(
"There are not enough Service appliance \
for that Service instance "+si.uuid)
return
for idx, sa_uuid in enumerate(service_appliances):
for idx, sa_uuid in enumerate(sa_list):
if idx >= si.max_instances:
return

@@ -86,23 +76,46 @@ def create_service(self, st, si):
si.state = "active"

def delete_service(self, vm):
if not vm.virtual_machine_interfaces:
return
vmi_list = list(vm.virtual_machine_interfaces)
pt_uuid = VirtualMachineInterfaceSM.get(vmi_list[0]).port_tuple
self.cleanup_pi_connections(vmi_list)
self.cleanup_svc_vm_ports(vmi_list)
try:
self._vnc_lib.port_tuple_delete(id=pt_uuid)
PortTupleSM.delete(pt_uuid)
except NoIdError:
pass
self.delete_vm(vm)

def clean_service(self, si):
self.cleanup_si_iip_connections(si)
vm_uuid_list = list(si.virtual_machines)
for vm_uuid in vm_uuid_list:
vm_obj = VirtualMachineSM.get(vm_uuid)
if vm_obj:
self.delete_vm(vm_obj)

def delete_vm(self, vm):
if vm.virtual_machine_interfaces:
vmi_list = list(vm.virtual_machine_interfaces)
pt_uuid = VirtualMachineInterfaceSM.get(vmi_list[0]).port_tuple
self.cleanup_pi_connections(vmi_list)
self.cleanup_svc_vm_ports(vmi_list)
try:
self._vnc_lib.port_tuple_delete(id=pt_uuid)
PortTupleSM.delete(pt_uuid)
except NoIdError:
pass
try:
self._vnc_lib.virtual_machine_delete(id=vm.uuid)
VirtualMachineSM.delete(vm.uuid)
except NoIdError:
pass

def cleanup_si_iip_connections(self, si):
iip_list = list(si.instance_ips)
for iip_id in iip_list:
try:
self._vnc_lib.ref_update('service-instance',
si.uuid,
'instance_ip_refs',
iip_id,
None,
'DELETE')
except:
pass

def cleanup_pi_connections(self, vmi_list):
for vmi_id in vmi_list:
try:
@@ -118,4 +131,37 @@ def cleanup_pi_connections(self, vmi_list):
pass

def check_service(self, si):
if si.max_instances > len(si.port_tuples):
return False

pt_list = list(si.port_tuples)
pi_list = []
all_possible_pi = []

for pt_uuid in pt_list:
pt_obj = PortTupleSM.get(pt_uuid)
for vmi_uuid in pt_obj.virtual_machine_interfaces:
vmi_obj = VirtualMachineInterfaceSM.get(vmi_uuid)
pi_list.append(vmi_obj.physical_interface)

st_obj = ServiceTemplateSM.get(si.service_template)
if not st_obj.service_appliance_set:
return False

sa_set_obj = ServiceApplianceSetSM.get(st_obj.service_appliance_set)
for sa_uuid in sa_set_obj.service_appliances:
sa_obj = ServiceApplianceSM.get(sa_uuid)
for key in sa_obj.physical_interfaces:
all_possible_pi.append(sa_obj.physical_interfaces[key])

if not pi_list and all_possible_pi and si.max_instances > 0:
return False

if not all_possible_pi and pi_list:
return False

for pi_uuid in pi_list:
if pi_uuid not in all_possible_pi:
return False

return True
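
Taken together, the new check_service above decides whether the service instance is already fully wired: there must be at least max_instances port tuples, and every physical interface referenced by their VMIs must still belong to an appliance in the template's appliance set. A condensed, self-contained sketch of that decision using plain data structures in place of the *SM caches (names are illustrative):

def service_already_inited(max_instances, port_tuple_pis, appliance_pis):
    # Not enough port tuples yet: the service still needs (re)creation.
    if max_instances > len(port_tuple_pis):
        return False
    # Physical interfaces actually in use by the port tuples' VMIs.
    used_pis = {pi for pt in port_tuple_pis for pi in pt}
    # Appliances offer interfaces but nothing is wired yet.
    if not used_pis and appliance_pis and max_instances > 0:
        return False
    # Something is wired but no appliance backs it any more.
    if used_pis and not appliance_pis:
        return False
    # Every in-use interface must still belong to the appliance set.
    return used_pis <= set(appliance_pis)

# One port tuple on pi-1/pi-2, both still offered by the appliance set:
assert service_already_inited(1, [['pi-1', 'pi-2']], ['pi-1', 'pi-2', 'pi-3'])
# pi-2 was removed from the appliance set, so the service must be rebuilt:
assert not service_already_inited(1, [['pi-1', 'pi-2']], ['pi-1', 'pi-3'])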
5 changes: 3 additions & 2 deletions src/config/svc-monitor/svc_monitor/reaction_map.py
@@ -1,7 +1,7 @@
REACTION_MAP = {
"service_appliance_set": {
'self': [],
'service_appliance': []
'service_appliance': ['service_template']
},
"service_appliance": {
'self': ['service_appliance_set','physical_interface'],
@@ -37,6 +37,7 @@
'virtual_machine_interface' : [],
'service_health_check': [],
'interface_route_table': [],
'service_template': [],
},
"instance_ip": {
'self': [],
@@ -52,6 +53,7 @@
},
"service_template": {
'self': [],
'service_appliance_set': ['service_instance'],
},
"physical_router": {
'self': [],
@@ -107,4 +109,3 @@
'self': [],
},
}
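
The two reaction_map additions are what make the new check reachable at runtime: a change on a service_appliance now re-evaluates the parent set's service templates, and a template change re-evaluates its service instances, whose create_service path calls check_service. A minimal illustrative walk over such a table (svc-monitor's real dependency tracker is more involved; this only shows the ripple):

REACTION = {
    'service_appliance': {'self': ['service_appliance_set']},
    'service_appliance_set': {'service_appliance': ['service_template']},
    'service_template': {'service_appliance_set': ['service_instance']},
    'service_instance': {'service_template': []},
}

def propagate(obj_type, from_type='self'):
    # Follow the reaction table from the changed object type outward.
    for dep in REACTION.get(obj_type, {}).get(from_type, []):
        print('re-evaluate %s (triggered via %s)' % (dep, obj_type))
        propagate(dep, from_type=obj_type)

propagate('service_appliance')
# -> service_appliance_set -> service_template -> service_instance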
