Commit
support is added to create the fab network in the distributed switch
Change-Id: I581ae4d9079ddc1a2fed9c47dad0847062887380
ymariappan committed Apr 30, 2015
1 parent 6fe8eb6 commit 0401378
Showing 3 changed files with 180 additions and 5 deletions.
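
Note: the new code path is driven by two additions to the vcenter block of the testbed (dv_switch_fab and dv_port_group_fab) and by leaving fabric_vswitch out of an ESXi host entry. A minimal sketch of such a configuration, with placeholder values; the key names match what prov_esxi and provision_dvs_fab read below, but the concrete layout and values are assumptions, not part of this commit:

# Illustrative testbed fragment; all values are placeholders.
vcenter_info = {
    'server': '10.0.0.10',
    'username': 'administrator@vsphere.local',
    'password': 'secret',
    'datacenter': 'contrail-dc',
    'cluster': 'contrail-cluster',
    # presence of dv_switch_fab enables the distributed-switch fab path
    'dv_switch_fab': {'dv_switch_name': 'dvs-fab'},
    'dv_port_group_fab': {
        'dv_portgroup_name': 'dvpg-fab',
        'number_of_ports': '16',
        'uplink': 'uplink1',
    },
}

esxi_info = {
    'esxi1': {
        'ip': '10.0.0.21',
        'datastore': '/vmfs/volumes/datastore1',
        # no 'fabric_vswitch' key: this host gets its fab network
        # from the distributed switch instead of a standard vswitch
    },
}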
30 changes: 26 additions & 4 deletions fabfile/tasks/provision.py
@@ -20,7 +20,7 @@
from fabfile.utils.vcenter import *
from fabfile.tasks.tester import setup_test_env
from fabfile.tasks.rabbitmq import setup_rabbitmq_cluster
from fabfile.tasks.vmware import provision_vcenter,\
from fabfile.tasks.vmware import provision_vcenter, provision_dvs_fab,\
        configure_esxi_network, create_esxi_compute_vm
from fabfile.utils.cluster import get_vgw_details, get_orchestrator,\
        get_vmware_details, get_esxi_vms_and_hosts
@@ -1921,17 +1921,39 @@ def prov_esxi(*args):
    else:
        host_list = esxi_info.keys()

    std_switch = False
    dv_switch_fab = False
    power_on = False

    for host in host_list:
        with settings(host=host):
            if host in esxi_info.keys():
                apply_esxi_defaults(esxi_info[host])
                configure_esxi_network(esxi_info[host])
                if 'dv_switch_fab' in vcenter_info.keys():
                    if not 'fabric_vswitch' in esxi_info[host].keys():
                        dv_switch_fab = True
                        std_switch = False
                    else:
                        std_switch = True
                else:
                    std_switch = True
                if (std_switch == True):
                    apply_esxi_defaults(esxi_info[host])
                    configure_esxi_network(esxi_info[host])
                    power_on = True
                else:
                    apply_esxi_defaults(esxi_info[host])
                    esxi_info[host]['fabric_vswitch'] = None
                    power_on = False
                if orch == 'openstack':
                    create_esxi_compute_vm(esxi_info[host], None)
                if orch == 'vcenter':
                    create_esxi_compute_vm(esxi_info[host], vcenter_info)
                    create_esxi_compute_vm(esxi_info[host], vcenter_info, power_on)
            else:
                print 'Info: esxi_hosts block does not have the esxi host.Exiting'

    if (dv_switch_fab == True):
        sleep(30)
        provision_dvs_fab(vcenter_info, esxi_info, host_list)
#end prov_compute_vm

@roles('build')
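For reference, the per-host branching added to prov_esxi above reduces to a small decision: if the vcenter block defines dv_switch_fab and the host entry has no fabric_vswitch, the ContrailVM is registered powered off and its fab network is attached later by provision_dvs_fab; otherwise the existing standard-vswitch flow runs and the VM is powered on immediately. A minimal sketch of that decision (the helper name is ours, purely illustrative):

def choose_fabric_path(vcenter_info, esxi_host):
    # Illustrative helper, not part of the commit: returns
    # (use_dvs_fab, power_on) for a single esxi_hosts entry.
    if 'dv_switch_fab' in vcenter_info and 'fabric_vswitch' not in esxi_host:
        # Distributed-switch fabric: register the ContrailVM powered off;
        # provision_dvs_fab attaches its NIC to the fab DV port group later.
        return True, False
    # Standard vswitch fabric: configure the ESXi network and power on now.
    return False, True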
126 changes: 126 additions & 0 deletions fabfile/tasks/vcenter_prov.py
@@ -5,6 +5,132 @@
import atexit
import time

class dvs_fab(object):
    def __init__(self, dvs_params):
        self.pyVmomi = __import__("pyVmomi")

        self.name = dvs_params['name']
        self.dvportgroup_name = dvs_params['dvportgroup_name']
        self.dvportgroup_num_ports = dvs_params['dvportgroup_num_ports']
        self.dvportgroup_uplink = dvs_params['dvportgroup_uplink']

        self.cluster_name = dvs_params['cluster_name']
        self.datacenter_name = dvs_params['datacenter_name']

        self.vcenter_server = dvs_params['vcenter_server']
        self.vcenter_username = dvs_params['vcenter_username']
        self.vcenter_password = dvs_params['vcenter_password']

        self.esxi_info = dvs_params['esxi_info']
        self.host_list = dvs_params['host_list']

        try:
            self.connect_to_vcenter()
            dvs = self.get_obj([self.pyVmomi.vim.DistributedVirtualSwitch], self.name)
            self.add_dvPort_group(self.service_instance, dvs, self.dvportgroup_name, self.dvportgroup_uplink)
            for host in self.host_list:
                vswitch = self.esxi_info[host]['fabric_vswitch']
                if vswitch == None:
                    vm_name = "ContrailVM" + "-" + self.datacenter_name + "-" + self.esxi_info[host]['ip']
                    self.add_vm_to_dvpg(self.service_instance, vm_name, dvs, self.dvportgroup_name)
        except self.pyVmomi.vmodl.MethodFault as error:
            print "Caught vmodl fault : " + error.msg
            return

    def get_obj(self, vimtype, name):
        """
        Get the vsphere object associated with a given text name
        """
        obj = None
        container = self.content.viewManager.CreateContainerView(self.content.rootFolder, vimtype, True)
        for c in container.view:
            if c.name == name:
                obj = c
                break
        return obj

    def get_dvs_portgroup(self, vimtype, portgroup_name, dvs_name):
        """
        Get the vsphere object associated with a given text name
        """
        obj = None
        container = self.content.viewManager.CreateContainerView(self.content.rootFolder, vimtype, True)
        for c in container.view:
            if c.name == portgroup_name:
                if c.config.distributedVirtualSwitch.name == dvs_name:
                    obj = c
                    break
        return obj

    def connect_to_vcenter(self):
        from pyVim import connect
        self.service_instance = connect.SmartConnect(host=self.vcenter_server,
                                                      user=self.vcenter_username,
                                                      pwd=self.vcenter_password,
                                                      port=443)
        self.content = self.service_instance.RetrieveContent()
        atexit.register(connect.Disconnect, self.service_instance)

    def wait_for_task(self, task, actionName='job', hideResult=False):
        while task.info.state == (self.pyVmomi.vim.TaskInfo.State.running or self.pyVmomi.vim.TaskInfo.State.queued):
            time.sleep(2)
        if task.info.state == self.pyVmomi.vim.TaskInfo.State.success:
            if task.info.result is not None and not hideResult:
                out = '%s completed successfully, result: %s' % (actionName, task.info.result)
                print out
            else:
                out = '%s completed successfully.' % actionName
                print out
        elif task.info.state == self.pyVmomi.vim.TaskInfo.State.error:
            out = 'Error - %s did not complete successfully: %s' % (actionName, task.info.error)
            raise ValueError(out)
        return task.info.result

    def add_dvPort_group(self, si, dv_switch, dv_port_name, dv_port_uplink):
        dv_pg = self.get_dvs_portgroup([self.pyVmomi.vim.dvs.DistributedVirtualPortgroup], dv_port_name, dv_switch.name)
        if dv_pg is not None:
            print("dv port group already exists")
            return dv_pg
        else:
            dv_pg_spec = self.pyVmomi.vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
            dv_pg_spec.name = dv_port_name
            dv_pg_spec.numPorts = int(self.dvportgroup_num_ports)
            dv_pg_spec.type = self.pyVmomi.vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
            dv_pg_spec.defaultPortConfig = self.pyVmomi.vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
            dv_pg_spec.defaultPortConfig.securityPolicy = self.pyVmomi.vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
            dv_pg_spec.defaultPortConfig.uplinkTeamingPolicy = self.pyVmomi.vim.VmwareUplinkPortTeamingPolicy()
            dv_pg_spec.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder = self.pyVmomi.vim.VMwareUplinkPortOrderPolicy()
            dv_pg_spec.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder.activeUplinkPort = dv_port_uplink
            task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
            self.wait_for_task(task, si)
            print "Successfully created DV Port Group ", dv_port_name

    def add_vm_to_dvpg(self, si, vm_name, dv_switch, dv_port_name):
        devices = []
        print "Adding Contrail VM: %s to the DV port group" % (vm_name)
        vm = self.get_obj([self.pyVmomi.vim.VirtualMachine], vm_name)
        for device in vm.config.hardware.device:
            if isinstance(device, self.pyVmomi.vim.vm.device.VirtualEthernetCard):
                nicspec = self.pyVmomi.vim.vm.device.VirtualDeviceSpec()
                nicspec.operation = self.pyVmomi.vim.vm.device.VirtualDeviceSpec.Operation.edit
                nicspec.device = device
                nicspec.device.wakeOnLanEnabled = True
                pg_obj = self.get_dvs_portgroup([self.pyVmomi.vim.dvs.DistributedVirtualPortgroup], dv_port_name, dv_switch.name)
                dvs_port_connection = self.pyVmomi.vim.dvs.PortConnection()
                dvs_port_connection.portgroupKey = pg_obj.key
                dvs_port_connection.switchUuid = pg_obj.config.distributedVirtualSwitch.uuid
                nicspec.device.backing = self.pyVmomi.vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
                nicspec.device.backing.port = dvs_port_connection
                devices.append(nicspec)
                break
        vmconf = self.pyVmomi.vim.vm.ConfigSpec(deviceChange=devices)
        task = vm.ReconfigVM_Task(vmconf)
        self.wait_for_task(task, si)
        print "Turning VM: %s On" % (vm_name)
        task = vm.PowerOn()
        self.wait_for_task(task, si)
        print "Successfully added ContrailVM:%s to the DV port group" % (vm_name)

class Vcenter(object):
    def __init__(self, vcenter_params):
        self.pyVmomi = __import__("pyVmomi")
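For reference, the task handling in dvs_fab follows the usual pyVmomi pattern: kick off a vCenter task (AddDVPortgroup_Task, ReconfigVM_Task, PowerOn), poll task.info.state until it leaves the queued/running states, then check for success or error. A self-contained sketch of that pattern (the helper name is ours; it assumes pyVmomi is installed):

import time
from pyVmomi import vim

def wait_for_vcenter_task(task, action_name='job'):
    # Poll until the task is no longer queued or running.
    while task.info.state in (vim.TaskInfo.State.queued,
                              vim.TaskInfo.State.running):
        time.sleep(2)
    if task.info.state == vim.TaskInfo.State.error:
        raise ValueError('%s did not complete successfully: %s'
                         % (action_name, task.info.error))
    return task.info.result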
29 changes: 28 additions & 1 deletion fabfile/tasks/vmware.py
@@ -84,7 +84,7 @@ def create_vmx (esxi_host, vm_name):
    return vmx_file
#end create_vmx

def create_esxi_compute_vm (esxi_host, vcenter_info):
def create_esxi_compute_vm (esxi_host, vcenter_info, power_on):
    '''Spawns contrail vm on openstack managed esxi server (non vcenter env)'''
    orch = get_orchestrator()
    datastore = esxi_host['datastore']
@@ -139,6 +139,10 @@ def create_esxi_compute_vm (esxi_host, vcenter_info):
    if out.failed:
        raise Exception("Unable to register VM %s on %s:%s" % (vm_name,
                        esxi_host['ip'], out))

    if (power_on == False):
        return

    out = run("vim-cmd vmsvc/power.on %s" % out)
    if out.failed:
        raise Exception("Unable to power on %s on %s:%s" % (vm_name,
@@ -157,6 +161,29 @@ def _template_substitute_write(template, vals, filename):
    outfile.close()
#end _template_substitute_write

@task
def provision_dvs_fab(vcenter_info, esxi_info, host_list):
    apt_install(['contrail-vmware-utils'])
    dvs_params = {}

    dvs_params['name'] = vcenter_info['dv_switch_fab']['dv_switch_name']
    dvs_params['dvportgroup_name'] = vcenter_info['dv_port_group_fab']['dv_portgroup_name']
    dvs_params['dvportgroup_num_ports'] = vcenter_info['dv_port_group_fab']['number_of_ports']
    dvs_params['dvportgroup_uplink'] = vcenter_info['dv_port_group_fab']['uplink']

    dvs_params['vcenter_server'] = vcenter_info['server']
    dvs_params['vcenter_username'] = vcenter_info['username']
    dvs_params['vcenter_password'] = vcenter_info['password']

    dvs_params['cluster_name'] = vcenter_info['cluster']
    dvs_params['datacenter_name'] = vcenter_info['datacenter']

    dvs_params['esxi_info'] = esxi_info
    dvs_params['host_list'] = host_list

    dvs_fab(dvs_params)
#end provision_dvs_fab

@task
def provision_vcenter(vcenter_info, hosts, clusters, vms, update_dvs):
    apt_install(['contrail-vmware-utils'])
