
Commit

Cache tier support/Storage provision fix
Closes-Bug: #1578753
Closes-Bug: #1579210
Added support for a Ceph SSD cache tier.
Fixed an issue in provisioning the stats daemon in a multi-openstack
node setup: the loop in the provision script iterated over the wrong
variable, which has been corrected.

Change-Id: I2a17f377f6ca2877562e8fabc77ec0c4d6f3eb87
Jeya ganesh babu J committed May 6, 2016
1 parent 994d16d commit 7df1195
Showing 3 changed files with 182 additions and 9 deletions.
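
For orientation before reading the diff: the new do_configure_ceph_cache_tier()
helper (in storagefs/setup.py below) attaches each ssd_tier_* pool as a
writeback cache in front of its backing pool using standard Ceph tiering
commands. A minimal sketch of that sequence follows; the pool names, the cache
size, and the attach_cache_tier() wrapper are illustrative, not part of this
change.

# Sketch only: mirrors the ceph CLI calls issued by do_configure_ceph_cache_tier().
import subprocess

def attach_cache_tier(backing_pool, tier_pool, target_max_bytes):
    """Attach tier_pool as a writeback cache tier in front of backing_pool."""
    commands = [
        ['ceph', 'osd', 'tier', 'add', backing_pool, tier_pool],
        ['ceph', 'osd', 'tier', 'cache-mode', tier_pool, 'writeback'],
        ['ceph', 'osd', 'tier', 'set-overlay', backing_pool, tier_pool],
        ['ceph', 'osd', 'pool', 'set', tier_pool, 'hit_set_type', 'bloom'],
        ['ceph', 'osd', 'pool', 'set', tier_pool, 'hit_set_count', '1'],
        ['ceph', 'osd', 'pool', 'set', tier_pool, 'hit_set_period', '3600'],
        ['ceph', 'osd', 'pool', 'set', tier_pool, 'target_max_bytes',
         str(target_max_bytes)],
        ['ceph', 'osd', 'pool', 'set', tier_pool,
         'min_read_recency_for_promote', '1'],
    ]
    for command in commands:
        subprocess.check_call(['sudo'] + command)

# Example call (illustrative names and size):
#     attach_cache_tier('volumes_hdd', 'ssd_tier_volumes_hdd', 1024 ** 4)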
2 changes: 2 additions & 0 deletions contrail_provisioning/storage/setup.py
@@ -81,6 +81,7 @@ def parse_args(self, args_str):
parser.add_argument("--orig-hostnames", help = "Actual Host names of storage nodes", nargs='+', type=str)
parser.add_argument("--service-dbpass", help = "DB password for Openstack cinder db user")
parser.add_argument("--region-name", help = "Region name of the cinder service")
parser.add_argument("--ssd-cache-tier", help = "Enable SSD cache tier")


self._args = parser.parse_args(self.remaining_argv)
@@ -147,6 +148,7 @@ def enable_storage(self):
storage_setup_args = storage_setup_args + " --orig-hostnames %s" %(' '.join(self._args.orig_hostnames))
storage_setup_args = storage_setup_args + " --service-dbpass %s" % self._args.service_dbpass
storage_setup_args = storage_setup_args + " --region-name %s" % self._args.region_name
storage_setup_args = storage_setup_args + " --ssd-cache-tier %s" % self._args.ssd_cache_tier

#Setup storage if storage is defined in testbed.py
with settings(host_string=self._args.storage_master, password=storage_master_passwd):
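Note on the new option: --ssd-cache-tier is parsed as a plain string, forwarded
verbatim on the storage setup command line above, and compared downstream
against the literal 'True' in do_configure_pools(). A minimal sketch of the
pass-through, with illustrative values:

ssd_cache_tier = 'True'                                     # string, not a bool
storage_setup_args = "--storage-master 10.1.1.1"            # illustrative base args
storage_setup_args = storage_setup_args + " --ssd-cache-tier %s" % ssd_cache_tier
# do_configure_pools() later tests: if ssd_cache_tier == 'True' and ...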
74 changes: 72 additions & 2 deletions contrail_provisioning/storage/storagefs/ceph_utils.py
@@ -77,6 +77,8 @@ class SetupCephUtils(object):
# Used during pool, virsh, pg/pgp count configurations
global ceph_pool_list
ceph_pool_list = []
global ceph_tier_list
ceph_tier_list = []

# Function to check if Chassis configuration is disabled or not
# Returns False if enabled
@@ -1398,12 +1400,13 @@ def do_pool_config(self, input_crush, storage_hostnames,
# Sets ruleset based on pool/chassis configuration
def do_configure_pools(self, storage_hostnames, storage_disk_config,
storage_ssd_disk_config, chassis_config,
replica_size = None):
replica_size = None, ssd_cache_tier = False):
global host_hdd_dict
global host_ssd_dict
global hdd_pool_count
global ssd_pool_count
global ceph_pool_list
global ceph_tier_list
global chassis_hdd_ruleset
global chassis_ssd_ruleset

@@ -1612,6 +1615,73 @@ def do_configure_pools(self, storage_hostnames, storage_disk_config,
pool_index = pool_index + 1
if pool_index >= ssd_pool_count:
break

if ssd_cache_tier == 'True' and storage_ssd_disk_config[0] != 'none':
pool_index = 0
while True:
if hdd_pool_count == 0:
pool_present = self.exec_local('sudo rados lspools | \
grep -w ssd_tier | wc -l')
if pool_present == '0':
self.exec_local('sudo rados mkpool ssd_tier')
self.exec_local('sudo ceph osd pool set \
ssd_tier crush_ruleset %d'
%(host_ssd_dict[('ruleid', '%s'
%(pool_index))]))
if host_ssd_dict[('hostcount', '%s' %(pool_index))] <= 1:
self.exec_local('sudo ceph osd pool set ssd_tier size %s'
%(REPLICA_ONE))
elif replica_size != 'None':
self.exec_local('sudo ceph osd pool set ssd_tier size %s'
%(replica_size))
else:
self.exec_local('sudo ceph osd pool set ssd_tier size %s'
%(REPLICA_DEFAULT))
self.set_pg_pgp_count(host_ssd_dict[('totalcount', '%s'
%(pool_index))], 'ssd_tier',
host_ssd_dict[('hostcount', '%s'
%(pool_index))])
ceph_tier_list.append('ssd_tier')
else:
if hdd_pool_count == ssd_pool_count:
pool_name = host_hdd_dict[('poolname',
'%s' %(pool_index))]
rule_id = host_ssd_dict[('ruleid',
'%s'%(pool_index))]
host_count = host_ssd_dict[('hostcount',
'%s' %(pool_index))]
total_count = host_ssd_dict[('totalcount',
'%s' %(pool_index))]
else:
pool_name = host_hdd_dict[('poolname',
'%s' %(pool_index))]
rule_id = host_ssd_dict[('ruleid','0')]
host_count = host_ssd_dict[('hostcount', '0')]
total_count = host_ssd_dict[('totalcount', '0')]
pool_present = self.exec_local('sudo rados lspools | \
grep -w ssd_tier_%s | wc -l'
%(pool_name))
if pool_present == '0':
self.exec_local('sudo rados mkpool ssd_tier_%s'
%(pool_name))
self.exec_local('sudo ceph osd pool set \
ssd_tier_%s crush_ruleset %d'
%(pool_name, rule_id))
if host_hdd_dict[('hostcount', '%s' %(pool_index))] <= 1:
self.exec_local('sudo ceph osd pool set ssd_tier_%s size %s'
%(pool_name, REPLICA_ONE))
elif replica_size != 'None':
self.exec_local('sudo ceph osd pool set ssd_tier_%s size %s'
%(pool_name, replica_size))
else:
self.exec_local('sudo ceph osd pool set ssd_tier_%s size %s'
%(pool_name, REPLICA_DEFAULT))
self.set_pg_pgp_count(total_count,
'ssd_tier_%s' %(pool_name), host_count)
ceph_tier_list.append('ssd_tier_%s' %(pool_name))
pool_index = pool_index + 1
if pool_index >= hdd_pool_count:
break
# Without HDD/SSD pool
else:
# Find the host count
@@ -1669,7 +1739,7 @@ def do_configure_pools(self, storage_hostnames, storage_disk_config,
else:
self.exec_local('sudo ceph osd pool set images crush_ruleset 0')
self.exec_local('sudo ceph osd pool set volumes crush_ruleset 0')
return ceph_pool_list
return {'ceph_pool_list': ceph_pool_list, 'ceph_tier_list': ceph_tier_list}
#end do_configure_pools()

def create_and_apply_cinder_patch(self):
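Pool-naming note: when no dedicated HDD pools are configured (hdd_pool_count is
0), a single cache pool named ssd_tier is created; otherwise each backing HDD
pool <name> gets a matching cache pool ssd_tier_<name>, placed on the SSD CRUSH
ruleset. do_configure_pools() now returns both lists so the later auth and
tiering steps can pair them up by index. A minimal sketch of that pairing, with
illustrative pool names:

ceph_pool_list = ['volumes_hdd', 'volumes_ssd']        # illustrative backing pools
ceph_tier_list = ['ssd_tier_volumes_hdd']              # illustrative cache pools
for index, pool_name in enumerate(ceph_pool_list):
    tier_name = ceph_tier_list[index] if index < len(ceph_tier_list) else ''
    print(pool_name, '->', tier_name or 'no cache tier')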
115 changes: 108 additions & 7 deletions contrail_provisioning/storage/storagefs/setup.py
@@ -649,6 +649,7 @@ def create_osd_map_config(self):
# Top level function for crush map changes
def do_crush_map_pool_config(self):
global ceph_pool_list
global ceph_tier_list

crush_setup_utils = SetupCephUtils()

@@ -675,12 +676,15 @@ def do_crush_map_pool_config(self):
crush_setup_utils.apply_crush(crush_map)

# Configure Pools
ceph_pool_list = crush_setup_utils.do_configure_pools(
result = crush_setup_utils.do_configure_pools(
self._args.storage_hostnames,
self._args.storage_disk_config,
self._args.storage_ssd_disk_config,
self._args.storage_chassis_config,
self._args.storage_replica_size)
self._args.storage_replica_size,
self._args.ssd_cache_tier)
ceph_pool_list = result['ceph_pool_list']
ceph_tier_list = result['ceph_tier_list']
#end do_crush_map_pool_config()

# Function for NFS cinder configuration
@@ -1841,13 +1845,36 @@ def do_configure_ceph_auth(self):

if self.is_multi_pool_disabled() == FALSE or \
self.is_ssd_pool_disabled() == FALSE:
index = 0
for pool_name in ceph_pool_list:
list_length = len(ceph_tier_list)
if index < list_length:
tier_name = ceph_tier_list[index]
else:
tier_name = ''
# Run local for storage-master for HDD/SSD pools
local('sudo ceph auth get-or-create client.%s mon \
if tier_name == '':
local('sudo ceph auth get-or-create client.%s mon \
\'allow r\' osd \
\'allow class-read object_prefix rbd_children, allow rwx pool=%s, allow rx pool=images\' \
-o /etc/ceph/client.%s.keyring'
%(pool_name, pool_name, pool_name))
else:
auth_present = local('sudo ceph auth list 2>&1 | \
grep -w %s| wc -l' %(pool_name),
shell='/bin/bash',
capture=True)
if auth_present != '0':
local('sudo ceph auth caps client.%s mon \
\'allow r\' osd \
\'allow class-read object_prefix rbd_children, allow rwx pool=%s, allow rx pool=images, allow rwx pool=%s\' \
-o /etc/ceph/client.%s.keyring'
%(pool_name, pool_name, tier_name, pool_name))
local('sudo ceph auth get-or-create client.%s mon \
\'allow r\' osd \
\'allow class-read object_prefix rbd_children, allow rwx pool=%s, allow rx pool=images, allow rwx pool=%s\' \
-o /etc/ceph/client.%s.keyring'
%(pool_name, pool_name, tier_name, pool_name))
local('sudo openstack-config --set %s client.%s keyring \
/etc/ceph/client.%s.keyring'
%(CEPH_CONFIG_FILE, pool_name, pool_name))
@@ -1861,13 +1888,22 @@ def do_configure_ceph_auth(self):
self._args.storage_os_host_tokens):
with settings(host_string = 'root@%s' %(entries),
password = entry_token):
run('sudo ceph -k %s auth get-or-create \
if tier_name == '':
run('sudo ceph -k %s auth get-or-create \
client.%s mon \
\'allow r\' osd \
\'allow class-read object_prefix rbd_children, allow rwx pool=%s, allow rx pool=images\' \
-o /etc/ceph/client.%s.keyring'
%(CEPH_ADMIN_KEYRING, pool_name,
pool_name, pool_name))
else:
run('sudo ceph -k %s auth get-or-create \
client.%s mon \
\'allow r\' osd \
\'allow class-read object_prefix rbd_children, allow rwx pool=%s, allow rx pool=images, allow rwx pool=%s\' \
-o /etc/ceph/client.%s.keyring'
%(CEPH_ADMIN_KEYRING, pool_name,
pool_name, tier_name, pool_name))
run('sudo openstack-config --set %s client.%s \
keyring /etc/ceph/client.%s.keyring'
%(CEPH_CONFIG_FILE, pool_name, pool_name))
@@ -1881,23 +1917,84 @@ def do_configure_ceph_auth(self):
if entries != self._args.storage_master:
with settings(host_string = 'root@%s' %(entries),
password = entry_token):
run('sudo ceph -k %s auth get-or-create \
if tier_name == '':
run('sudo ceph -k %s auth get-or-create \
client.%s mon \
\'allow r\' osd \
\'allow class-read object_prefix rbd_children, allow rwx pool=%s, allow rx pool=images\' \
-o /etc/ceph/client.%s.keyring'
%(CEPH_ADMIN_KEYRING, pool_name,
pool_name, pool_name))
else:
run('sudo ceph -k %s auth get-or-create \
client.%s mon \
\'allow r\' osd \
\'allow class-read object_prefix rbd_children, allow rwx pool=%s, allow rx pool=images, allow rwx pool=%s\' \
-o /etc/ceph/client.%s.keyring'
%(CEPH_ADMIN_KEYRING, pool_name,
pool_name, tier_name, pool_name))
run('sudo openstack-config --set %s client.%s \
keyring /etc/ceph/client.%s.keyring'
%(CEPH_CONFIG_FILE, pool_name, pool_name))
run('sudo ceph-authtool -p -n client.%s \
/etc/ceph/client.%s.keyring > \
/etc/ceph/client.%s'
%(pool_name, pool_name, pool_name))
index += 1
return
#end do_configure_ceph_auth()
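
Auth note: when a pool has a cache tier, its client key is also granted rwx on
the tier pool, since client I/O is served through the cache pool once the
overlay is set. A minimal sketch of the capability string the loop above now
builds; the pool names are illustrative:

pool_name = 'volumes_hdd'                     # illustrative backing pool
tier_name = 'ssd_tier_volumes_hdd'            # illustrative cache pool
osd_caps = ('allow class-read object_prefix rbd_children, '
            'allow rwx pool=%s, allow rx pool=images, allow rwx pool=%s'
            % (pool_name, tier_name))
# sudo ceph auth get-or-create client.volumes_hdd mon 'allow r' osd "<osd_caps>" \
#      -o /etc/ceph/client.volumes_hdd.keyring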

# Function to configure Ceph cache tier
def do_configure_ceph_cache_tier(self):
global ceph_pool_list
global ceph_tier_list
num_hdd_pool = len(ceph_tier_list)
if num_hdd_pool == 0:
return
index = 0
for entry in ceph_pool_list:
if index >= num_hdd_pool:
return
total_ssd_size_st = local('sudo ceph df | grep -w %s | \
awk \'{print $5}\''
%(ceph_tier_list[index]), capture=True,
shell='/bin/bash')
size_mult_st = total_ssd_size_st[len(total_ssd_size_st) - 1]
if size_mult_st == 'T':
size_mult = 1024 * 1024 * 1024 * 1024
elif size_mult_st == 'G':
size_mult = 1024 * 1024 * 1024
elif size_mult_st == 'M':
size_mult = 1024 * 1024
elif size_mult_st == 'K':
size_mult = 1024
total_ssd_size = int(total_ssd_size_st[:-1])
total_ssd_size = total_ssd_size * size_mult
if self._args.storage_replica_size != 'None':
replica_size = int(self._args.storage_replica_size)
else:
replica_size = 2
cache_size = total_ssd_size / replica_size
local('sudo ceph osd tier add %s %s'
%(ceph_pool_list[index], ceph_tier_list[index]))
local('sudo ceph osd tier cache-mode %s writeback'
%(ceph_tier_list[index]))
local('sudo ceph osd tier set-overlay %s %s'
%(ceph_pool_list[index], ceph_tier_list[index]))
local('sudo ceph osd pool set %s hit_set_type bloom'
%(ceph_tier_list[index]))
local('sudo ceph osd pool set %s hit_set_count 1'
%(ceph_tier_list[index]))
local('sudo ceph osd pool set %s hit_set_period 3600'
%(ceph_tier_list[index]))
local('sudo ceph osd pool set %s target_max_bytes %s'
%(ceph_tier_list[index], cache_size))
local('ceph osd pool set %s min_read_recency_for_promote 1'
%(ceph_tier_list[index]))
index += 1
return
#end do_configure_ceph_cache_tier
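
Sizing note: do_configure_ceph_cache_tier() reads the tier pool's size field
from `ceph df`, converts the K/M/G/T suffix to bytes, and divides by the
replica size to derive target_max_bytes for the cache. A short worked example
with illustrative numbers (2T reported, replica size 2):

total_ssd_size_st = '2T'                                  # illustrative ceph df value
multipliers = {'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3, 'T': 1024 ** 4}
size_mult = multipliers[total_ssd_size_st[-1]]
total_ssd_size = int(total_ssd_size_st[:-1]) * size_mult  # 2199023255552 bytes
replica_size = 2
cache_size = total_ssd_size // replica_size               # 1099511627776 (~1 TiB)
# cache_size is then passed to: ceph osd pool set <tier> target_max_bytes <cache_size>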

# Function for Virsh/Cinder configurations for Ceph
def do_configure_virsh_cinder_rbd(self):

@@ -2412,7 +2509,7 @@ def do_configure_lvm(self):
commonport.RABBIT_PORT))
run('sudo openstack-config --set %s %s %s \
mysql://cinder:%s@%s:33306/cinder'
%(CINDER_CONFIG_FILE,
%(CINDER_CONFIG_FILE,
sql_section, sql_key,
self._args.service_dbpass,
self._args.cinder_vip))
@@ -3087,7 +3184,7 @@ def do_configure_stats_daemon(self):
%(CONTRAIL_STORAGE_STATS_CONF, \
self._args.cfg_host))
if self._args.storage_os_hosts[0] != 'none':
for os_entry in zip(self._args.os_hosts):
for os_entry in self._args.storage_os_hosts:
if os_entry == entries:
master_node = 1
break
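Fix note: the old loop iterated zip(self._args.os_hosts), which both referenced
the wrong attribute and wrapped every host in a one-element tuple, so the
os_entry == entries comparison could never be True and master_node was never
set here. A minimal illustration of the tuple-vs-string mismatch, with
illustrative host values:

storage_os_hosts = ['10.1.1.2', '10.1.1.3']                   # illustrative
entries = '10.1.1.2'
print([item == entries for item in zip(storage_os_hosts)])    # [False, False] -- tuples
print([item == entries for item in storage_os_hosts])         # [True, False]  -- fixed loop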
@@ -3557,6 +3654,9 @@ def do_storage_setup(self):
# Configure glance to use Ceph
self.do_configure_glance_rbd()

# Configure Cache tier
self.do_configure_ceph_cache_tier()

# Configure base cinder
self.do_configure_cinder()

@@ -3759,6 +3859,7 @@ def _parse_args(self, args_str):
parser.add_argument("--orig-hostnames", help = "Actual Host names of storage nodes", nargs='+', type=str)
parser.add_argument("--service-dbpass", help = "Database password for openstack service db user.")
parser.add_argument("--region-name", help = "Region name of the cinder service")
parser.add_argument("--ssd-cache-tier", help = "Enable SSD cache tier")

self._args = parser.parse_args(remaining_argv)

