#!/usr/bin/python
import argparse
import ConfigParser
import platform
import os
import sys
import time
import re
import string
import socket
import netifaces, netaddr
import subprocess
import fnmatch
import struct
import shutil
import json
from pprint import pformat
import xml.etree.ElementTree as ET
import commonport
import StringIO
import tempfile
from fabric.api import local, env, sudo, run, settings
from fabric.operations import get, put
from fabric.context_managers import lcd, settings
from contrail_provisioning.storage.storagefs.ceph_utils import SetupCephUtils
from distutils.version import LooseVersion

sys.path.insert(0, os.getcwd())

class SetupCeph(object):
    # Global defines for the configuration files.
    # Use these variables instead of the filenames directly in the script
    # to avoid typos and improve readability.
    # The following are read-only globals.
    # Add read/writable global variables at the end of this section.
    global CINDER_CONFIG_FILE
    CINDER_CONFIG_FILE = '/etc/cinder/cinder.conf'
    global NOVA_CONFIG_FILE
    NOVA_CONFIG_FILE = '/etc/nova/nova.conf'
    global CEPH_CONFIG_FILE
    CEPH_CONFIG_FILE = '/etc/ceph/ceph.conf'
    global CEPH_ADMIN_KEYRING
    CEPH_ADMIN_KEYRING = '/etc/ceph/ceph.client.admin.keyring'
    global SYSLOGD_CONF
    SYSLOGD_CONF = '/etc/rsyslog.d/50-default.conf'
    global COLLECTOR_CONF
    COLLECTOR_CONF = '/etc/contrail/contrail-collector.conf'
    global COLLECTOR_TMP_CONF
    COLLECTOR_TMP_CONF = '/tmp/contrail-collector.conf'
    global NFS_SERVER_LIST_FILE
    NFS_SERVER_LIST_FILE = '/etc/cinder/nfs_server_list.txt'
    global GLANCE_API_CONF
    GLANCE_API_CONF = '/etc/glance/glance-api.conf'
    global VOLUMES_KEYRING
    VOLUMES_KEYRING = '/etc/ceph/client.volumes.keyring'
    global CLIENT_VOLUMES
    CLIENT_VOLUMES = '/etc/ceph/client.volumes'
    global IMAGES_KEYRING
    IMAGES_KEYRING = '/etc/ceph/client.images.keyring'
    global STORAGE_NODEMGR_CONF
    STORAGE_NODEMGR_CONF = '/etc/contrail/contrail-storage-nodemgr.conf'
    global CENTOS_INITD_CINDER_VOLUME
    CENTOS_INITD_CINDER_VOLUME = '/etc/init.d/openstack-cinder-volume'
    global CENTOS_TMP_CINDER_VOLUME
    CENTOS_TMP_CINDER_VOLUME = '/tmp/openstack-cinder-volume.tmp'
    global CEPH_REST_API_CONF
    CEPH_REST_API_CONF = '/etc/init/ceph-rest-api.conf'
    global CEPH_VOLUME_KEYRING
    CEPH_VOLUME_KEYRING = '/etc/ceph/client.volumes.keyring'
    global CEPH_BOOTSTRAP_OSD_KEYRING
    CEPH_BOOTSTRAP_OSD_KEYRING = '/var/lib/ceph/bootstrap-osd/ceph.keyring'
    global CONTRAIL_STORAGE_STATS_INIT
    CONTRAIL_STORAGE_STATS_INIT = '/etc/init/contrail-storage-stats.conf'
    global CONTRAIL_STORAGE_STATS_TMP_INIT
    CONTRAIL_STORAGE_STATS_TMP_INIT = '/tmp/contrail-storage-stats.conf'
    global CONTRAIL_STORAGE_STATS_CONF
    CONTRAIL_STORAGE_STATS_CONF = '/etc/contrail/contrail-storage-nodemgr.conf'
    global CINDER_VOLUME_INIT_CONFIG
    CINDER_VOLUME_INIT_CONFIG = '/etc/init/cinder-volume.conf'
    global LIBVIRT_BIN_INIT_CONFIG
    LIBVIRT_BIN_INIT_CONFIG = '/etc/init/libvirt-bin.conf'
    global LIBVIRT_BIN_INIT_CFG_BAK
    LIBVIRT_BIN_INIT_CFG_BAK = '/tmp/libvirt-bin.conf.bak'
    global CINDER_PATCH_FILE
    CINDER_PATCH_FILE = '/tmp/manager.patch'
    global CINDER_VOLUME_MGR_PY
    CINDER_VOLUME_MGR_PY = '/usr/lib/python2.7/dist-packages/cinder/volume/manager.py'
    global OPENSTACK_RC_FILE
    OPENSTACK_RC_FILE = '/etc/contrail/openstackrc'
    global SYSFS_CONF
    SYSFS_CONF = '/etc/sysfs.conf'
    global LIBVIRT_AA_HELPER_TMP_FILE
    LIBVIRT_AA_HELPER_TMP_FILE = '/tmp/usr.lib.libvirt.virt-aa-helper'
    global LIBVIRT_AA_HELPER_FILE
    LIBVIRT_AA_HELPER_FILE = '/etc/apparmor.d/usr.lib.libvirt.virt-aa-helper'
    global RBD_WORKERS
    RBD_WORKERS = 120
    global RBD_STORE_CHUNK_SIZE
    RBD_STORE_CHUNK_SIZE = 8
    global TRUE
    TRUE = 1
    global FALSE
    FALSE = 0
    global MAX_SECTORS_KB
    MAX_SECTORS_KB = 4096
    global MAX_NR_REQS
    MAX_NR_REQS = 512
    global MAX_READ_AHEAD
    MAX_READ_AHEAD = 4096
    global IO_NOOP_SCHED
    IO_NOOP_SCHED = 'noop'
    global KILO_VERSION
    KILO_VERSION = 2015
    global LIBERTY_VERSION
    LIBERTY_VERSION = 2016
    # Denotes the OS type (Ubuntu or CentOS).
    global pdist
    pdist = platform.dist()[0]
    # Maximum number of monitors to create
    global MAX_MONS
    MAX_MONS = 3
    # RBD cache size
    global RBD_CACHE_SIZE
    RBD_CACHE_SIZE = 536870912
    # Ceph OP thread count
    global CEPH_OP_THREADS
    CEPH_OP_THREADS = 4
    # Ceph disk thread count
    global CEPH_DISK_THREADS
    CEPH_DISK_THREADS = 2
    # Global variables used across functions.
    # The following are read/writable globals.
    # Ceph storage disk list, populated during journal initialization
    global storage_disk_list
    storage_disk_list = []
    # OSD count, populated during HDD/SSD pool configuration.
    # Used during pg/pgp count configuration.
    global osd_count
    osd_count = 0
    # LVM type and name lists, populated during LVM configuration.
    # Used during cinder type creation.
    global cinder_lvm_type_list
    cinder_lvm_type_list = []
    global cinder_lvm_name_list
    cinder_lvm_name_list = []
    # Indicates whether Ceph storage is enabled
    global configure_with_ceph
    configure_with_ceph = 0
    # Indicates whether Cinder NFS storage is enabled
    global create_nfs_disk_volume
    create_nfs_disk_volume = 0
    global cinder_version
    cinder_version = 2014
    # Mon host string
    global ceph_mon_hosts
    ceph_mon_hosts = ''
    # Mon host list
    global ceph_mon_hosts_list
    ceph_mon_hosts_list = []
    global ceph_mon_entry_list
    ceph_mon_entry_list = []
    # Monitor count
    global ceph_mon_count
    ceph_mon_count = 0
    # All-host list
    global ceph_all_hosts
    ceph_all_hosts = ''
    global storage_only_node
    storage_only_node = []
    global sql_section
    sql_section = 'DEFAULT'
    global sql_key
    sql_key = 'sql_connection'
    global rabbit_host_section
    rabbit_host_section = 'DEFAULT'
    global cinder_command
    cinder_command = 'cinder'
    global glance_store
    glance_store = 'DEFAULT'
    global glance_known_store
    glance_known_store = 'known_stores'
    global keystone_endpt_create
    keystone_endpt_create = 'keystone endpoint-create'
    global keystone_svc_create
    keystone_svc_create = 'keystone service-create'
    global keystone_endpt_list
    keystone_endpt_list = 'keystone endpoint-list'
    global keystone_svc_list
    keystone_svc_list = 'keystone service-list'
    # Creates and runs a script that finds the upstart-managed mons
    # on the local node and stops them.
    def reset_mon_local_list(self):
        local('echo "get_local_daemon_ulist() {" > /tmp/mon_local_list.sh')
        local('echo "if [ -d \\"/var/lib/ceph/mon\\" ]; then" >> \
              /tmp/mon_local_list.sh')
        local('echo "for i in \`find -L /var/lib/ceph/mon -mindepth 1 \
              -maxdepth 1 -type d -printf \'%f\\\\\\n\'\`; do" >> \
              /tmp/mon_local_list.sh')
        local('echo "if [ -e \\"/var/lib/ceph/mon/\$i/upstart\\" ]; then" >> \
              /tmp/mon_local_list.sh')
        local('echo "id=\`echo \$i | sed \'s/[^-]*-//\'\`" >> \
              /tmp/mon_local_list.sh')
        local('echo "sudo stop ceph-mon id=\$id" >> /tmp/mon_local_list.sh')
        local('echo "fi done fi" >> /tmp/mon_local_list.sh')
        local('echo "}" >> /tmp/mon_local_list.sh')
        local('echo "get_local_daemon_ulist" >> /tmp/mon_local_list.sh')
        local('echo "exit 0" >> /tmp/mon_local_list.sh')
        local('chmod a+x /tmp/mon_local_list.sh')
        local('/tmp/mon_local_list.sh')
    #end reset_mon_local_list()
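    # For reference, the generated /tmp/mon_local_list.sh (reconstructed
    # from the echo commands above) looks like:
    #   get_local_daemon_ulist() {
    #   if [ -d "/var/lib/ceph/mon" ]; then
    #   for i in `find -L /var/lib/ceph/mon -mindepth 1 -maxdepth 1 -type d -printf '%f\n'`; do
    #   if [ -e "/var/lib/ceph/mon/$i/upstart" ]; then
    #   id=`echo $i | sed 's/[^-]*-//'`
    #   sudo stop ceph-mon id=$id
    #   fi done fi
    #   }
    #   get_local_daemon_ulist
    #   exit 0
    # The osd/remote variants below generate the same script shape.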
    # Creates and runs a script that finds the upstart-managed osds
    # on the local node and stops them.
    def reset_osd_local_list(self):
        local('echo "get_local_daemon_ulist() {" > /tmp/osd_local_list.sh')
        local('echo "if [ -d \\"/var/lib/ceph/osd\\" ]; then" >> \
              /tmp/osd_local_list.sh')
        local('echo "for i in \`find -L /var/lib/ceph/osd -mindepth 1 \
              -maxdepth 1 -type d -printf \'%f\\\\\\n\'\`; do" >> \
              /tmp/osd_local_list.sh')
        local('echo "if [ -e \\"/var/lib/ceph/osd/\$i/upstart\\" ]; then" >> \
              /tmp/osd_local_list.sh')
        local('echo "id=\`echo \$i | sed \'s/[^-]*-//\'\`" >> \
              /tmp/osd_local_list.sh')
        local('echo "sudo stop ceph-osd id=\$id" >> /tmp/osd_local_list.sh')
        local('echo "fi done fi" >> /tmp/osd_local_list.sh')
        local('echo "}" >> /tmp/osd_local_list.sh')
        local('echo "get_local_daemon_ulist" >> /tmp/osd_local_list.sh')
        local('echo "exit 0" >> /tmp/osd_local_list.sh')
        local('chmod a+x /tmp/osd_local_list.sh')
        local('/tmp/osd_local_list.sh')
    #end reset_osd_local_list()
    # Creates and runs a script that finds the upstart-managed mons
    # on the remote node and stops them.
    def reset_mon_remote_list(self):
        run('echo "get_local_daemon_ulist() {" > /tmp/mon_local_list.sh')
        run('echo "if [ -d \\\\"/var/lib/ceph/mon\\\\" ]; then" >> \
            /tmp/mon_local_list.sh')
        run('echo "for i in \\\\`find -L /var/lib/ceph/mon -mindepth 1 \
            -maxdepth 1 -type d -printf \'%f\\\\\\n\'\\\\`; do" >> \
            /tmp/mon_local_list.sh')
        run('echo "if [ -e \\\\"/var/lib/ceph/mon/\\\\$i/upstart\\\\" ]; \
            then" >> /tmp/mon_local_list.sh')
        run('echo "id=\\\\`echo \\\\$i | sed \'s/[^-]*-//\'\\\\`" >> \
            /tmp/mon_local_list.sh')
        run('echo "sudo stop ceph-mon id=\\\\$id" >> /tmp/mon_local_list.sh')
        run('echo "fi done fi" >> /tmp/mon_local_list.sh')
        run('echo "}" >> /tmp/mon_local_list.sh')
        run('echo "get_local_daemon_ulist" >> /tmp/mon_local_list.sh')
        run('echo "exit 0" >> /tmp/mon_local_list.sh')
        run('chmod a+x /tmp/mon_local_list.sh')
        run('/tmp/mon_local_list.sh')
    #end reset_mon_remote_list()
    # Creates and runs a script that finds the upstart-managed osds
    # on the remote node and stops them.
    def reset_osd_remote_list(self):
        run('echo "get_local_daemon_ulist() {" > /tmp/osd_local_list.sh')
        run('echo "if [ -d \\\\"/var/lib/ceph/osd\\\\" ]; then" >> \
            /tmp/osd_local_list.sh')
        run('echo "for i in \\\\`find -L /var/lib/ceph/osd -mindepth 1 \
            -maxdepth 1 -type d -printf \'%f\\\\\\n\'\\\\`; do" >> \
            /tmp/osd_local_list.sh')
        run('echo "if [ -e \\\\"/var/lib/ceph/osd/\\\\$i/upstart\\\\" ]; \
            then" >> /tmp/osd_local_list.sh')
        run('echo "id=\\\\`echo \\\\$i | sed \'s/[^-]*-//\'\\\\`" >> \
            /tmp/osd_local_list.sh')
        run('echo "sudo stop ceph-osd id=\\\\$id" >> /tmp/osd_local_list.sh')
        run('echo "fi done fi" >> /tmp/osd_local_list.sh')
        run('echo "}" >> /tmp/osd_local_list.sh')
        run('echo "get_local_daemon_ulist" >> /tmp/osd_local_list.sh')
        run('echo "exit 0" >> /tmp/osd_local_list.sh')
        run('chmod a+x /tmp/osd_local_list.sh')
        run('/tmp/osd_local_list.sh')
    #end reset_osd_remote_list()
    # Creates the ceph-rest-api upstart service and starts it
    def ceph_rest_api_service_add(self):
        # Check for ceph-rest-api.conf and create
        # /etc/init/ceph-rest-api.conf for the upstart service.
        # If the service is not running, replace app.ceph_port with
        # 5005 (or 5006 when a VIP is configured) and start the
        # ceph-rest-api service. This works only on Ubuntu.
        # First master node
        rest_api_conf_available = local('ls %s 2>/dev/null | wc -l'
                                        %(CEPH_REST_API_CONF), capture=True)
        if rest_api_conf_available == '0':
            local('sudo echo description \\"Ceph REST API\\" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo >> %s' %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "start on started rc RUNLEVEL=[2345]" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "stop on runlevel [!2345]" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "respawn" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "respawn limit 5 30" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "limit nofile 16384 16384" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "pre-start script" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo " set -e" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo " test -x /usr/bin/ceph-rest-api || { stop; exit 0; }" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "end script" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "# this breaks oneiric" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "#usage \\"ceph-rest-api -c <conf-file> -n <client-name>\\"" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "exec ceph-rest-api -c /etc/ceph/ceph.conf -n client.admin" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "post-stop script" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "# nothing to do for now" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "end script" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
            local('sudo echo "" >> %s'
                  %(CEPH_REST_API_CONF), shell='/bin/bash')
        ceph_rest_api_process_running = local('ps -ef | grep -v grep | \
                grep ceph-rest-api | wc -l', capture=True)
        if ceph_rest_api_process_running == '0':
            # Change the port to 5006 (vip case) or 5005 (non-vip case)
            # and start the service
            entry_present = local('grep \"app.run(host=app.ceph_addr, port=app.ceph_port)\" /usr/bin/ceph-rest-api | wc -l', capture=True)
            if entry_present == '1':
                if self._args.cinder_vip != 'none':
                    local('sudo sed -i "s/app.run(host=app.ceph_addr, port=app.ceph_port)/app.run(host=app.ceph_addr, port=5006)/" /usr/bin/ceph-rest-api')
                else:
                    local('sudo sed -i "s/app.run(host=app.ceph_addr, port=app.ceph_port)/app.run(host=app.ceph_addr, port=5005)/" /usr/bin/ceph-rest-api')
            local('sudo service ceph-rest-api start', shell='/bin/bash')
        # Remaining configured master nodes for HA
        if self._args.storage_os_hosts[0] != 'none':
            for entries, entry_token in zip(self._args.storage_os_hosts,
                                            self._args.storage_os_host_tokens):
                with settings(host_string = 'root@%s' %(entries),
                              password = entry_token):
                    # Check for the rest api conf file; if not present,
                    # copy it from the first master to this master node
                    rest_api_conf_avail = run('ls %s 2>/dev/null | wc -l'
                                              %(CEPH_REST_API_CONF))
                    if rest_api_conf_avail == '0':
                        put(CEPH_REST_API_CONF, '/etc/init/')
                    # Check the ceph-rest-api running status
                    ceph_rest_api_process_running = run('ps -ef|grep -v grep|grep ceph-rest-api|wc -l')
                    if ceph_rest_api_process_running == '0':
                        entry_present = run('grep \"app.run(host=app.ceph_addr, port=app.ceph_port)\" /usr/bin/ceph-rest-api | wc -l')
                        # Change the port to 5006 (vip case) or 5005
                        # (non-vip case) and start the service
                        if entry_present == '1':
                            if self._args.cinder_vip != 'none':
                                run('sudo sed -i "s/app.run(host=app.ceph_addr, port=app.ceph_port)/app.run(host=app.ceph_addr, port=5006)/" /usr/bin/ceph-rest-api')
                            else:
                                run('sudo sed -i "s/app.run(host=app.ceph_addr, port=app.ceph_port)/app.run(host=app.ceph_addr, port=5005)/" /usr/bin/ceph-rest-api')
                        run('sudo service ceph-rest-api start')
    #end ceph_rest_api_service_add()
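    # For reference, the upstart job written above (reconstructed from the
    # echo commands) is:
    #   description "Ceph REST API"
    #   start on started rc RUNLEVEL=[2345]
    #   stop on runlevel [!2345]
    #   respawn
    #   respawn limit 5 30
    #   limit nofile 16384 16384
    #   pre-start script
    #    set -e
    #    test -x /usr/bin/ceph-rest-api || { stop; exit 0; }
    #   end script
    #   exec ceph-rest-api -c /etc/ceph/ceph.conf -n client.admin
    #   post-stop script
    #   # nothing to do for now
    #   end script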
    # Removes the ceph-rest-api service:
    # stops the service if it is running, then removes
    # /etc/init/ceph-rest-api.conf.
    def ceph_rest_api_service_remove(self):
        ceph_rest_api_process_running = local('ps -ef | grep -v grep | \
                grep ceph-rest-api | wc -l',
                capture=True)
        if ceph_rest_api_process_running != '0':
            local('sudo service ceph-rest-api stop', shell='/bin/bash')
        rest_api_conf_available = local('ls %s 2>/dev/null | wc -l'
                                        %(CEPH_REST_API_CONF), capture=True)
        if rest_api_conf_available != '0':
            local('sudo rm -rf %s' %(CEPH_REST_API_CONF), shell='/bin/bash')
        # Remaining configured master nodes for HA
        if self._args.storage_os_hosts[0] != 'none':
            for entries, entry_token in zip(self._args.storage_os_hosts,
                                            self._args.storage_os_host_tokens):
                with settings(host_string = 'root@%s' %(entries),
                              password = entry_token):
                    # Check the ceph-rest-api service and stop it on the
                    # remaining master nodes
                    ceph_rest_api_process_running = run('ps -ef|grep -v grep|grep ceph-rest-api|wc -l')
                    if ceph_rest_api_process_running != '0':
                        run('sudo service ceph-rest-api stop')
                    # Remove the rest api conf file from the remaining
                    # master nodes
                    rest_api_conf_avail = run('ls %s 2>/dev/null | wc -l'
                                              %(CEPH_REST_API_CONF))
                    if rest_api_conf_avail != '0':
                        run('sudo rm -rf %s' %(CEPH_REST_API_CONF))
    #end ceph_rest_api_service_remove()
    # Stops the contrail-storage-stats service on all storage nodes
    def contrail_storage_stats_service_remove(self):
        # Check if contrail-storage-stats is running; if so, stop it,
        # then revert the discovery server setting in its config
        for entries, entry_token, hostname in zip(self._args.storage_hosts,
                self._args.storage_host_tokens, self._args.storage_hostnames):
            with settings(host_string = 'root@%s' %(entries),
                          password = entry_token):
                contrail_stats_process_running = run('ps -ef| \
                        grep -v grep| grep contrail-storage-stats |wc -l')
                if contrail_stats_process_running != '0':
                    run('sudo service contrail-storage-stats stop')
                # Reset disc_server_ip
                run('sudo openstack-config --set \
                    /etc/contrail/contrail-storage-nodemgr.conf \
                    DEFAULTS disc_server_ip 127.0.0.1')
    #end contrail_storage_stats_service_remove
    # Configures syslog for Ceph
    def do_configure_syslog(self):
        # Log ceph.log to syslog
        local('ceph tell mon.* injectargs -- --mon_cluster_log_to_syslog=true')
        # Set the ceph.log-to-syslog config in ceph.conf
        local('sudo openstack-config --set %s mon \
              "mon cluster log to syslog" true'
              %(CEPH_CONFIG_FILE))
        for entries, entry_token in zip(self._args.storage_hosts,
                                        self._args.storage_host_tokens):
            if entries != self._args.storage_master:
                with settings(host_string = 'root@%s' %(entries),
                              password = entry_token):
                    run('sudo openstack-config --set %s mon \
                        "mon cluster log to syslog" true'
                        %(CEPH_CONFIG_FILE))
        # Enable server:port syslog remote logging
        for entries in self._args.collector_hosts:
            syslog_present = local('grep "*.* @%s:%s" %s | wc -l'
                                   %(entries, commonport.SYSLOG_LOGPORT,
                                     SYSLOGD_CONF), capture=True)
            if syslog_present == '0':
                local('echo "*.* @%s:%s" >> %s'
                      %(entries, commonport.SYSLOG_LOGPORT,
                        SYSLOGD_CONF))
        # Find and replace the syslog port in the collector config
        for entries, entry_token in zip(self._args.collector_hosts,
                                        self._args.collector_host_tokens):
            with settings(host_string = 'root@%s' %(entries),
                          password = entry_token):
                syslog_port = run('grep "# syslog_port=-1" %s | wc -l'
                                  %(COLLECTOR_CONF))
                if syslog_port == '1':
                    run('cat %s | sed "s/# syslog_port=-1/syslog_port=4514/" > \
                        %s; mv %s %s' %(COLLECTOR_CONF,
                                        COLLECTOR_TMP_CONF,
                                        COLLECTOR_TMP_CONF,
                                        COLLECTOR_CONF))
                syslog_port = run('grep "syslog_port=-1" %s | wc -l'
                                  %(COLLECTOR_CONF))
                if syslog_port == '1':
                    run('cat %s | sed "s/syslog_port=-1/syslog_port=4514/" > \
                        %s; mv %s %s' %(COLLECTOR_CONF,
                                        COLLECTOR_TMP_CONF,
                                        COLLECTOR_TMP_CONF,
                                        COLLECTOR_CONF))
                # Restart the collector after the syslog port change
                run('service contrail-collector restart')
        # Restart the rsyslog service after remote logging is enabled
        local('service rsyslog restart')
        return
    #end do_configure_syslog()
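    # Net effect of do_configure_syslog(), assuming a collector at
    # 10.1.1.5 (an illustrative address): one forwarding rule appended to
    # /etc/rsyslog.d/50-default.conf, e.g.
    #   *.* @10.1.1.5:<commonport.SYSLOG_LOGPORT>
    # plus syslog_port=4514 set in /etc/contrail/contrail-collector.conf
    # on every collector node.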
    # Removes the syslog configuration
    def unconfigure_syslog(self):
        # Disable server:port syslog remote logging
        for entries in self._args.collector_hosts:
            syslog_present = local('grep "*.* @%s:%s" %s | wc -l'
                                   %(entries,
                                     commonport.SYSLOG_LOGPORT,
                                     SYSLOGD_CONF), capture=True)
            if syslog_present == '1':
                local('sed -i "/*.* @%s:%s/d" %s'
                      %(entries,
                        commonport.SYSLOG_LOGPORT,
                        SYSLOGD_CONF))
        # Restart the rsyslog service after remote logging is disabled
        local('service rsyslog restart')
        # Find and reset the syslog port to its default in the collector
        for entries, entry_token in zip(self._args.collector_hosts,
                                        self._args.collector_host_tokens):
            with settings(host_string = 'root@%s' %(entries),
                          password = entry_token):
                syslog_port = run('grep "syslog_port=4514" %s | wc -l'
                                  %(COLLECTOR_CONF))
                if syslog_port == '1':
                    run('cat %s | sed "s/syslog_port=4514/syslog_port=-1/" >\
                        %s; mv %s %s' %(COLLECTOR_CONF,
                                        COLLECTOR_TMP_CONF,
                                        COLLECTOR_TMP_CONF,
                                        COLLECTOR_CONF))
                # Restart the collector after the syslog port is reset
                run('service contrail-collector restart')
    #end unconfigure_syslog()
    def do_patch_cinder(self):
        cinder_patch_utils = SetupCephUtils()
        cinder_patch_utils.create_and_apply_cinder_patch()
        if self._args.storage_os_hosts[0] != 'none':
            for entry, entry_token in zip(self._args.storage_os_hosts,
                                          self._args.storage_os_host_tokens):
                with settings(host_string = 'root@%s' %(entry),
                              password = entry_token):
                    if entry != self._args.storage_master:
                        put('%s' %(CINDER_PATCH_FILE), '%s' %(CINDER_PATCH_FILE),
                            use_sudo=True)
                        sudo('patch -N %s %s'
                             %(CINDER_VOLUME_MGR_PY, CINDER_PATCH_FILE),
                             warn_only=True)
        return
    #end do_patch_cinder()
    def do_patch_ceph_deploy(self):
        ceph_deploy_patch_utils = SetupCephUtils()
        ceph_deploy_patch_utils.create_and_apply_ceph_deploy_patch()
        return
    #end do_patch_ceph_deploy()
    # Checks whether multi-pool is disabled.
    # Returns FALSE if enabled, TRUE if disabled.
    # A pool is indicated by a 'P' entry in the disk list, in the field
    # after the disk (or after the journal, when one is present).
    def is_multi_pool_disabled(self):
        global storage_disk_list
        for disks in storage_disk_list:
            disksplit = disks.split(':')
            diskcount = disks.count(':')
            if diskcount == 3:
                if disksplit[3][0] == 'P':
                    return FALSE
            elif diskcount == 2:
                if disksplit[2][0] == 'P':
                    return FALSE
        return TRUE
    #end is_multi_pool_disabled()
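    # Illustrative disk-config entries (hostnames/devices are examples):
    #   'node1:/dev/sdb'                   -> no journal, no pool
    #   'node1:/dev/sdb:/dev/sdd'          -> journal, no pool
    #   'node1:/dev/sdb:Pool_A'            -> pool in field 2 (two ':')
    #   'node1:/dev/sdb:/dev/sdd:Pool_A'   -> pool in field 3 (three ':')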
    # Checks whether the SSD pool is disabled.
    # Returns FALSE if enabled, TRUE if disabled.
    def is_ssd_pool_disabled(self):
        if self._args.storage_ssd_disk_config[0] == 'none':
            return TRUE
        else:
            return FALSE
    #end is_ssd_pool_disabled()
    # Checks whether the chassis configuration is disabled.
    # Returns FALSE if enabled, TRUE if disabled.
    def is_chassis_disabled(self):
        if self._args.storage_chassis_config[0] == 'none':
            return TRUE
        else:
            return FALSE
    #end is_chassis_disabled()
    # Checks whether the LVM storage configuration is disabled.
    # Returns FALSE if enabled, TRUE if disabled.
    def is_lvm_config_disabled(self):
        if self._args.storage_local_disk_config[0] != 'none' or \
                self._args.storage_local_ssd_disk_config[0] != 'none':
            return FALSE
        return TRUE
    #end is_lvm_config_disabled()
    # Creates the OSD-number-to-drive mapping.
    # Uses storage_disk_config and storage_ssd_disk_config to build a
    # new list in the format hostname:diskname:osd-num.
    # The OSD number is found by checking the drive mount path, e.g.
    #   /dev/sdb1 /var/lib/ceph/osd/ceph-5 xfs rw,...
    # gives OSD number 5, a number unique to each OSD in the cluster.
    def create_osd_map_config(self):
        osd_map_config = []
        for hostname, entries, entry_token in zip(self._args.storage_hostnames,
                                                  self._args.storage_hosts,
                                                  self._args.storage_host_tokens):
            for disks in self._args.storage_disk_config:
                disksplit = disks.split(':')
                # For each disk, check the mounted OSD and get the OSD
                # number from the mount path.
                if disksplit[0] == hostname:
                    with settings(host_string = 'root@%s' %(entries),
                                  password = entry_token):
                        osddet = run('sudo mount | grep %s | awk \'{ print $3 }\''
                                     %(disksplit[1]))
                        osdnum = osddet.split('-')[1]
                        osd_map_config.append('%s:%s:%s' %(disksplit[0],
                                                           disksplit[1], osdnum))
            for disks in self._args.storage_ssd_disk_config:
                disksplit = disks.split(':')
                # Same as above, for the SSD disks.
                if disksplit[0] == hostname:
                    with settings(host_string = 'root@%s' %(entries),
                                  password = entry_token):
                        osddet = run('sudo mount | grep %s | awk \'{ print $3 }\''
                                     %(disksplit[1]))
                        osdnum = osddet.split('-')[1]
                        osd_map_config.append('%s:%s:%s' %(disksplit[0],
                                                           disksplit[1], osdnum))
        return osd_map_config
    #end create_osd_map_config()
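    # Illustrative result: if 'node1:/dev/sdb' is mounted as
    #   /dev/sdb1 /var/lib/ceph/osd/ceph-5 xfs rw,...
    # the returned list contains the entry 'node1:/dev/sdb:5'.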
    # Top-level function for crush map changes
    def do_crush_map_pool_config(self):
        global ceph_pool_list
        global ceph_tier_list
        crush_setup_utils = SetupCephUtils()
        # Apply crush map changes only if a multi-pool, SSD pool, or
        # chassis configuration is present
        if self.is_multi_pool_disabled() != TRUE or \
                self.is_ssd_pool_disabled() != TRUE or \
                self.is_chassis_disabled() != TRUE:
            # Initialize the crush map
            crush_map = crush_setup_utils.initialize_crush()
            # Get the osd -> drive map
            osd_map_config = self.create_osd_map_config()
            # Do pool configuration
            crush_map = crush_setup_utils.do_pool_config(crush_map,
                    self._args.storage_hostnames,
                    self._args.storage_disk_config,
                    self._args.storage_ssd_disk_config,
                    osd_map_config)
            # Do chassis configuration
            crush_map = crush_setup_utils.do_chassis_config(crush_map,
                    self._args.storage_hostnames,
                    self._args.storage_chassis_config)
            # Apply the crush map
            crush_setup_utils.apply_crush(crush_map)
        # Configure pools
        result = crush_setup_utils.do_configure_pools(
                self._args.storage_hostnames,
                self._args.storage_disk_config,
                self._args.storage_ssd_disk_config,
                self._args.storage_chassis_config,
                self._args.storage_replica_size,
                self._args.ssd_cache_tier,
                self._args.object_storage,
                self._args.object_storage_pool)
        ceph_pool_list = result['ceph_pool_list']
        ceph_tier_list = result['ceph_tier_list']
    #end do_crush_map_pool_config()
    # NFS cinder configuration
    def do_configure_nfs(self):
        global create_nfs_disk_volume
        if self._args.storage_nfs_disk_config[0] == 'none':
            return
        # Create the NFS mount list file
        file_present = local('sudo ls %s 2>/dev/null | wc -l' %(NFS_SERVER_LIST_FILE),
                             capture=True)
        if file_present == '0':
            local('sudo touch %s' %(NFS_SERVER_LIST_FILE), capture=True)
            local('sudo chown root:cinder %s' %(NFS_SERVER_LIST_FILE),
                  capture=True)
            local('sudo chmod 0640 %s' %(NFS_SERVER_LIST_FILE),
                  capture=True)
        # Add the NFS mount list to the file
        for entry in self._args.storage_nfs_disk_config:
            entry_present = local('cat %s | grep \"%s\" | wc -l'
                                  %(NFS_SERVER_LIST_FILE, entry),
                                  capture=True)
            if entry_present == '0':
                local('echo %s >> %s' %(entry, NFS_SERVER_LIST_FILE))
        # Cinder configuration to create the backend
        cinder_configured = local('sudo cat %s | grep enabled_backends | \
                                  grep nfs | wc -l'
                                  %(CINDER_CONFIG_FILE),
                                  capture=True)
        if cinder_configured == '0':
            existing_backends = local('sudo cat %s | grep enabled_backends | \
                                      awk \'{print $3}\''
                                      %(CINDER_CONFIG_FILE),
                                      shell='/bin/bash',
                                      capture=True)
            if existing_backends != '':
                new_backend = existing_backends + ',' + 'nfs'
                local('sudo openstack-config --set %s DEFAULT \
                      enabled_backends %s'
                      %(CINDER_CONFIG_FILE,
                        new_backend))
            else:
                local('sudo openstack-config --set %s DEFAULT \
                      enabled_backends nfs'
                      %(CINDER_CONFIG_FILE))
        local('sudo openstack-config --set %s nfs nfs_shares_config %s'
              %(CINDER_CONFIG_FILE,
                NFS_SERVER_LIST_FILE))
        local('sudo openstack-config --set %s nfs nfs_sparsed_volumes True'
              %(CINDER_CONFIG_FILE))
        local('sudo openstack-config --set %s nfs volume_driver \
              cinder.volume.drivers.nfs.NfsDriver'
              %(CINDER_CONFIG_FILE))
        local('sudo openstack-config --set %s nfs volume_backend_name NFS'
              %(CINDER_CONFIG_FILE))
        create_nfs_disk_volume = 1
        return
    #end do_configure_nfs()
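    # Sketch of the resulting cinder.conf fragment (the backend list
    # depends on what was already enabled):
    #   [DEFAULT]
    #   enabled_backends = <existing>,nfs
    #   [nfs]
    #   nfs_shares_config = /etc/cinder/nfs_server_list.txt
    #   nfs_sparsed_volumes = True
    #   volume_driver = cinder.volume.drivers.nfs.NfsDriver
    #   volume_backend_name = NFS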
    # do_ssh_config(): configures /etc/hosts with all of the
    # storage-compute hostnames/IP addresses.
    # It also adds the master's RSA public key to every storage-compute's
    # authorized_keys and known_hosts files, so that ceph-deploy can ssh
    # in without prompting for a 'yes' confirmation or a password.
    def do_ssh_config(self):
        storage_master_hostname = ''
        # Add all the storage-compute hostnames/IPs to /etc/hosts
        for entries, entry_token in zip(self._args.storage_hosts,
                                        self._args.storage_host_tokens):
            with settings(host_string = 'root@%s' %(entries),
                          password = entry_token):
                if self._args.storage_hostnames[0] == \
                        self._args.orig_hostnames[0]:
                    for hostname, host_ip in zip(self._args.storage_hostnames,
                                                 self._args.storage_hosts):
                        run('cat /etc/hosts | grep -v -w %s$ > /tmp/hosts; \
                            a=`cat /tmp/hosts | grep -w "%s[ ]*%s" | wc -l`; \
                            if [ "$a" == "0" ]; then echo %s %s >> /tmp/hosts; fi ; \
                            cp -f /tmp/hosts /etc/hosts' \
                            % (hostname, host_ip, hostname, host_ip, hostname))
                for hostname, host_ip, orig_hostname in zip(
                        self._args.storage_hostnames,
                        self._args.storage_hosts,
                        self._args.orig_hostnames):
                    if host_ip == self._args.storage_master:
                        storage_master_hostname = hostname
                    run('cat /etc/hosts | grep -v -w %s > /tmp/hosts'
                        %(host_ip))
                    match = run('cat /etc/hosts | grep -w "%s"'
                                %(host_ip), warn_only=True)
                    if match == '':
                        match = '%s %s'%(host_ip, hostname)
                    if run('echo "%s" | grep -e %s[[:blank:]] -e %s$ | wc -l '
                           %(match, hostname, hostname)) == '0':
                        match = '%s %s' %(match, hostname)
                    if run('echo "%s" | grep -e %s[[:blank:]] -e %s$ | wc -l '
                           %(match, orig_hostname, orig_hostname)) == '0':
                        match = '%s %s' %(match, orig_hostname)
                    run('echo "%s" >> /tmp/hosts' %(match))
                    run('cp -f /tmp/hosts /etc/hosts')
                # Check for chkconfig and add it if not present
                chkconfig = run('ls /sbin/chkconfig 2>/dev/null | wc -l')
                if chkconfig == '0':
                    run('ln -s /bin/true /sbin/chkconfig')
        # Generate a public key using ssh-keygen and first add the key to
        # the authorized_keys and known_hosts files on the master itself.
        # This is required when ceph-deploy does an ssh to the master to
        # add the first monitor.
        rsa_present = local('sudo ls ~/.ssh/id_rsa | wc -l', capture=True)
        if rsa_present != '1':
            local('sudo ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
        sshkey = local('cat ~/.ssh/id_rsa.pub', capture=True)
        local('sudo mkdir -p ~/.ssh')
        known_host_key = local('ssh-keyscan -t rsa %s,%s'
                               %(storage_master_hostname,
                                 self._args.storage_master), capture=True)
        already_present = local('grep "%s" ~/.ssh/known_hosts 2> /dev/null | \
                                wc -l' % (known_host_key), capture=True)
        if already_present == '0':
            local('sudo echo "%s" >> ~/.ssh/known_hosts' % (known_host_key))
        already_present = local('grep "%s" ~/.ssh/authorized_keys 2>/dev/null |\
                                wc -l' % (sshkey), capture=True)
        if already_present == '0':
            local('sudo echo "%s" >> ~/.ssh/authorized_keys' % (sshkey))
        # Add the master public key to every storage-compute's known_hosts
        # and authorized_keys files.
        for entries, entry_token, hostname in zip(self._args.storage_hosts,
                self._args.storage_host_tokens, self._args.storage_hostnames):
            if entries != self._args.storage_master:
                with settings(host_string = 'root@%s' %(entries),
                              password = entry_token):
                    run('sudo mkdir -p ~/.ssh')
                    already_present = run('grep "%s" ~/.ssh/known_hosts \
                                          2> /dev/null | wc -l'
                                          %(known_host_key))
                    if already_present == '0':
                        run('sudo echo "%s" >> ~/.ssh/known_hosts'
                            %(known_host_key), shell='/bin/bash')
                    already_present = run('grep "%s" ~/.ssh/authorized_keys \
                                          2> /dev/null | wc -l'
                                          %(sshkey))
                    if already_present == '0':
                        run('sudo echo "%s" >> ~/.ssh/authorized_keys' % (sshkey))
                    hostfound = local('sudo grep %s,%s ~/.ssh/known_hosts | \
                                      wc -l' %(hostname, entries),
                                      capture=True)
                    if hostfound == "0":
                        out = run('sudo ssh-keyscan -t rsa %s,%s 2>/dev/null'
                                  %(hostname, entries))
                        local('sudo echo "%s" >> ~/.ssh/known_hosts' % (out))
                    rem_rsa_present = run('sudo ls ~/.ssh/id_rsa | wc -l')
                    if rem_rsa_present != '1':
                        run('sudo ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
                    rsshkey = run('cat ~/.ssh/id_rsa.pub')
                    already_present = local('grep "%s" ~/.ssh/authorized_keys \
                                            2>/dev/null | \
                                            wc -l' % (rsshkey), capture=True)
                    if already_present == '0':
                        local('sudo echo "%s" >> ~/.ssh/authorized_keys'
                              % (rsshkey))
        return
    #end do_ssh_config()
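    # After do_ssh_config() runs, each node's /etc/hosts carries one line
    # per storage node (addresses/names illustrative), e.g.
    #   192.168.1.11 storage1 storage1-orig
    # and the master's ~/.ssh/id_rsa.pub appears in every node's
    # ~/.ssh/authorized_keys, so ceph-deploy can log in unprompted.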
    # Creates the monitor list
    def do_create_monlist(self):
        global configure_with_ceph
        global ceph_mon_hosts
        global ceph_mon_hosts_list
        global ceph_mon_count
        global ceph_all_hosts
        global ceph_mon_entry_list
        # Find existing mons
        ceph_mon_entries = local('ceph --connect-timeout 5 mon stat 2>&1 |grep quorum | \
                                 awk \'{print $11}\'', capture=True)
        if ceph_mon_entries != '':
            ceph_mon_list = ceph_mon_entries.split(',')
            for entry in ceph_mon_list:
                ceph_mon_count += 1
                ceph_mon_hosts_list.append(entry)
        # The storage master needs to be the first mon
        for entries, entry_token, hostname in zip(self._args.storage_hosts,
                                                  self._args.storage_host_tokens,
                                                  self._args.storage_hostnames):
            if entries == self._args.storage_master:
                ceph_mon_hosts = ceph_mon_hosts + hostname + ' '
                entry = ''
                for entry in ceph_mon_hosts_list:
                    if entry == hostname:
                        break
                if ceph_mon_count < MAX_MONS:
                    if entry != hostname:
                        ceph_mon_count += 1
                        ceph_mon_hosts_list.append(hostname)
        # Next, try the configured compute monitor list.
        # If the configured monitor list is empty, start monitors on the
        # first "N" computes, where master monitors + "N" < MAX_MONS.
        if self._args.storage_mon_hosts[0] != 'none':
            for hostname in self._args.storage_mon_hosts:
                if ceph_mon_count < MAX_MONS:
                    ceph_mon_hosts = ceph_mon_hosts + hostname + ' '
                    entry = ''
                    for entry in ceph_mon_hosts_list:
                        if entry == hostname:
                            break
                    if entry != hostname:
                        ceph_mon_count += 1
                        ceph_mon_hosts_list.append(hostname)
        # Next, use the openstack nodes
        for entries, entry_token, hostname in zip(self._args.storage_hosts,
                                                  self._args.storage_host_tokens,
                                                  self._args.storage_hostnames):
            if entries == self._args.storage_master:
                ceph_mon_hosts = ceph_mon_hosts + hostname + ' '
                entry = ''
                for entry in ceph_mon_hosts_list:
                    if entry == hostname:
                        break
                if ceph_mon_count < MAX_MONS:
                    if entry != hostname:
                        ceph_mon_count += 1
                        ceph_mon_hosts_list.append(hostname)
            if self._args.storage_os_hosts[0] != 'none':
                for osnode in self._args.storage_os_hosts:
                    if entries == osnode:
                        ceph_mon_hosts = ceph_mon_hosts + hostname + ' '
                        entry = ''
                        for entry in ceph_mon_hosts_list:
                            if entry == hostname:
                                break
                        if ceph_mon_count < MAX_MONS:
                            if entry != hostname:
                                ceph_mon_count += 1
                                ceph_mon_hosts_list.append(hostname)
        # Finally, try the other storage nodes
        if ceph_mon_count < MAX_MONS:
            for entries, entry_token, hostname in zip(self._args.storage_hosts,
                                                      self._args.storage_host_tokens,
                                                      self._args.storage_hostnames):
                if entries == self._args.storage_master:
                    continue
                entry_hit = 0
                if self._args.storage_os_hosts[0] != 'none':
                    for osnode in self._args.storage_os_hosts:
                        if entries == osnode:
                            entry_hit = 1
                            break
                if entry_hit == 0:
                    if ceph_mon_count < MAX_MONS:
                        ceph_mon_hosts = ceph_mon_hosts + hostname + ' '
                        entry = ''
                        for entry in ceph_mon_hosts_list:
                            if entry == hostname:
                                break
                        if entry != hostname:
                            ceph_mon_count += 1
                            ceph_mon_hosts_list.append(hostname)
        for mon_entry in ceph_mon_hosts_list:
            for entry, hostname in zip(self._args.storage_hosts,
                                       self._args.storage_hostnames):
                if hostname == mon_entry:
                    ceph_mon_entry_list.append(entry)
                    break
        for entries in self._args.storage_hostnames:
            ceph_all_hosts = ceph_all_hosts + entries + ' '
    #end do_create_monlist
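    # Illustrative outcome with MAX_MONS = 3 (hostnames are examples):
    # existing quorum mons are kept first, then the storage master, then
    # the configured compute monitors / openstack nodes, then the other
    # storage nodes, e.g.
    #   ceph_mon_hosts_list = ['master-host', 'os-node1', 'storage1']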
    # Unconfigures storage: removes all the storage configurations
    def do_storage_unconfigure(self):
        global configure_with_ceph
        if self._args.storage_directory_config[0] != 'none' or \
                self._args.storage_disk_config[0] != 'none' or \
                self._args.storage_ssd_disk_config[0] != 'none':
            configure_with_ceph = 1
        else:
            configure_with_ceph = 0
        # Remove the glance images and configuration if Ceph is configured.
        if configure_with_ceph:
            while True:
                glance_image = local('(. /etc/contrail/openstackrc ; \
                                     glance image-list |grep active | \
                                     awk \'{print $2}\' | head -n 1)',
                                     capture=True, shell='/bin/bash')
                if glance_image != '':
                    local('(. /etc/contrail/openstackrc ; glance image-delete %s)'
                          %(glance_image))
                else:
                    break
            local('sudo openstack-config --set %s %s default_store file'