/
provision.py
3507 lines (3175 loc) · 148 KB
/
provision.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import os
import string
import textwrap
import json
import socket
from time import sleep
from fabric.contrib.files import exists
from fabfile.config import *
from fabfile.utils.fabos import *
from fabfile.utils.host import *
from fabfile.utils.interface import *
from fabfile.utils.multitenancy import *
from fabfile.utils.migration import *
from fabfile.utils.storage import *
from fabfile.utils.analytics import *
from fabfile.utils.config import get_value
from fabfile.tasks.install import *
from fabfile.tasks.verify import *
from fabfile.tasks.helpers import *
from fabfile.utils.commandline import *
from fabfile.tasks.tester import setup_test_env
from fabfile.tasks.rabbitmq import setup_rabbitmq_cluster
from fabfile.tasks.zookeeper import *
from fabfile.tasks.services import *
from fabfile.tasks.vmware import provision_vcenter, provision_dvs_fab,\
configure_esxi_network, create_esxi_compute_vm, deprovision_vcenter,\
provision_vcenter_features, provision_pci_fab, provision_sr_iov_fab
from fabfile.utils.cluster import get_vgw_details, get_orchestrator,\
get_vmware_details, get_tsn_nodes, get_toragent_nodes,\
get_esxi_vms_and_hosts, get_mode, is_contrail_node,\
create_esxi_vrouter_map_file, update_esxi_vrouter_map_file,\
get_qos_nodes, get_qos_niantic_nodes
from fabfile.tasks.esxi_defaults import apply_esxi_defaults
from fabfile.tasks.ssl import (setup_keystone_ssl_certs_node,
setup_apiserver_ssl_certs_node, copy_keystone_ssl_certs_to_node,
copy_apiserver_ssl_certs_to_node, copy_vnc_api_lib_ini_to_node,
copy_certs_for_neutron_node, copy_certs_for_heat)
FAB_UTILS_DIR = '/opt/contrail/utils/fabfile/utils/'
@task
@EXECUTE_TASK
@roles('all')
def bash_autocomplete_systemd():
    """Hook the systemd bash-completion script into ~/.bashrc.

    Hosts whose 'uname -a' output mentions xen, el6 or ubuntu are left
    untouched; any other host is assumed to be Fedora-like.
    """
    host = env.host_string
    kernel_info = sudo('uname -a')
    if not any(tag in kernel_info for tag in ('xen', 'el6', 'ubuntu')):
        # Assume Fedora
        sudo("echo 'source /etc/bash_completion.d/systemd-bash-completion.sh' >> ~/.bashrc")
@roles('cfgm')
@task
def setup_cfgm():
    """Provisions config services in all nodes defined in cfgm role."""
    if not env.roledefs['cfgm']:
        return
    execute("setup_cfgm_node", env.host_string)
@roles('cfgm')
@task
def fix_cfgm_config():
    """Regenerate the config file in all the cfgm nodes"""
    if not env.roledefs['cfgm']:
        return
    execute("fix_cfgm_config_node", env.host_string)
@task
def fix_cfgm_config_node(*args):
    """Regenerate the contrail config file on each node given in args."""
    for node in args:
        with settings(host_string=node):
            sudo(frame_vnc_config_cmd(node, "update-cfgm-config"))
@roles('rally')
@task
def setup_rally():
    """Create a rally deployment named 'contrail' from the openstackrc env."""
    if not env.roledefs['rally']:
        return
    sudo('mkdir -p /etc/contrail/')
    copy_openstackrc()
    sudo('source /etc/contrail/openstackrc; rally deployment create --name contrail --fromenv')
#end setup_rally
@roles('collector')
@task
def fix_collector_config():
    """Regenerate the collector file in all the analytics nodes"""
    if not env.roledefs['collector']:
        return
    execute("fix_collector_config_node", env.host_string)
@task
def fix_collector_config_node(*args):
    """Regenerate the collector config file on each node given in args."""
    for node in args:
        with settings(host_string=node):
            sudo(frame_vnc_collector_cmd(node, "update-collector-config"))
@roles('webui')
@task
def fix_webui_config():
    """Regenerate the webui config file in all the webui nodes"""
    if not env.roledefs['webui']:
        return
    execute("fix_webui_config_node", env.host_string)
@task
def fix_webui_config_node(*args):
    """Regenerate the webui config file on each node given in args."""
    for node in args:
        with settings(host_string=node):
            sudo(frame_vnc_webui_cmd(node, "update-webui-config"))
def fixup_restart_haproxy_in_all_cfgm(nworkers):
    """Rewrite the contrail section of /etc/haproxy/haproxy.cfg on every
    cfgm node and restart haproxy there.

    Args:
        nworkers: number of contrail-api / discovery worker processes per
            cfgm node; one backend server line is emitted per worker.
    """
    template = string.Template("""
#contrail-config-marker-start
global
tune.maxrewrite 1024
listen contrail-config-stats :5937
mode http
stats enable
stats uri /
stats auth $__contrail_hap_user__:$__contrail_hap_passwd__
$__quantum_server_frontend__
default_backend quantum-server-backend
$__contrail_api_frontend__
default_backend contrail-api-backend
timeout client 3m
frontend contrail-discovery *:5998
default_backend contrail-discovery-backend
backend quantum-server-backend
option nolinger
balance roundrobin
$__quantum_ssl_forwarding__
$__contrail_quantum_servers__
#server 10.84.14.2 10.84.14.2:9697 check
backend contrail-api-backend
option nolinger
timeout server 3m
balance roundrobin
$__contrail_api_ssl_forwarding__
$__contrail_api_backend_servers__
#server 10.84.14.2 10.84.14.2:9100 check
#server 10.84.14.2 10.84.14.2:9101 check
backend contrail-discovery-backend
option nolinger
balance roundrobin
$__contrail_disc_backend_servers__
#server 10.84.14.2 10.84.14.2:9110 check
#server 10.84.14.2 10.84.14.2:9111 check
$__tor_agent_ha_config__
$__rabbitmq_config__
#contrail-config-marker-end
""")
    q_listen_port = 9697
    q_server_lines = ''
    q_frontend = 'frontend quantum-server *:9696'
    q_ssl_forwarding = ''
    api_listen_port = 9100
    api_frontend = 'frontend contrail-api *:8082'
    api_ssl_forwarding = ''
    api_server_lines = ''
    disc_listen_port = 9110
    disc_server_lines = ''
    tor_agent_ha_config = ''
    rabbitmq_config = """
listen rabbitmq 0.0.0.0:5673
mode tcp
maxconn 10000
balance leastconn
option tcpka
option nolinger
option forceclose
timeout client 0
timeout server 0
timeout client-fin 60s
timeout server-fin 60s\n"""
    space = ' ' * 3
    # enumerate() replaces the old per-iteration list.index() lookup,
    # which was O(n^2) and returned the wrong index for duplicate entries.
    for server_index, host_string in enumerate(env.roledefs['cfgm'], 1):
        host_ip = hstr_to_ip(get_control_host_string(host_string))
        q_server_lines = q_server_lines + \
            ' server %s %s:%s check inter 2000 rise 2 fall 3\n' \
            %(host_ip, host_ip, str(q_listen_port))
        for i in range(nworkers):
            api_server_lines = api_server_lines + \
                ' server %s %s:%s check inter 2000 rise 2 fall 3\n' \
                %(host_ip, host_ip, str(api_listen_port + i))
            disc_server_lines = disc_server_lines + \
                ' server %s %s:%s check inter 2000 rise 2 fall 3\n' \
                %(host_ip, host_ip, str(disc_listen_port + i))
        # First cfgm node is the preferred rabbitmq server; the rest are
        # lower-weight backups.
        if server_index == 1:
            rabbitmq_config +=\
                '%s server rabbit%s %s:5672 weight 200 check inter 2000 rise 2 fall 3\n'\
                % (space, server_index, host_ip)
        else:
            rabbitmq_config +=\
                '%s server rabbit%s %s:5672 weight 100 check inter 2000 rise 2 fall 3 backup\n'\
                % (space, server_index, host_ip)
    if get_contrail_internal_vip() == get_openstack_internal_vip():
        # Openstack and cfgm are same nodes.
        # Dont add rabbitmq config twice in haproxy, as setup_ha has added already.
        rabbitmq_config = ''
    # create TOR agent configuration for the HA proxy
    if 'toragent' in env.roledefs.keys() and 'tor_agent' in env.keys():
        tor_agent_ha_config = get_all_tor_agent_haproxy_config()
    # contrail-api SSL termination
    if apiserver_ssl_enabled():
        q_frontend = """frontend quantum-server
bind *:9696 ssl crt /etc/contrail/ssl/certs/contrailcertbundle.pem"""
        q_ssl_forwarding = """ option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }"""
        api_frontend = """frontend contrail-api
bind *:8082 ssl crt /etc/contrail/ssl/certs/contrailcertbundle.pem"""
        api_ssl_forwarding = """ option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }"""
    for host_string in env.roledefs['cfgm']:
        haproxy_config = template.safe_substitute({
            '__contrail_quantum_servers__': q_server_lines,
            '__quantum_server_frontend__': q_frontend,
            '__quantum_ssl_forwarding__': q_ssl_forwarding,
            '__contrail_api_frontend__': api_frontend,
            '__contrail_api_ssl_forwarding__': api_ssl_forwarding,
            '__contrail_api_backend_servers__': api_server_lines,
            '__contrail_disc_backend_servers__': disc_server_lines,
            '__contrail_hap_user__': 'haproxy',
            '__contrail_hap_passwd__': get_haproxy_token('cfgm'),
            '__rabbitmq_config__': rabbitmq_config,
            '__tor_agent_ha_config__': tor_agent_ha_config,
        })
        with settings(host_string=host_string):
            # chop old settings including pesky default from pkg...
            tmp_fname = "/tmp/haproxy-%s-config" %(host_string)
            get_as_sudo("/etc/haproxy/haproxy.cfg", tmp_fname)
            with settings(warn_only=True):
                local("sed -i -e '/^#contrail-config-marker-start/,/^#contrail-config-marker-end/d' %s" %(tmp_fname))
                local("sed -i -e 's/frontend\s*main\s*\*:5000/frontend main *:5001/' %s" %(tmp_fname))
                local("sed -i -e 's/ssl-relay 0.0.0.0:8443/ssl-relay 0.0.0.0:5002/' %s" %(tmp_fname))
                local('grep -q "tune.bufsize 16384" %s || sed -i "/^global/a\\ tune.bufsize 16384" %s' % (tmp_fname, tmp_fname))
                local('grep -q "tune.maxrewrite 1024" %s || sed -i "/^global/a\\ tune.maxrewrite 1024" %s' % (tmp_fname, tmp_fname))
            # ...generate new ones; the context manager guarantees the
            # handle is closed even if the write fails.
            with open(tmp_fname, 'a') as cfg_file:
                cfg_file.write(haproxy_config)
            put(tmp_fname, "/etc/haproxy/haproxy.cfg", use_sudo=True)
            local("rm %s" %(tmp_fname))
        # haproxy enable
        with settings(host_string=host_string, warn_only=True):
            sudo("chkconfig haproxy on")
            sudo("service haproxy restart")
# end fixup_restart_haproxy_in_all_cfgm
# Get HA proxy configuration for a TOR agent
def get_tor_agent_haproxy_config(proxy_name, key, ha_dict):
    """Return one haproxy 'listen' stanza for a TOR agent group.

    key is either a single TSN IP, or 'ip1-ip2' for a redundant pair;
    ha_dict maps the key to the list of OVS ports to bind.
    Returns just a newline when no ports are configured for the key.
    """
    ports = ha_dict[key]
    if not ports:
        return '\n'
    # One server line per TSN IP; a redundant pair is encoded as 'ip1-ip2'.
    tsn_ips = key.split('-')[:2] if '-' in key else [key]
    stanza = ['']
    stanza.append('listen %s' % proxy_name)
    stanza.append(' option tcpka')
    stanza.append(' mode tcp')
    stanza.append(' bind ' + ','.join(':%s' % port for port in ports))
    for tsn_ip in tsn_ips:
        stanza.append(' server %s %s check inter 2000' % (tsn_ip, tsn_ip))
    stanza.append('')
    return '\n'.join(stanza) + '\n'
#end get_tor_agent_haproxy_config
def get_tor_agent_id(entry):
    """Return the integer tor agent id from a testbed entry, or -1.

    Honours both the 'tor_id' key and, failing that, 'tor_agent_id'.
    """
    for id_key in ('tor_id', 'tor_agent_id'):
        if id_key in entry:
            return int(entry[id_key])
    print('tor-agent-id configuration is missing in testbed file')
    return -1
#end get_tor_agent_id
# Given a host_string and tor_name, return the standby tor-agent info identified
# by index and host-string of tor-agent
def get_standby_info(skip_host, match_tor_name):
    """Find the standby tor-agent for match_tor_name on any host other
    than skip_host; returns (index, host) or (-1, None) when absent."""
    toragent_dict = getattr(env, 'tor_agent', None)
    for candidate_host in get_toragent_nodes():
        if candidate_host == skip_host:
            continue
        for idx, agent in enumerate(toragent_dict[candidate_host]):
            if agent['tor_name'] == match_tor_name:
                return (idx, candidate_host)
    return (-1, None)
#end get_standby_info
def make_key(tsn1, tsn2):
    """Return a canonical 'a-b' key with the two TSN IPs in sorted order."""
    return '-'.join(sorted((tsn1, tsn2)))
# Get HA proxy configuration for all TOR agents
def get_all_tor_agent_haproxy_config():
toragent_dict = getattr(env,'tor_agent', None)
master_standby_dict = {}
tor_agent_host_list = get_toragent_nodes()
for host in tor_agent_host_list:
for i in range(len(toragent_dict[host])):
tor_name= toragent_dict[host][i]['tor_name']
tsn1 = toragent_dict[host][i]['tor_tsn_ip']
port1 = toragent_dict[host][i]['tor_ovs_port']
standby_tor_idx, standby_host = get_standby_info(host, tor_name)
key = tsn1
if (standby_tor_idx != -1 and standby_host != None):
tsn2 = toragent_dict[standby_host][standby_tor_idx]['tor_tsn_ip']
port2 = toragent_dict[standby_host][standby_tor_idx]['tor_ovs_port']
if port1 == port2:
key = make_key(tsn1, tsn2)
else:
print "Tor Agents (%s, %d) and (%s, %d) are configured as \
redundant agents but don't have same ovs_port" \
%(host, i, standby_host, standby_tor_idx)
if not key in master_standby_dict:
master_standby_dict[key] = []
if not port1 in master_standby_dict[key]:
master_standby_dict[key].append(port1)
i = 1
cfg_str = ""
for key in master_standby_dict.keys():
proxy_name = "contrail-tor-agent-" + str(i)
i = i + 1
cfg_str = cfg_str + get_tor_agent_haproxy_config(proxy_name, key, master_standby_dict)
return cfg_str
#end test_task
@roles('cfgm')
@task
def setup_haproxy_config():
    """Provisions HA proxy service in all nodes defined in cfgm role."""
    if not env.roledefs['cfgm']:
        return
    execute("setup_haproxy_config_node", env.host_string)
@task
def setup_haproxy_config_node(*args):
    """Provisions HA proxy service in one or list of nodes."""
    # A single api/discovery worker per node is assumed here.
    fixup_restart_haproxy_in_all_cfgm(1)
#end setup_haproxy_config_node
def fixup_restart_haproxy_in_one_compute(compute_host_string):
    """Rewrite the contrail section of haproxy.cfg on one compute node
    and restart haproxy there.

    Discovery/quantum stanzas are skipped when the node is also a cfgm
    (config context already generated them); the glance-api stanza is
    skipped when the node is also an openstack node.
    """
    compute_haproxy_template = string.Template("""
#contrail-compute-marker-start
listen contrail-compute-stats :5938
mode http
stats enable
stats uri /
stats auth $__contrail_hap_user__:$__contrail_hap_passwd__
$__contrail_disc_stanza__
$__contrail_quantum_stanza__
$__contrail_qpid_stanza__
$__contrail_glance_api_stanza__
#contrail-compute-marker-end
""")
    ds_stanza_template = string.Template("""
$__contrail_disc_frontend__
backend discovery-server-backend
balance roundrobin
$__contrail_disc_servers__
#server 10.84.14.2 10.84.14.2:5998 check
""")
    q_stanza_template = string.Template("""
$__contrail_quantum_frontend__
backend quantum-server-backend
balance roundrobin
$__contrail_quantum_servers__
#server 10.84.14.2 10.84.14.2:9696 check
""")
    g_api_stanza_template = string.Template("""
$__contrail_glance_api_frontend__
backend glance-api-backend
balance roundrobin
$__contrail_glance_apis__
#server 10.84.14.2 10.84.14.2:9292 check
""")
    ds_frontend = textwrap.dedent("""\
frontend discovery-server 127.0.0.1:5998
default_backend discovery-server-backend
""")
    q_frontend = textwrap.dedent("""\
frontend quantum-server 127.0.0.1:9696
default_backend quantum-server-backend
""")
    g_api_frontend = textwrap.dedent("""\
frontend glance-api 127.0.0.1:9292
default_backend glance-api-backend
""")
    # if this compute is also config, skip quantum and discovery
    # stanza as they would have been generated in config context
    ds_stanza = ''
    q_stanza = ''
    if compute_host_string not in env.roledefs['cfgm']:
        # generate discovery service stanza
        ds_server_lines = ''
        for config_host_string in env.roledefs['cfgm']:
            host_ip = hstr_to_ip(config_host_string)
            ds_server_lines = ds_server_lines + \
                ' server %s %s:5998 check\n' %(host_ip, host_ip)
        ds_stanza = ds_stanza_template.safe_substitute({
            '__contrail_disc_frontend__': ds_frontend,
            '__contrail_disc_servers__': ds_server_lines,
        })
        # generate quantum stanza
        q_server_lines = ''
        for config_host_string in env.roledefs['cfgm']:
            host_ip = hstr_to_ip(config_host_string)
            q_server_lines = q_server_lines + \
                ' server %s %s:9696 check\n' %(host_ip, host_ip)
        q_stanza = q_stanza_template.safe_substitute({
            '__contrail_quantum_frontend__': q_frontend,
            '__contrail_quantum_servers__': q_server_lines,
        })
    # if this compute is also openstack, skip glance-api stanza
    # as that would have been generated in openstack context
    g_api_stanza = ''
    if compute_host_string not in env.roledefs['openstack']:
        # generate a glance-api stanza
        g_api_server_lines = ''
        for openstack_host_string in env.roledefs['openstack']:
            host_ip = hstr_to_ip(openstack_host_string)
            g_api_server_lines = g_api_server_lines + \
                ' server %s %s:9292 check\n' %(host_ip, host_ip)
            g_api_stanza = g_api_stanza_template.safe_substitute({
                '__contrail_glance_api_frontend__': g_api_frontend,
                '__contrail_glance_apis__': g_api_server_lines,
            })
            # HACK: for now only one openstack
            break
    with settings(host_string=compute_host_string):
        # chop old settings including pesky default from pkg...
        tmp_fname = "/tmp/haproxy-%s-compute" %(compute_host_string)
        get_as_sudo("/etc/haproxy/haproxy.cfg", tmp_fname)
        with settings(warn_only=True):
            local("sed -i -e '/^#contrail-compute-marker-start/,/^#contrail-compute-marker-end/d' %s"\
                %(tmp_fname))
            local("sed -i -e 's/*:5000/*:5001/' %s" %(tmp_fname))
            local("sed -i -e 's/ssl-relay 0.0.0.0:8443/ssl-relay 0.0.0.0:5002/' %s" %(tmp_fname))
            local('grep -q "tune.bufsize 16384" %s || sed -i "/^global/a\\ tune.bufsize 16384" %s' % (tmp_fname, tmp_fname))
            local('grep -q "tune.maxrewrite 1024" %s || sed -i "/^global/a\\ tune.maxrewrite 1024" %s' % (tmp_fname, tmp_fname))
        # ...generate new ones
        compute_haproxy = compute_haproxy_template.safe_substitute({
            '__contrail_hap_user__': 'haproxy',
            '__contrail_hap_passwd__': 'contrail123',
            '__contrail_disc_stanza__': ds_stanza,
            '__contrail_quantum_stanza__': q_stanza,
            '__contrail_glance_api_stanza__': g_api_stanza,
            '__contrail_qpid_stanza__': '',
        })
        # context manager guarantees the file handle is closed
        with open(tmp_fname, 'a') as cfg_file:
            cfg_file.write(compute_haproxy)
        put(tmp_fname, "/etc/haproxy/haproxy.cfg", use_sudo=True)
        local("rm %s" %(tmp_fname))
    # enable
    with settings(host_string=compute_host_string, warn_only=True):
        sudo("chkconfig haproxy on")
        sudo("service haproxy restart")
# end fixup_restart_haproxy_in_one_compute
def fixup_restart_haproxy_in_all_compute():
    """Regenerate and restart haproxy on every compute node."""
    for node in env.roledefs['compute']:
        fixup_restart_haproxy_in_one_compute(node)
# end fixup_restart_haproxy_in_all_compute
def fixup_restart_haproxy_in_all_openstack():
    """Rewrite the contrail section of haproxy.cfg on every openstack node
    and restart haproxy there.

    The quantum stanza is skipped when the node is also a cfgm, since the
    config context already generated it.
    """
    openstack_haproxy_template = string.Template("""
#contrail-openstack-marker-start
listen contrail-openstack-stats :5936
mode http
stats enable
stats uri /
stats auth $__contrail_hap_user__:$__contrail_hap_passwd__
$__contrail_quantum_stanza__
#contrail-openstack-marker-end
""")
    q_stanza_template = string.Template("""
$__contrail_quantum_frontend__
backend quantum-server-backend
balance roundrobin
$__contrail_quantum_servers__
#server 10.84.14.2 10.84.14.2:9696 check
""")
    q_frontend = textwrap.dedent("""\
frontend quantum-server 127.0.0.1:9696
default_backend quantum-server-backend
""")
    # for all openstack, set appropriate haproxy stanzas
    for openstack_host_string in env.roledefs['openstack']:
        # if this openstack is also config, skip quantum stanza
        # as that would have been generated in config context
        q_stanza = ''
        if openstack_host_string not in env.roledefs['cfgm']:
            # generate a quantum stanza
            q_server_lines = ''
            for config_host_string in env.roledefs['cfgm']:
                host_ip = hstr_to_ip(config_host_string)
                q_server_lines = q_server_lines + \
                    ' server %s %s:9696 check\n' %(host_ip, host_ip)
            q_stanza = q_stanza_template.safe_substitute({
                '__contrail_quantum_frontend__': q_frontend,
                '__contrail_quantum_servers__': q_server_lines,
            })
        with settings(host_string=openstack_host_string):
            # chop old settings including pesky default from pkg...
            tmp_fname = "/tmp/haproxy-%s-openstack" %(openstack_host_string)
            get_as_sudo("/etc/haproxy/haproxy.cfg", tmp_fname)
            with settings(warn_only=True):
                local("sed -i -e '/^#contrail-openstack-marker-start/,/^#contrail-openstack-marker-end/d' %s"\
                    %(tmp_fname))
                local("sed -i -e 's/*:5000/*:5001/' %s" %(tmp_fname))
                local("sed -i -e 's/ssl-relay 0.0.0.0:8443/ssl-relay 0.0.0.0:5002/' %s" %(tmp_fname))
                local('grep -q "tune.bufsize 16384" %s || sed -i "/^global/a\\ tune.bufsize 16384" %s' % (tmp_fname, tmp_fname))
                local('grep -q "tune.maxrewrite 1024" %s || sed -i "/^global/a\\ tune.maxrewrite 1024" %s' % (tmp_fname, tmp_fname))
            # ...generate new ones
            openstack_haproxy = openstack_haproxy_template.safe_substitute({
                '__contrail_hap_user__': 'haproxy',
                '__contrail_hap_passwd__': 'contrail123',
                '__contrail_quantum_stanza__': q_stanza,
            })
            # context manager guarantees the file handle is closed
            with open(tmp_fname, 'a') as cfg_file:
                cfg_file.write(openstack_haproxy)
            put(tmp_fname, "/etc/haproxy/haproxy.cfg", use_sudo=True)
            local("rm %s" %(tmp_fname))
        # enable
        with settings(host_string=openstack_host_string, warn_only=True):
            sudo("chkconfig haproxy on")
            sudo("service haproxy restart")
# end fixup_restart_haproxy_in_all_openstack
@task
def setup_cfgm_node(*args):
    """Provisions config services in one or list of nodes. USAGE: fab setup_cfgm_node:user@1.1.1.1,user@2.2.2.2

    Pushes SSL certs where enabled, regenerates haproxy config on all
    cfgm nodes, runs the provision-config command on each target node
    and, when the orchestrator is vcenter, provisions the vcenter plugin.
    """
    # Pass 1: distribute SSL material to every target node first, so the
    # haproxy/provision steps below can rely on it being in place.
    for host_string in args:
        # Enable settings for Ubuntu
        with settings(host_string=host_string):
            if apiserver_ssl_enabled():
                execute("setup_apiserver_ssl_certs_node", host_string)
            if keystone_ssl_enabled():
                execute("copy_keystone_ssl_certs_to_node", host_string)
            if apiserver_ssl_enabled():
                execute("copy_certs_for_neutron_node", host_string)
            enable_haproxy()
    nworkers = 1
    fixup_restart_haproxy_in_all_cfgm(nworkers)
    # Pass 2: run the actual provisioning command on each node.
    for host_string in args:
        with settings(host_string=host_string):
            if detect_ostype() == 'ubuntu':
                # remove upstart overrides so the services can start
                with settings(warn_only=True):
                    sudo('rm /etc/init/supervisor-config.override')
                    sudo('rm /etc/init/neutron-server.override')
            # Frame the command line to provision config node
            cmd = frame_vnc_config_cmd(host_string)
            # Execute the provision config script
            with cd(INSTALLER_DIR):
                sudo(cmd)
            orch = get_orchestrator()
            if orch == 'vcenter':
                vcenter_info = getattr(env, 'vcenter_servers', None)
                if not vcenter_info:
                    print 'Error: vcenter block is not defined in testbed file.Exiting'
                    return
                else:
                    # NOTE(review): only one vcenter server is used - the
                    # loop breaks after the first entry of the dict.
                    for v in vcenter_info:
                        vcenter_server = vcenter_info[v]
                        vcenter_server_name = v
                        break
                #create the static esxi:vrouter map file
                create_esxi_vrouter_map_file(vcenter_server_name, vcenter_server, host_string)
                # Frame the command to provision vcenter-plugin
                cmd = frame_vnc_vcenter_plugin_cmd(host_string)
                # Execute the provision vcenter-plugin script
                with cd(INSTALLER_DIR):
                    sudo(cmd)
    # HAPROXY fixups
    haproxy = get_haproxy_opt()
    if haproxy:
        fixup_restart_haproxy_in_all_compute()
        fixup_restart_haproxy_in_all_openstack()
#end setup_cfgm_node
def fixup_ceilometer_conf_common():
    """Write the node-independent ceilometer.conf settings on the current
    host: mongodb connection string, rabbit host/port, HA partitioning,
    log dir, metering secret, auth strategy and sample TTL.
    """
    conf_file = "/etc/ceilometer/ceilometer.conf"
    openstack_sku = get_openstack_sku()
    # Build the mongodb URI across every database node (port 27017),
    # targeting the rs-ceilometer replicaSet.
    database_host_list = [get_control_host_string(entry)\
        for entry in env.roledefs['database']]
    database_ip_list = ["%s:27017" % (hstr_to_ip(db_host))\
        for db_host in database_host_list]
    database_ip_str = ','.join(database_ip_list)
    value = "mongodb://ceilometer:CEILOMETER_DBPASS@" + database_ip_str + \
        "/ceilometer?replicaSet=rs-ceilometer"
    # havana keeps the connection under [DEFAULT]; later SKUs use [database]
    if openstack_sku == 'havana':
        sudo("openstack-config --set %s DEFAULT connection %s" % (conf_file, value))
    else:
        sudo("openstack-config --set %s database connection %s" % (conf_file, value))
    amqp_server_ip = get_openstack_amqp_server()
    sudo("openstack-config --set %s DEFAULT rabbit_host %s" % (conf_file, amqp_server_ip))
    if get_openstack_internal_vip():
        # HA setup: partition the notification/compute agents and
        # coordinate via zookeeper on the first database node.
        sudo("openstack-config --set %s notification workload_partitioning %s" %
            (conf_file, "True"))
        sudo("openstack-config --set %s compute workload_partitioning %s" %
            (conf_file, "True"))
        sudo("openstack-config --set %s coordination backend_url %s%s%s" %
            (conf_file, "kazoo://", env.roledefs['database'][0], ":2181"))
    sudo("openstack-config --set %s DEFAULT rabbit_port %s" % (conf_file,
        get_openstack_amqp_port()))
    value = "/var/log/ceilometer"
    sudo("openstack-config --set %s DEFAULT log_dir %s" % (conf_file, value))
    # NOTE(review): hard-coded shared metering secret; presumably it must
    # match across all openstack nodes - confirm before changing.
    value = "a74ca26452848001921c"
    if openstack_sku == 'havana':
        sudo("openstack-config --set %s DEFAULT metering_secret %s" % (conf_file, value))
    else:
        sudo("openstack-config --set %s publisher metering_secret %s" % (conf_file, value))
    sudo("openstack-config --set %s DEFAULT auth_strategy keystone" % conf_file)
    sudo("openstack-config --set %s database time_to_live %d" % (conf_file, get_ceilometer_ttl()))
#end fixup_ceilometer_conf_common
def fixup_ceilometer_conf_keystone(openstack_ip):
    """Fill the keystone_authtoken and service_credentials sections of
    ceilometer.conf against the given openstack node (https when
    keystone SSL is enabled). Skipped when auth_host is already set.
    """
    auth_protocol = 'https' if keystone_ssl_enabled() else 'http'
    conf_file = '/etc/ceilometer/ceilometer.conf'
    with settings(warn_only=True):
        already_set = sudo("grep '^auth_host =' /etc/ceilometer/ceilometer.conf").succeeded
    if already_set:
        return
    base = "openstack-config --set %s keystone_authtoken" % conf_file
    for option in ("admin_password CEILOMETER_PASS",
                   "admin_user ceilometer",
                   "admin_tenant_name service",
                   "auth_uri %s://%s:5000" % (auth_protocol, openstack_ip),
                   "auth_protocol %s" % auth_protocol,
                   "auth_port 35357",
                   "auth_host %s" % openstack_ip):
        sudo("%s %s" % (base, option))
    if keystone_ssl_enabled():
        sudo("%s insecure True" % base)
    base = "openstack-config --set %s service_credentials" % conf_file
    for option in ("os_password CEILOMETER_PASS",
                   "os_tenant_name service",
                   "os_username ceilometer",
                   "os_auth_url %s://%s:5000/v2.0" % (auth_protocol, openstack_ip)):
        sudo("%s %s" % (base, option))
    if keystone_ssl_enabled():
        sudo("%s insecure True" % base)
#end fixup_ceilometer_conf_keystone
def fixup_ceilometer_pipeline_conf(analytics_ip):
    """Add a contrail source/sink pair for floating-ip meters to
    /etc/ceilometer/pipeline.yaml on the current host.

    Args:
        analytics_ip: IP of the contrail analytics node; used to build
            the contrail:// resource URL polled for floating-ip stats.
    """
    import yaml
    rconf_file = '/etc/ceilometer/pipeline.yaml'
    conf_file = 'pipeline.yaml'
    # Fetch the remote file into a local temp dir, edit the copy, then
    # push it back through a remote temp dir.
    ltemp_dir = tempfile.mkdtemp()
    get(rconf_file, ltemp_dir)
    with open('%s/%s' % (ltemp_dir, conf_file)) as fap:
        data = fap.read()
    pipeline_dict = yaml.safe_load(data)
    # If already configured with 'contrail_source' and/or 'contrail_sink' exit
    # NOTE(review): these early returns leave ltemp_dir behind on disk.
    for source in pipeline_dict['sources']:
        if source['name'] == 'contrail_source':
            return
    for sink in pipeline_dict['sinks']:
        if sink['name'] == 'contrail_sink':
            return
    # Edit meters in sources to exclude floating IP meters if '*' is
    # configured
    for source in pipeline_dict['sources']:
        for mname in source['meters']:
            if mname == '*':
                source['meters'].append('!ip.floating.*')
                print('Excluding floating IP meters from source %s' % (source['name']))
                break
    # Add contrail source and sinks to the pipeline
    interval = int(get_ceilometer_interval())
    contrail_source = {'interval': interval,
        'meters': ['ip.floating.receive.bytes',
            'ip.floating.receive.packets',
            'ip.floating.transmit.bytes',
            'ip.floating.transmit.packets'],
        'name': 'contrail_source',
        'sinks': ['contrail_sink']}
    contrail_source['resources'] = ['contrail://%s:8081/' % (analytics_ip)]
    # mitaka onwards the rpc publisher is gone; use notifier instead
    contrail_sink = {'publishers': ['notifier://' if is_mitaka_or_above() else 'rpc://'],
        'transformers': None,
        'name': 'contrail_sink'}
    pipeline_dict['sources'].append(contrail_source)
    pipeline_dict['sinks'].append(contrail_sink)
    with open('%s/%s' % (ltemp_dir, conf_file), 'w') as fap:
        yaml.safe_dump(pipeline_dict, fap, explicit_start=True,
            default_flow_style=False, indent=4)
    rtemp_dir = sudo('(tempdir=$(mktemp -d); echo $tempdir)')
    put('%s/%s' % (ltemp_dir, conf_file), rtemp_dir, use_sudo=True)
    sudo('mv %s/%s %s' % (rtemp_dir, conf_file, rconf_file))
    local('rm -rf %s' % (ltemp_dir))
    sudo('rm -rf %s' % (rtemp_dir))
#end fixup_ceilometer_pipeline_conf
def fixup_mongodb_conf_file():
    """Prepare /etc/mongodb.conf for the rs-ceilometer replicaSet.

    Comments out any bind address, ensures the replSet name is present,
    then starts mongodb, restarting up to ~10 times until the service
    no longer reports itself as not running.
    """
    sudo("service mongodb stop")
    sudo("sed -i -e '/^[ ]*bind/s/^/#/' /etc/mongodb.conf")
    with settings(warn_only=True):
        replset_line = sudo("grep replSet=rs-ceilometer /etc/mongodb.conf")
    if not replset_line.succeeded:
        sudo("echo \"replSet=rs-ceilometer\" >> /etc/mongodb.conf")
    sudo("service mongodb start")
    # check if the mongodb is running, if not, issue start again
    not_running_cmd = "service mongodb status | grep not"
    with settings(warn_only=True):
        status = sudo(not_running_cmd)
    attempts = 1
    while status.succeeded:
        attempts += 1
        if attempts > 10:
            break
        sleep(1)
        sudo("service mongodb restart")
        with settings(warn_only=True):
            status = sudo(not_running_cmd)
#end fixup_mongodb_conf_file
def setup_ceilometer_mongodb(ip, mongodb_ip_list):
    """Initialise the rs-ceilometer mongodb replicaSet and the ceilometer
    database user.

    The full initialisation runs only when `ip` is the first entry of
    `mongodb_ip_list`; the remaining nodes are added as replicaSet
    members from that first node.

    Args:
        ip: mongodb IP of the node being provisioned.
        mongodb_ip_list: mongodb IPs of all database nodes (port 27017).

    Raises:
        RuntimeError: when the ceilometer mongodb user cannot be added.
    """
    # Configure replicaSet only on the first mongodb node
    if ip == mongodb_ip_list[0]:
        # Verify that we are able to connect
        cmd = "mongo --host " + ip + " --quiet --eval " + \
            "'db = db.getSiblingDB(\"ceilometer\")'"
        verify_command_succeeded(cmd = cmd, expected_output = "ceilometer",
                                 error_str = "Not able to connect to mongodb",
                                 max_count = 60, sleep_interval = 2,
                                 warn_only = True)
        # Verify if replicaSet is already configured
        cmd = "mongo --host " + ip + " --quiet --eval 'rs.conf()._id'"
        with settings(warn_only=True):
            output = sudo(cmd)
            # Already initialised on a previous run - nothing more to do.
            if output.succeeded and output == 'rs-ceilometer':
                return
        cmd = "mongo --host " + ip + " --quiet --eval " + \
            "'rs.initiate({_id:\"rs-ceilometer\", " + \
            "members:[{_id:0, host:\"" + ip + ":27017\"}]}).ok'"
        verify_command_succeeded(cmd = cmd, expected_output = "1",
                                 error_str = "Not able to initiate replicaSet",
                                 max_count = 1, sleep_interval = 1,
                                 warn_only = False)
        # Verify that we are adding on primary
        cmd = "mongo --host " + ip + " --quiet --eval 'db.isMaster().ismaster'"
        verify_command_succeeded(cmd = cmd, expected_output = "true",
                                 error_str = "Not primary",
                                 max_count = 30, sleep_interval = 2,
                                 warn_only = False)
        # Add replicaSet members
        for other_ip in mongodb_ip_list:
            if ip == other_ip:
                continue
            cmd = "mongo --host " + ip + \
                " --quiet --eval 'rs.add(\"" + other_ip + ":27017\").ok'"
            verify_command_succeeded(cmd = cmd, expected_output = "1",
                                     error_str = "Not able to add " + \
                                         other_ip + " to replicaSet",
                                     max_count = 10, sleep_interval = 1,
                                     warn_only = False)
        # Verify replicaSet status and members
        cmd = "mongo --host " + ip + " --quiet --eval 'rs.status().ok'"
        verify_command_succeeded(cmd = cmd, expected_output = "1",
                                 error_str = "replicaSet status NOT OK",
                                 max_count = 10, sleep_interval = 1,
                                 warn_only = False)
        cmd = "mongo --host " + ip + " --quiet --eval " + \
            "'rs.status().members.length'"
        verify_command_succeeded(cmd = cmd,
                                 expected_output = str(len(mongodb_ip_list)),
                                 error_str = "replicaSet does not contain "
                                     "all database nodes",
                                 max_count = 1, sleep_interval = 1,
                                 warn_only = False)
        # check if ceilometer user has already been added
        cmd = "mongo --host " + ip + " --quiet --eval " + \
            "\"db.system.users.find({'user':'ceilometer'}).count()\" ceilometer"
        output = sudo(cmd)
        # Does user ceilometer exist
        if output == "1":
            return
        cmd = "mongo --host " + ip + " --eval " + \
            "'db = db.getSiblingDB(\"ceilometer\"); " + \
            "db.addUser({user: \"ceilometer\", pwd: \"CEILOMETER_DBPASS\", " + \
            "roles: [ \"readWrite\", \"dbAdmin\" ]})'"
        if not sudo(cmd).succeeded:
            raise RuntimeError("Not able to add ceilometer mongodb user")
#end setup_ceilometer_mongodb
@task
@roles('compute')
def setup_ceilometer_compute():
    """Provisions ceilometer compute services in all nodes defined in compute role."""
    # Nothing to do when no compute hosts are defined.
    if not env.roledefs['compute']:
        return
    execute("setup_ceilometer_compute_node", env.host_string)
@task
def setup_ceilometer_compute_node(*args):
    """Provisions ceilometer compute services in one or list of nodes. USAGE: fab setup_ceilometer_compute_node:user@1.1.1.1,user@2.2.2.2"""
    openstack_host = env.roledefs['openstack'][0]
    for host_string in args:
        with settings(host_string=host_string):
            os_type = detect_ostype()
            # Use the instance_usage_audit key as a marker for whether the
            # ceilometer-related nova options were already configured here.
            with settings(warn_only=True):
                compute_ceilometer_present = sudo("grep '^instance_usage_audit =' /etc/nova/nova.conf").succeeded
            if not compute_ceilometer_present:
                config_cmd = "openstack-config --set /etc/nova/nova.conf DEFAULT"
                sudo("%s notification_driver ceilometer.compute.nova_notifier" % config_cmd)
                sudo("%s notification_driver nova.openstack.common.notifier.rpc_notifier" % config_cmd)
                sudo("%s notify_on_state_change vm_and_task_state" % config_cmd)
                sudo("%s instance_usage_audit_period hour" % config_cmd)
                sudo("%s instance_usage_audit True" % config_cmd)
                if os_type == 'ubuntu':
                    nova_services = ['nova-compute']
                elif os_type in ['redhat']:
                    nova_services = ['openstack-nova-compute']
                else:
                    # BUG FIX: was RuntimeError("...(%s)", os_type), which left
                    # the message unformatted; interpolate explicitly.
                    raise RuntimeError("Unsupported OS Type (%s)" % os_type)
                # Restart nova so the new notification settings take effect.
                for svc in nova_services:
                    sudo("service %s restart" % (svc))
            if host_string != openstack_host:
                # copy over ceilometer.conf from the first openstack node so
                # all compute nodes share a consistent configuration
                conf_file = '/etc/ceilometer/ceilometer.conf'
                local_tempdir = tempfile.mkdtemp()
                try:
                    with lcd(local_tempdir):
                        with settings(host_string = openstack_host):
                            get(conf_file, local_tempdir)
                    tempdir = sudo('(tempdir=$(mktemp -d); echo $tempdir)')
                    put('%s/ceilometer.conf' % (local_tempdir), tempdir, use_sudo=True)
                    sudo('mv %s/ceilometer.conf %s' % (tempdir, conf_file))
                    sudo('rm -rf %s' % (tempdir))
                finally:
                    # BUG FIX: clean up the local scratch dir even when the
                    # remote copy fails part-way through.
                    local('rm -rf %s' % (local_tempdir))
                if os_type == 'ubuntu':
                    ceilometer_services = ['ceilometer-agent-compute']
                elif os_type in ['redhat']:
                    ceilometer_services = ['openstack-ceilometer-compute']
                else:
                    # BUG FIX: same unformatted-RuntimeError issue as above.
                    raise RuntimeError("Unsupported OS Type (%s)" % os_type)
                # Restart the ceilometer compute agent to pick up the new conf.
                for svc in ceilometer_services:
                    sudo("service %s restart" % (svc))
@task
@roles('openstack')
def setup_contrail_ceilometer_plugin():
    """Provisions contrail ceilometer plugin in the first node defined in openstack role."""
    # Bail out early when no openstack hosts are defined.
    if not env.roledefs['openstack']:
        return
    execute("setup_contrail_ceilometer_plugin_node", env.host_string)
@task
def setup_contrail_ceilometer_plugin_node(*args):
    """Provisions contrail ceilometer plugin in one or list of nodes.
    USAGE: fab setup_contrail_ceilometer_plugin_node:user@1.1.1.1,user@2.2.2.2"""
    analytics_ip = hstr_to_ip(env.roledefs['collector'][0])
    for host_string in args:
        with settings(host_string=host_string):
            # Point the ceilometer pipeline.yaml at the contrail analytics node
            fixup_ceilometer_pipeline_conf(analytics_ip)
            os_type = detect_ostype()
            if os_type == 'ubuntu':
                ceilometer_services = ['ceilometer-agent-central']
            elif os_type in ['redhat']:
                ceilometer_services = ['openstack-ceilometer-central']
            else:
                # BUG FIX: was RuntimeError("...(%s)", os_type), which left the
                # message unformatted; interpolate explicitly.
                raise RuntimeError("Unsupported OS Type (%s)" % os_type)
            # Restart so the central agent picks up the updated pipeline config
            for svc in ceilometer_services:
                sudo("service %s restart" % (svc))
@task
@roles('openstack')
def setup_ceilometer():
    """Provisions ceilometer services in all nodes defined in openstack role."""
    # Skip entirely when no openstack hosts are defined.
    if not env.roledefs['openstack']:
        return
    execute("setup_ceilometer_node", env.host_string)
    # Network service is provisioned on the cfgm node.
    execute("setup_network_service")
    execute("setup_image_service_node", env.host_string)
    execute("setup_identity_service_node", env.host_string)
@task
def setup_ceilometer_node(*args):
"""Provisions ceilometer services in one or list of nodes. USAGE: fab setup_ceilometer_node:user@1.1.1.1,user@2.2.2.2"""
if not is_ceilometer_provision_supported():
return
analytics_ip = hstr_to_ip(env.roledefs['collector'][0])
for host_string in args:
self_host = get_control_host_string(host_string)
self_ip = hstr_to_ip(self_host)
with settings(host_string=host_string):
openstack_sku = get_openstack_sku()
if openstack_sku == 'havana':
ceilometer_services = ['ceilometer-agent-central',
'ceilometer-api',
'ceilometer-collector']
else:
ceilometer_services = ['ceilometer-agent-central',
'ceilometer-agent-notification',
'ceilometer-api',
'ceilometer-collector']
if openstack_sku in ['juno', 'kilo', 'liberty']:
ceilometer_services += ['ceilometer-alarm-evaluator',
'ceilometer-alarm-notifier']
fixup_ceilometer_conf_common()
#keystone auth params
cmd = "source /etc/contrail/openstackrc;keystone --insecure user-get ceilometer"
with settings(warn_only=True):
output = sudo(cmd)
count = 1
while not output.succeeded and (
"Unable to establish connection" in output or
"Service Unavailable (HTTP 503)" in output):
count += 1