#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
"""
Layer that transforms VNC config objects to ifmap representation
"""
from cfgm_common.zkclient import ZookeeperClient, IndexAllocator
from gevent import ssl, monkey
monkey.patch_all()
import gevent
import gevent.event
from gevent.queue import Queue
import sys
import time
from pprint import pformat
from lxml import etree, objectify
import cgitb
import StringIO
import re
import socket
import errno
import subprocess
import netaddr
from netaddr import IPNetwork
from bitarray import bitarray
from cfgm_common import ignore_exceptions, utils
from cfgm_common.ifmap.client import client, namespaces
from cfgm_common.ifmap.request import NewSessionRequest, RenewSessionRequest,\
EndSessionRequest, PublishRequest, SearchRequest, SubscribeRequest,\
PurgeRequest, PollRequest
from cfgm_common.ifmap.id import IPAddress, MACAddress, Device,\
AccessRequest, Identity, CustomIdentity
from cfgm_common.ifmap.operations import PublishUpdateOperation,\
PublishNotifyOperation, PublishDeleteOperation, SubscribeUpdateOperation,\
SubscribeDeleteOperation
from cfgm_common.ifmap.util import attr, link_ids
from cfgm_common.ifmap.response import Response, newSessionResult
from cfgm_common.ifmap.metadata import Metadata
from cfgm_common import obj_to_json
from cfgm_common.exceptions import ResourceExhaustionError, ResourceExistsError
import copy
import json
import uuid
import datetime
import pycassa
import pycassa.util
import pycassa.cassandra.ttypes
from pycassa.system_manager import *
from pycassa.util import *
import amqp.exceptions
import kombu
import signal, os
#from cfgm_common import vnc_type_conv
from provision_defaults import *
import cfgm_common.imid
from cfgm_common.exceptions import *
from vnc_quota import *
from gen.vnc_ifmap_client_gen import *
from gen.vnc_cassandra_client_gen import *
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus, \
ConnectionType
from sandesh.traces.ttypes import DBRequestTrace, MessageBusNotifyTrace, \
IfmapTrace
import logging
logger = logging.getLogger(__name__)
@ignore_exceptions
def get_trace_id():
try:
req_id = gevent.getcurrent().trace_request_id
except Exception:
req_id = 'req-%s' %(str(uuid.uuid4()))
gevent.getcurrent().trace_request_id = req_id
return req_id
# end get_trace_id
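# Note: the id is stashed on the current greenlet, so all traces emitted
# while serving one API request share a single 'req-<uuid4>' identifier.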
@ignore_exceptions
def trace_msg(trace_obj, trace_name, sandesh_hdl, error_msg=None):
if trace_obj:
if error_msg:
trace_obj.error = error_msg
trace_obj.trace_msg(name=trace_name, sandesh=sandesh_hdl)
# end trace_msg
class VncIfmapClient(VncIfmapClientGen):
    def handler(self, signum, frame):
        # On SIGUSR2, dump the ifmap meta cache to a file for debugging
        with open("/tmp/api-server-ifmap-cache.txt", "w") as f:
            f.write(str(self._id_to_metas))
    # end handler
def __init__(self, db_client_mgr, ifmap_srv_ip, ifmap_srv_port,
uname, passwd, ssl_options, ifmap_srv_loc=None):
super(VncIfmapClient, self).__init__()
# TODO username/passwd from right place
self._CONTRAIL_XSD = "http://www.contrailsystems.com/vnc_cfg.xsd"
self._IPERMS_NAME = "id-perms"
self._IPERMS_FQ_NAME = "contrail:" + self._IPERMS_NAME
self._SUBNETS_NAME = "contrail:subnets"
self._IPAMS_NAME = "contrail:ipams"
self._SG_RULE_NAME = "contrail:sg_rules"
self._POLICY_ENTRY_NAME = "contrail:policy_entry"
self._NAMESPACES = {
'env': "http://www.w3.org/2003/05/soap-envelope",
'ifmap': "http://www.trustedcomputinggroup.org/2010/IFMAP/2",
'meta':
"http://www.trustedcomputinggroup.org/2010/IFMAP-METADATA/2",
'contrail': self._CONTRAIL_XSD
}
self._db_client_mgr = db_client_mgr
self._sandesh = db_client_mgr._sandesh
ConnectionState.update(conn_type = ConnectionType.IFMAP,
name = 'IfMap', status = ConnectionStatus.INIT, message = '',
server_addrs = ["%s:%s" % (ifmap_srv_ip, ifmap_srv_port)])
# launch mapserver
if ifmap_srv_loc:
self._launch_mapserver(ifmap_srv_ip, ifmap_srv_port, ifmap_srv_loc)
# Cache of metas populated in ifmap server. Useful in update to find
# what things to remove in ifmap server
self._id_to_metas = {}
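        # Illustrative cache layout (as maintained by _publish_update and
        # the _delete_id_*_meta methods below):
        #   self._id_to_metas = {
        #       '<ifmap-id>': {
        #           '<meta-name>': [ {'meta': <Metadata>},              # self meta
        #                            {'meta': <Metadata>, 'id': id2} ]  # link meta
        #       }
        #   }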
self.accumulator = None
self.accumulated_request_len = 0
# Set the signal handler
signal.signal(signal.SIGUSR2, self.handler)
mapclient = client(("%s" % (ifmap_srv_ip), "%s" % (ifmap_srv_port)),
uname, passwd, self._NAMESPACES, ssl_options)
self._mapclient = mapclient
connected = False
while not connected:
try:
result = mapclient.call('newSession', NewSessionRequest())
connected = True
except socket.error as e:
time.sleep(3)
ConnectionState.update(conn_type = ConnectionType.IFMAP,
name = 'IfMap', status = ConnectionStatus.UP, message = '',
server_addrs = ["%s:%s" % (ifmap_srv_ip, ifmap_srv_port)])
mapclient.set_session_id(newSessionResult(result).get_session_id())
mapclient.set_publisher_id(newSessionResult(result).get_publisher_id())
# Initialize ifmap-id handler (alloc|convert|parse etc.)
self._imid_handler = Imid()
imid = self._imid_handler
# Publish init config (TODO this should come from api-server init)
# config-root
        buf = StringIO.StringIO()
perms = Provision.defaults.perms['config-root']
perms.exportChildren(buf, level=1, pretty_print=False)
id_perms_xml = buf.getvalue()
buf.close()
update = {}
meta = Metadata(self._IPERMS_NAME, '',
{'ifmap-cardinality': 'singleValue'},
ns_prefix='contrail', elements=id_perms_xml)
self._update_id_self_meta(update, meta)
self._publish_update("contrail:config-root:root", update)
# end __init__
def get_imid_handler(self):
return self._imid_handler
# end get_imid_handler
# Parse ifmap-server returned search results and create list of tuples
# of (ident-1, ident-2, link-attribs)
def parse_result_items(self, srch_result, my_imid):
xpath_expr = \
'/env:Envelope/env:Body/ifmap:response/searchResult/resultItem'
result_items = self._parse(srch_result, xpath_expr)
return cfgm_common.imid.parse_result_items(result_items, my_imid)
# end parse_result_items
# In list of (ident-1, ident-2, link-attribs) tuples, return list of
# ifmap-ids of other idents
def get_others_in_result_list(self, result_list, my_imid):
other_imid_list = []
for result_elem in result_list:
ident_1, ident_2, meta = result_elem
if (ident_1 is None) or (ident_2 is None):
continue
other_imid = None
if ident_1.attrib['name'] == my_imid:
other_imid = ident_2.attrib['name']
elif ident_2.attrib['name'] == my_imid:
other_imid = ident_1.attrib['name']
other_imid_list.append(other_imid)
return other_imid_list
# end get_others_in_result_list
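    # Illustrative example (hypothetical names): with
    # my_imid = 'contrail:project:default-domain:p1' and a result_list of
    # [(<ident p1>, <ident vn1>, meta)], the returned list holds the other
    # endpoint's name, ['contrail:virtual-network:default-domain:p1:vn1'].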
def _ensure_port_not_listened(self, server_ip, server_port):
try:
s = socket.create_connection((server_ip, server_port))
s.close()
print "IP %s port %s already listened on"\
% (server_ip, server_port)
except Exception as err:
if err.errno == errno.ECONNREFUSED:
return # all is well
# end _ensure_port_not_listened
def _block_till_port_listened(self, server_name, server_ip, server_port):
svr_running = False
while not svr_running:
try:
s = socket.create_connection((server_ip, server_port))
s.close()
svr_running = True
except Exception as err:
if err.errno == errno.ECONNREFUSED:
print "%s not up, retrying in 2 secs" % (server_name)
time.sleep(2)
else:
raise err
# end _block_till_port_listened
# launch ifmap server
def _launch_mapserver(self, ifmap_srv_ip, ifmap_srv_port, ifmap_srv_loc):
print 'Starting IFMAP server ...'
self._ensure_port_not_listened(ifmap_srv_ip, ifmap_srv_port)
logf_out = open('ifmap-server.out', 'w')
logf_err = open('ifmap-server.err', 'w')
self._mapserver = subprocess.Popen(['java', '-jar', 'build/irond.jar'],
cwd=ifmap_srv_loc, stdout=logf_out,
stderr=logf_err)
self._block_till_port_listened(
'ifmap-server', ifmap_srv_ip, ifmap_srv_port)
# end _launch_mapserver
@ignore_exceptions
def _generate_ifmap_trace(self, oper, body):
req_id = get_trace_id()
ifmap_trace = IfmapTrace(request_id=req_id)
ifmap_trace.operation = oper
ifmap_trace.body = body
return ifmap_trace
# end _generate_ifmap_trace
def _publish_with_trace(self, oper, oper_body, async):
        # safety check: an empty body would make ifmap-server report an
        # error (publish requires an update|delete element)
if not oper_body:
return
trace = self._generate_ifmap_trace(oper, oper_body)
try:
not_published = True
retry_count = 0
while not_published:
sess_id = self._mapclient.get_session_id()
if async:
method = getattr(self._mapclient, 'call_async_result')
else:
method = getattr(self._mapclient, 'call')
req_xml = PublishRequest(sess_id, oper_body)
resp_xml = method('publish', req_xml)
resp_doc = etree.parse(StringIO.StringIO(resp_xml))
err_codes = resp_doc.xpath('/env:Envelope/env:Body/ifmap:response/errorResult/@errorCode',
namespaces=self._NAMESPACES)
if err_codes:
if retry_count == 0:
log_str = 'Error publishing to ifmap, req: %s, resp: %s' \
%(req_xml, resp_xml)
self._db_client_mgr.config_log_error(log_str)
retry_count = retry_count + 1
result = self._mapclient.call('newSession',
NewSessionRequest())
sess_id = newSessionResult(result).get_session_id()
pub_id = newSessionResult(result).get_publisher_id()
self._mapclient.set_session_id(sess_id)
self._mapclient.set_publisher_id(pub_id)
else: # successful publish
not_published = False
break
# end while not_published
if retry_count:
log_str = 'Success publishing to ifmap after %d tries' \
%(retry_count)
self._db_client_mgr.config_log_error(log_str)
trace_msg(trace, 'IfmapTraceBuf', self._sandesh)
except Exception as e:
trace_msg(trace, 'IfmapTraceBuf', self._sandesh, error_msg=str(e))
log_str = 'Failed to publish %s body %s to ifmap: %s' %(oper,
oper_body, str(e))
logger.error(log_str)
self._db_client_mgr.config_log_error(log_str)
raise
# end _publish_with_trace
def _delete_id_self_meta(self, self_imid, meta_name):
mapclient = self._mapclient
del_str = str(PublishDeleteOperation(
id1=str(Identity(
name=self_imid,
type="other",
other_type="extended")),
filter=meta_name))
self._publish_with_trace('delete', del_str, async=False)
# del meta from cache and del id if this was last meta
if meta_name:
prop_name = meta_name.replace('contrail:', '')
del self._id_to_metas[self_imid][prop_name]
if not self._id_to_metas[self_imid]:
del self._id_to_metas[self_imid]
else:
del self._id_to_metas[self_imid]
# end _delete_id_self_meta
def _delete_id_pair_meta(self, id1, id2, metadata):
mapclient = self._mapclient
del_str = str(PublishDeleteOperation(
id1=str(Identity(
name=id1,
type="other",
other_type="extended")),
id2=str(Identity(
name=id2,
type="other",
other_type="extended")),
filter=metadata))
self._publish_with_trace('delete', del_str, async=False)
# del meta,id2 from cache and del id if this was last meta
def _id_to_metas_delete(id1, id2, meta_name):
if meta_name not in self._id_to_metas[id1]:
return
if not self._id_to_metas[id1][meta_name]:
del self._id_to_metas[id1][meta_name]
if not self._id_to_metas[id1]:
del self._id_to_metas[id1]
return
# if meta is prop, noop
if 'id' not in self._id_to_metas[id1][meta_name][0]:
return
self._id_to_metas[id1][meta_name] = \
[{'id':m['id'], 'meta':m['meta']} \
for m in self._id_to_metas[id1][meta_name] if m['id'] != id2]
if metadata:
meta_name = metadata.replace('contrail:', '')
# replace with remaining refs
for (id_x, id_y) in [(id1, id2), (id2, id1)]:
_id_to_metas_delete(id_x, id_y, meta_name)
else: # no meta specified remove all links from id1 to id2
for (id_x, id_y) in [(id1, id2), (id2, id1)]:
meta_names = self._id_to_metas.get(id_x, {}).keys()
for meta_name in meta_names:
_id_to_metas_delete(id_x, id_y, meta_name)
# end _delete_id_pair_meta
def _update_id_self_meta(self, update, meta):
""" update: dictionary of the type
update[<id> | 'self'] = list(metadata)
"""
if 'self' in update:
mlist = update['self']
else:
mlist = []
update['self'] = mlist
mlist.append(meta)
# end _update_id_self_meta
def _update_id_pair_meta(self, update, to_id, meta):
if to_id in update:
mlist = update[to_id]
else:
mlist = []
update[to_id] = mlist
mlist.append(meta)
# end _update_id_pair_meta
def _publish_update(self, self_imid, update):
if self_imid not in self._id_to_metas:
self._id_to_metas[self_imid] = {}
def _build_request_id_self(imid, metalist):
request = ''
for m in metalist:
request += unicode(PublishUpdateOperation(
id1=unicode(Identity(name=self_imid, type="other",
other_type="extended")),
metadata=unicode(m),
lifetime='forever'))
return request
def _build_request_id_pair(id1, id2, metalist):
request = ''
for m in metalist:
request += unicode(PublishUpdateOperation(
id1=unicode(Identity(name=id1, type="other",
other_type="extended")),
id2=unicode(Identity(name=id2, type="other",
other_type="extended")),
metadata=unicode(m),
lifetime='forever'))
return request
mapclient = self._mapclient
requests = []
if 'self' in update:
metalist = update['self']
requests.append(
_build_request_id_self(self_imid, metalist))
# remember what we wrote for diffing during next update
for m in metalist:
meta_name = m._Metadata__name.replace('contrail:', '')
self._id_to_metas[self_imid][meta_name] = [{'meta':m}]
for id2 in update:
if id2 == 'self':
continue
metalist = update[id2]
requests.append(
_build_request_id_pair(self_imid, id2, metalist))
# remember what we wrote for diffing during next update
for m in metalist:
meta_name = m._Metadata__name.replace('contrail:', '')
if meta_name in self._id_to_metas[self_imid]:
self._id_to_metas[self_imid][meta_name].append({'meta':m,
'id': id2})
else:
self._id_to_metas[self_imid][meta_name] = [{'meta':m,
'id': id2}]
if id2 not in self._id_to_metas:
self._id_to_metas[id2] = {}
if meta_name in self._id_to_metas[id2]:
self._id_to_metas[id2][meta_name].append({'meta':m,
'id': self_imid})
else:
self._id_to_metas[id2][meta_name] = [{'meta':m,
'id': self_imid}]
if self.accumulator is not None:
self.accumulator.append(requests)
self.accumulated_request_len += len(requests)
if self.accumulated_request_len >= 1024*1024:
                upd_str = ''.join(
                    ''.join(request) for request in self.accumulator)
self._publish_with_trace('update', upd_str, async=True)
self.accumulator = []
self.accumulated_request_len = 0
else:
upd_str = ''.join(requests)
self._publish_with_trace('update', upd_str, async=True)
# end _publish_update
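    # A sketch of the accumulator protocol assumed above: a bulk loader can
    # set self.accumulator to a list so that _publish_update buffers request
    # bodies and flushes them in batches once the accumulated threshold is
    # crossed; the caller is then responsible for publishing any remainder
    # and restoring immediate mode, e.g. (hypothetical driver code):
    #
    #   ifmap_db.accumulator = []
    #   for imid, update in pending:
    #       ifmap_db._publish_update(imid, update)   # buffered
    #   leftover = ''.join(''.join(req) for req in ifmap_db.accumulator)
    #   if leftover:
    #       ifmap_db._publish_with_trace('update', leftover, async=True)
    #   ifmap_db.accumulator = None                  # back to immediate publish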
def _search(self, start_id, match_meta=None, result_meta=None,
max_depth=1):
        # set ifmap search parameters
srch_params = {}
srch_params['max-depth'] = str(max_depth)
if match_meta is not None:
srch_params['match-links'] = match_meta
if result_meta is not None:
# all => don't set result-filter, so server returns all id + meta
if result_meta == "all":
pass
else:
srch_params['result-filter'] = result_meta
else:
# default to return match_meta metadata types only
srch_params['result-filter'] = match_meta
mapclient = self._mapclient
srch_req = SearchRequest(mapclient.get_session_id(), start_id,
search_parameters=srch_params
)
result = mapclient.call('search', srch_req)
return result
# end _search
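    # Example (illustrative): fetch all identities and metadata one hop
    # away from an identifier:
    #
    #   start_id = str(Identity(name=self_imid, type='other',
    #                           other_type='extended'))
    #   srch_result = self._search(start_id, result_meta='all', max_depth=1)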
def _parse(self, srch_result, xpath_expr):
soap_doc = etree.parse(StringIO.StringIO(srch_result))
result_items = soap_doc.xpath(xpath_expr,
namespaces=self._NAMESPACES)
return result_items
# end _parse
def _search_and_parse(self, start_id, xpath_expr,
match_meta=None, result_meta=None, max_depth=0):
result = self._search(start_id, match_meta, result_meta, max_depth)
result_items = self._parse(result, xpath_expr)
return result_items
# end _search_and_parse
def _get_id_meta_refs(self, result_items, self_type, parent_type):
# Given parsed result items from search, returns # of idents + metadata
# referring to this ident (incl self + parent). In addition, parent's
# name and names of non-parent, non-self idents referring to this ident
# are returned. TODO should this be moved to cfgm/common
ref_cnt = 0
ref_set = set()
ref_names = ""
parent_imid = ""
imid = self._imid_handler
for r_item in result_items:
if r_item.tag == 'identity':
ident_name = r_item.attrib['name']
ident_type = cfgm_common.imid.ifmap_id_to_type(ident_name)
# No action if already encountered
if ident_name in ref_set:
continue
ref_cnt = ref_cnt + 1
ref_set.add(ident_name)
if (ident_type == self_type):
continue
if (ident_type == parent_type):
parent_imid = r_item.attrib['name']
continue
# non-parent, non-self refs
ref_names = "%s %s" % (ref_names, ident_name)
elif r_item.tag == 'metadata':
# TBI figure out meta only belonging to self
ref_cnt = ref_cnt + 1
meta_elem = r_item.getchildren()[0]
meta_name = re.sub("{.*}", "", meta_elem.tag)
ref_names = "%s %s" % (ref_names, meta_name)
return ref_cnt, parent_imid, ref_names
# end _get_id_meta_refs
def fq_name_to_ifmap_id(self, obj_type, fq_name):
return cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, fq_name)
# end fq_name_to_ifmap_id
def ifmap_id_to_fq_name(self, ifmap_id):
return cfgm_common.imid.get_fq_name_from_ifmap_id(ifmap_id)
# end ifmap_id_to_fq_name
# end class VncIfmapClient
class Imid(ImidGen):
pass
# end class Imid
class VncCassandraClient(VncCassandraClientGen):
# Name to ID mapping keyspace + tables
_UUID_KEYSPACE_NAME = 'config_db_uuid'
    # has obj uuid as rowkey; columns as written by the helpers below:
    # 'type', 'fq_name', 'prop:<name>', 'parent:<type>:<uuid>',
    # 'children:<type>:<uuid>', 'ref:<type>:<uuid>', 'backref:<type>:<uuid>'
    _OBJ_UUID_CF_NAME = 'obj_uuid_table'
    # has obj type ('-' replaced by '_') as rowkey; column names of the form
    # '<fq-name joined by ':'>:<obj uuid>' (see fq_name_to_uuid)
    _OBJ_FQ_NAME_CF_NAME = 'obj_fq_name_table'
# has obj uuid as rowkey; ascii as column type; <fq_name>, <ifmap_id>
# <obj_json> <child_cf_col_name> as column values
_UUID_CF_NAME = 'uuid_table'
# has type:fq_name as rowkey; ascii as column type; <obj uuid> <ifmap_id>
# as column values
_FQ_NAME_CF_NAME = 'fq_name_table'
# has ifmap_id as rowkey; ascii as column type
# <obj uuid>, <fq_name> as column values
# ifmap_id itself is contrail:<type>:<fq-name delimited by ':'>
_IFMAP_ID_CF_NAME = 'ifmap_id_table'
# has obj uuid:<child-type> as rowkey; timeuuid column type; <child obj
# uuid> as column values
_CHILDREN_CF_NAME = 'children_table'
_SUBNET_CF_NAME = 'subnet_bitmask_table'
# Useragent datastore keyspace + tables (used by quantum plugin currently)
_USERAGENT_KEYSPACE_NAME = 'useragent'
_USERAGENT_KV_CF_NAME = 'useragent_keyval_table'
def __init__(self, db_client_mgr, cass_srv_list, reset_config, db_prefix):
super(VncCassandraClient, self).__init__()
self._db_client_mgr = db_client_mgr
self._reset_config = reset_config
self._cache_uuid_to_fq_name = {}
if db_prefix:
self._db_prefix = '%s_' %(db_prefix)
else:
self._db_prefix = ''
self._cassandra_init(cass_srv_list)
# end __init__
# Helper routines for cassandra
def _cassandra_init(self, server_list):
# 1. Ensure keyspace and schema/CFs exist
# 2. Read in persisted data and publish to ifmap server
ConnectionState.update(conn_type = ConnectionType.DATABASE,
name = 'Cassandra', status = ConnectionStatus.INIT, message = '',
server_addrs = server_list)
uuid_ks_name = '%s%s' %(self._db_prefix, VncCassandraClient._UUID_KEYSPACE_NAME)
obj_uuid_cf_info = (VncCassandraClient._OBJ_UUID_CF_NAME, None)
obj_fq_name_cf_info = (VncCassandraClient._OBJ_FQ_NAME_CF_NAME, None)
uuid_cf_info = (VncCassandraClient._UUID_CF_NAME, None)
fq_name_cf_info = (VncCassandraClient._FQ_NAME_CF_NAME, None)
ifmap_id_cf_info = (VncCassandraClient._IFMAP_ID_CF_NAME, None)
subnet_cf_info = (VncCassandraClient._SUBNET_CF_NAME, None)
children_cf_info = (
VncCassandraClient._CHILDREN_CF_NAME, TIME_UUID_TYPE)
self._cassandra_ensure_keyspace(
server_list, uuid_ks_name,
[obj_uuid_cf_info, obj_fq_name_cf_info,
uuid_cf_info, fq_name_cf_info, ifmap_id_cf_info,
subnet_cf_info, children_cf_info])
useragent_ks_name = '%s%s' %(self._db_prefix, VncCassandraClient._USERAGENT_KEYSPACE_NAME)
useragent_kv_cf_info = (VncCassandraClient._USERAGENT_KV_CF_NAME, None)
self._cassandra_ensure_keyspace(server_list, useragent_ks_name,
[useragent_kv_cf_info])
uuid_pool = pycassa.ConnectionPool(
uuid_ks_name, server_list, max_overflow=-1,
use_threadlocal=True, prefill=True, pool_size=20, pool_timeout=120,
max_retries=-1, timeout=5)
useragent_pool = pycassa.ConnectionPool(
useragent_ks_name, server_list, max_overflow=-1,
use_threadlocal=True, prefill=True, pool_size=20, pool_timeout=120,
max_retries=-1, timeout=5)
rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
self._obj_uuid_cf = pycassa.ColumnFamily(
uuid_pool, VncCassandraClient._OBJ_UUID_CF_NAME,
read_consistency_level = rd_consistency,
write_consistency_level = wr_consistency)
self._obj_fq_name_cf = pycassa.ColumnFamily(
uuid_pool, VncCassandraClient._OBJ_FQ_NAME_CF_NAME,
read_consistency_level = rd_consistency,
write_consistency_level = wr_consistency)
self._useragent_kv_cf = pycassa.ColumnFamily(
useragent_pool, VncCassandraClient._USERAGENT_KV_CF_NAME,
read_consistency_level = rd_consistency,
write_consistency_level = wr_consistency)
self._subnet_cf = pycassa.ColumnFamily(
uuid_pool, VncCassandraClient._SUBNET_CF_NAME,
read_consistency_level = rd_consistency,
write_consistency_level = wr_consistency)
ConnectionState.update(conn_type = ConnectionType.DATABASE,
name = 'Cassandra', status = ConnectionStatus.UP, message = '',
server_addrs = server_list)
# end _cassandra_init
def _cassandra_ensure_keyspace(self, server_list,
keyspace_name, cf_info_list):
# Retry till cassandra is up
server_idx = 0
num_dbnodes = len(server_list)
connected = False
while not connected:
try:
cass_server = server_list[server_idx]
sys_mgr = SystemManager(cass_server)
connected = True
except Exception as e:
# TODO do only for
# thrift.transport.TTransport.TTransportException
server_idx = (server_idx + 1) % num_dbnodes
time.sleep(3)
if self._reset_config:
try:
sys_mgr.drop_keyspace(keyspace_name)
except pycassa.cassandra.ttypes.InvalidRequestException as e:
# TODO verify only EEXISTS
print "Warning! " + str(e)
try:
sys_mgr.create_keyspace(keyspace_name, SIMPLE_STRATEGY,
{'replication_factor': str(num_dbnodes)})
except pycassa.cassandra.ttypes.InvalidRequestException as e:
# TODO verify only EEXISTS
print "Warning! " + str(e)
for cf_info in cf_info_list:
try:
(cf_name, comparator_type) = cf_info
if comparator_type:
sys_mgr.create_column_family(
keyspace_name, cf_name,
comparator_type=comparator_type,
default_validation_class='UTF8Type')
else:
sys_mgr.create_column_family(keyspace_name, cf_name,
default_validation_class='UTF8Type')
except pycassa.cassandra.ttypes.InvalidRequestException as e:
# TODO verify only EEXISTS
print "Warning! " + str(e)
sys_mgr.alter_column_family(keyspace_name, cf_name,
default_validation_class='UTF8Type')
# end _cassandra_ensure_keyspace
def _create_prop(self, bch, obj_uuid, prop_name, prop_val):
bch.insert(obj_uuid, {'prop:%s' % (prop_name): json.dumps(prop_val)})
# end _create_prop
def _update_prop(self, bch, obj_uuid, prop_name, new_props):
if new_props[prop_name] is None:
bch.remove(obj_uuid, columns=['prop:' + prop_name])
else:
bch.insert(
obj_uuid,
{'prop:' + prop_name: json.dumps(new_props[prop_name])})
# prop has been accounted for, remove so only new ones remain
del new_props[prop_name]
# end _update_prop
def _create_child(self, bch, parent_type, parent_uuid,
child_type, child_uuid):
child_col = {'children:%s:%s' %
(child_type, child_uuid): json.dumps(None)}
bch.insert(parent_uuid, child_col)
parent_col = {'parent:%s:%s' %
(parent_type, parent_uuid): json.dumps(None)}
bch.insert(child_uuid, parent_col)
# end _create_child
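    # Example (hypothetical uuids): _create_child(bch, 'project', p_uuid,
    # 'virtual_network', vn_uuid) writes column
    # 'children:virtual_network:<vn_uuid>' on the project row and
    # 'parent:project:<p_uuid>' on the virtual-network row.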
def _read_child(self, result, obj_uuid, child_type,
child_uuid, child_tstamp):
if '%ss' % (child_type) not in result:
result['%ss' % (child_type)] = []
child_info = {}
child_info['to'] = self.uuid_to_fq_name(child_uuid)
child_info['href'] = self._db_client_mgr.generate_url(
child_type, child_uuid)
child_info['uuid'] = child_uuid
child_info['tstamp'] = child_tstamp
result['%ss' % (child_type)].append(child_info)
# end _read_child
    def _delete_child(self, bch, parent_type, parent_uuid,
                      child_type, child_uuid):
        bch.remove(parent_uuid, columns=[
            'children:%s:%s' % (child_type, child_uuid)])
    # end _delete_child
def _create_ref(self, bch, obj_type, obj_uuid, ref_type,
ref_uuid, ref_data):
bch.insert(
obj_uuid, {'ref:%s:%s' %
(ref_type, ref_uuid): json.dumps(ref_data)})
if obj_type == ref_type:
bch.insert(
ref_uuid, {'ref:%s:%s' %
(obj_type, obj_uuid): json.dumps(ref_data)})
else:
bch.insert(
ref_uuid, {'backref:%s:%s' %
(obj_type, obj_uuid): json.dumps(ref_data)})
# end _create_ref
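    # Example (hypothetical uuids): _create_ref(bch, 'virtual_network',
    # vn_uuid, 'network_ipam', ipam_uuid, ref_data) writes column
    # 'ref:network_ipam:<ipam_uuid>' on the referrer row and, since the
    # types differ, 'backref:virtual_network:<vn_uuid>' on the referred row.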
def _read_ref(self, result, obj_uuid, ref_type, ref_uuid, ref_data_json):
if '%s_refs' % (ref_type) not in result:
result['%s_refs' % (ref_type)] = []
ref_data = json.loads(ref_data_json)
ref_info = {}
try:
ref_info['to'] = self.uuid_to_fq_name(ref_uuid)
except NoIdError as e:
ref_info['to'] = ['ERROR']
if ref_data:
try:
ref_info['attr'] = ref_data['attr']
except KeyError:
# TODO remove backward compat old format had attr directly
ref_info['attr'] = ref_data
ref_info['href'] = self._db_client_mgr.generate_url(
ref_type, ref_uuid)
ref_info['uuid'] = ref_uuid
result['%s_refs' % (ref_type)].append(ref_info)
# end _read_ref
def _read_back_ref(self, result, obj_uuid, back_ref_type,
back_ref_uuid, back_ref_data_json):
if '%s_back_refs' % (back_ref_type) not in result:
result['%s_back_refs' % (back_ref_type)] = []
back_ref_info = {}
back_ref_info['to'] = self.uuid_to_fq_name(back_ref_uuid)
back_ref_data = json.loads(back_ref_data_json)
if back_ref_data:
try:
back_ref_info['attr'] = back_ref_data['attr']
except KeyError:
# TODO remove backward compat old format had attr directly
back_ref_info['attr'] = back_ref_data
back_ref_info['href'] = self._db_client_mgr.generate_url(
back_ref_type, back_ref_uuid)
back_ref_info['uuid'] = back_ref_uuid
result['%s_back_refs' % (back_ref_type)].append(back_ref_info)
# end _read_back_ref
def _update_ref(self, bch, obj_type, obj_uuid, ref_type,
old_ref_uuid, new_ref_infos):
if ref_type not in new_ref_infos:
# update body didn't touch this type, nop
return
if old_ref_uuid not in new_ref_infos[ref_type]:
# remove old ref
bch.remove(obj_uuid, columns=[
'ref:%s:%s' % (ref_type, old_ref_uuid)])
if obj_type == ref_type:
bch.remove(old_ref_uuid, columns=[
'ref:%s:%s' % (obj_type, obj_uuid)])
else:
bch.remove(old_ref_uuid, columns=[
'backref:%s:%s' % (obj_type, obj_uuid)])
self._db_client_mgr.dbe_cache_invalidate({'uuid':
old_ref_uuid})
else:
# retain old ref with new ref attr
new_ref_data = new_ref_infos[ref_type][old_ref_uuid]
bch.insert(
obj_uuid,
{'ref:%s:%s' %
(ref_type, old_ref_uuid): json.dumps(new_ref_data)})
if obj_type == ref_type:
bch.insert(
old_ref_uuid,
{'ref:%s:%s' %
(obj_type, obj_uuid): json.dumps(new_ref_data)})
else:
bch.insert(
old_ref_uuid,
{'backref:%s:%s' %
(obj_type, obj_uuid): json.dumps(new_ref_data)})
self._db_client_mgr.dbe_cache_invalidate({'uuid':
old_ref_uuid})
# uuid has been accounted for, remove so only new ones remain
del new_ref_infos[ref_type][old_ref_uuid]
# end _update_ref
def _delete_ref(self, bch, obj_type, obj_uuid, ref_type, ref_uuid):
send = False
if bch is None:
send = True
            bch = self._obj_uuid_cf.batch()
bch.remove(obj_uuid, columns=['ref:%s:%s' % (ref_type, ref_uuid)])
if obj_type == ref_type:
bch.remove(ref_uuid, columns=[
'ref:%s:%s' % (obj_type, obj_uuid)])
else:
bch.remove(ref_uuid, columns=[
'backref:%s:%s' % (obj_type, obj_uuid)])
if send:
bch.send()
# end _delete_ref
def is_latest(self, id, tstamp):
id_perms_json = self._obj_uuid_cf.get(
id, columns=['prop:id_perms'])['prop:id_perms']
id_perms = json.loads(id_perms_json)
        return id_perms['last_modified'] == tstamp
# end is_latest
def cache_uuid_to_fq_name_add(self, id, fq_name):
self._cache_uuid_to_fq_name[id] = fq_name
# end cache_uuid_to_fq_name_add
def cache_uuid_to_fq_name_del(self, id):
try:
del self._cache_uuid_to_fq_name[id]
except KeyError:
pass
# end cache_uuid_to_fq_name_del
def update_last_modified(self, bch, obj_uuid, id_perms=None):
if id_perms is None:
id_perms = json.loads(self._obj_uuid_cf.get(obj_uuid, ['prop:id_perms'])['prop:id_perms'])
id_perms['last_modified'] = datetime.datetime.utcnow().isoformat()
self._update_prop(bch, obj_uuid, 'id_perms', {'id_perms': id_perms})
# end update_last_modified
def uuid_to_fq_name(self, id):
try:
#TODO remove from cache on delete_notify
return self._cache_uuid_to_fq_name[id]
except KeyError:
try:
fq_name_json = self._obj_uuid_cf.get(
id, columns=['fq_name'])['fq_name']
except pycassa.NotFoundException:
raise NoIdError(id)
fq_name = json.loads(fq_name_json)
self.cache_uuid_to_fq_name_add(id, fq_name)
return fq_name
# end uuid_to_fq_name
def uuid_to_obj_type(self, id):
try:
type_json = self._obj_uuid_cf.get(id, columns=['type'])['type']
except pycassa.NotFoundException:
raise NoIdError(id)
return json.loads(type_json)
    # end uuid_to_obj_type
def fq_name_to_uuid(self, obj_type, fq_name):
method_name = obj_type.replace('-', '_')
fq_name_str = ':'.join(fq_name)
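        # ';' is the ASCII character immediately after ':', so the
        # [col_start, col_fin) range below selects exactly the columns
        # whose names start with '<encoded fq_name_str>:'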
col_start = '%s:' % (utils.encode_string(fq_name_str))
col_fin = '%s;' % (utils.encode_string(fq_name_str))
try:
col_info_iter = self._obj_fq_name_cf.xget(
method_name, column_start=col_start, column_finish=col_fin)
except pycassa.NotFoundException:
raise NoIdError('%s %s' % (obj_type, fq_name))
col_infos = list(col_info_iter)
if len(col_infos) == 0:
raise NoIdError('%s %s' % (obj_type, fq_name))
for (col_name, col_val) in col_infos:
obj_uuid = col_name.split(':')[-1]
return obj_uuid
# end fq_name_to_uuid
def uuid_to_obj_dict(self, id):
try:
obj_cols = self._obj_uuid_cf.get(id)
except pycassa.NotFoundException:
raise NoIdError(id)
return obj_cols
# end uuid_to_obj_dict
def uuid_to_obj_perms(self, id):
try:
id_perms_json = self._obj_uuid_cf.get(
id, columns=['prop:id_perms'])['prop:id_perms']
id_perms = json.loads(id_perms_json)
except pycassa.NotFoundException:
raise NoIdError(id)
return id_perms
# end uuid_to_obj_perms