Skip to content

Commit

Permalink
1. Rename multi_tenancy to aaa_mode for analytics API
Browse files Browse the repository at this point in the history
Handle keystone v2 and v3 token infos returned by
VNC API. Enable cloud-admin-only aaa_mode by default

Change analytics DB and underlay to overlay mapper to
use local admin port when querying opserver

Do not cache auth_token in vnc lib

Closes-Bug: #1599654

2. Changes to bring analytics authenticated access in sync with config

  1. Rename aaa_mode value cloud-admin-only to cloud-admin
  2. CLOUD_ADMIN_ROLE defaults to admin instead of cloud-admin

Partial-Bug: #1607563
(cherry picked from commit 42db6e3)

3. Fix missing import of OpServerUtils in analytics_db.py

Closes-Bug: #1609054
(cherry picked from commit cf5f056)

4. Remove aaa_mode value cloud-admin-only

Closes-Bug: #1609987
(cherry picked from commit 58a8a0f)

5. Keep on trying to create VNC API client from analytics API

The gevent that creates the VNC API client was exiting due to
authentication failure exception. Changed code to handle all
exceptions and keep on trying to create the API client. The
node status will show the API connection down in case we are
not able to create the VNC API client.

Closes-Bug: #1611158
(cherry picked from commit 8072aa5)

6. Change the obj-perms API to pass in the user token in HTTP headers

With PKI tokens, when user token was passed in query parameters for
obj-perms API the token was getting truncated. Changed the API
to accept user token in X-USER-TOKEN HTTP header.

Closes-Bug: #1614376

Conflicts:
	src/config/api-server/tests/test_perms2.py

7. Fix issue with retrieving the db usage info in analytics-api

Closes-Bug: #1614285
(cherry picked from commit 0ec8bf7)

Change-Id: Id715e40fe3996964b5298da1cd63c248243071dd
  • Loading branch information
Megh Bhatt committed Aug 22, 2016
1 parent 21657ae commit 051a2c1
Show file tree
Hide file tree
Showing 9 changed files with 88 additions and 66 deletions.
16 changes: 8 additions & 8 deletions src/api-lib/vnc_api.py
Expand Up @@ -1090,8 +1090,6 @@ def virtual_network_subnet_ip_count(self, vnobj, subnet_list):
#end virtual_network_subnet_ip_count

def get_auth_token(self):
if self._auth_token:
return self._auth_token
self._headers = self._authenticate(headers=self._headers)
return self._auth_token

Expand Down Expand Up @@ -1200,14 +1198,16 @@ def obj_perms(self, token, obj_uuid=None):
validate user token. Optionally, check token authorization for an object.
rv {'token_info': <token-info>, 'permissions': 'RWX'}
"""
query = 'token=%s' % token
if obj_uuid:
query += '&uuid=%s' % obj_uuid
self._headers['X-USER-TOKEN'] = token
query = 'uuid=%s' % obj_uuid if obj_uuid else ''
try:
rv = self._request_server(rest.OP_GET, "/obj-perms", data=query)
return json.loads(rv)
rv_json = self._request_server(rest.OP_GET, "/obj-perms", data=query)
rv = json.loads(rv_json)
except PermissionDenied:
return None
rv = None
finally:
del self._headers['X-USER-TOKEN']
return rv

# change object ownsership
def chown(self, obj_uuid, owner):
Expand Down
29 changes: 9 additions & 20 deletions src/config/api-server/tests/test_perms2.py
Expand Up @@ -25,6 +25,7 @@
import inspect
import requests
import stevedore
import bottle

from vnc_api.vnc_api import *
import keystoneclient.exceptions as kc_exceptions
Expand Down Expand Up @@ -91,10 +92,6 @@ def __init__(self, apis_ip, apis_port, kc, name, password, role, project):

role_dict = {role.name:role for role in kc.roles.list()}
user_dict = {user.name:user for user in kc.users.list()}
self.user = user_dict[self.name]

# update tenant ID (needed if user entry already existed in keystone)
self.user.tenant_id = tenant.id

logger.info( 'Adding user %s with role %s to tenant %s' \
% (name, role, project))
Expand All @@ -104,7 +101,7 @@ def __init__(self, apis_ip, apis_port, kc, name, password, role, project):
pass

self.vnc_lib = MyVncApi(username = self.name, password = self.password,
tenant_name = self.project,
tenant_name = self.project, tenant_id = self.project_uuid, user_role = role,
api_server_host = apis_ip, api_server_port = apis_port)
# end __init__

Expand All @@ -114,9 +111,7 @@ def api_acl_name(self):
return rg_name

def check_perms(self, obj_uuid):
query = 'token=%s&uuid=%s' % (self.vnc_lib.get_token(), obj_uuid)
rv = self.vnc_lib._request_server(rest.OP_GET, "/obj-perms", data=query)
rv = json.loads(rv)
rv = self.vnc_lib.obj_perms(self.vnc_lib.get_auth_token(), obj_uuid)
return rv['permissions']

# display resource id-perms
Expand Down Expand Up @@ -294,30 +289,24 @@ def token_from_user_info(user_name, tenant_name, domain_name, role_name,

class MyVncApi(VncApi):
def __init__(self, username = None, password = None,
tenant_name = None, api_server_host = None, api_server_port = None):
tenant_name = None, tenant_id = None, user_role = None,
api_server_host = None, api_server_port = None):
self._username = username
self._tenant_name = tenant_name
self.auth_token = None
self._kc = keystone.Client(username='admin', password='contrail123',
tenant_name='admin',
auth_url='http://127.0.0.1:5000/v2.0')
self._tenant_id = tenant_id
self._user_role = user_role
VncApi.__init__(self, username = username, password = password,
tenant_name = tenant_name, api_server_host = api_server_host,
api_server_port = api_server_port)

def _authenticate(self, response=None, headers=None):
role_name = self._kc.user_role(self._username, self._tenant_name)
uobj = self._kc.users.get(self._username)
rval = token_from_user_info(self._username, self._tenant_name,
'default-domain', role_name, uobj.tenant_id)
'default-domain', self._user_role, self._tenant_id)
new_headers = headers or {}
new_headers['X-AUTH-TOKEN'] = rval
self.auth_token = rval
self._auth_token = rval
return new_headers

def get_token(self):
return self.auth_token

# This is needed for VncApi._authenticate invocation from within Api server.
# We don't have access to user information so we hard code admin credentials.
def ks_admin_authenticate(self, response=None, headers=None):
Expand Down
4 changes: 2 additions & 2 deletions src/config/api-server/vnc_cfg_api_server.py
Expand Up @@ -1692,10 +1692,10 @@ def documentation_http_get(self, filename):
# end documentation_http_get

def obj_perms_http_get(self):
if 'token' not in get_request().query:
if 'HTTP_X_USER_TOKEN' not in get_request().environ:
raise cfgm_common.exceptions.HttpError(
400, 'User token needed for validation')
user_token = get_request().query.token.encode("ascii")
user_token = get_request().environ['HTTP_X_USER_TOKEN'].encode("ascii")

# get permissions in internal context
try:
Expand Down
16 changes: 11 additions & 5 deletions src/opserver/analytics_db.py
Expand Up @@ -34,6 +34,7 @@
from cassandra.query import named_tuple_factory
from cassandra.query import PreparedStatement, tuple_factory
import platform
from opserver_util import OpServerUtils

class AnalyticsDb(object):
def __init__(self, logger, cassandra_server_list,
Expand Down Expand Up @@ -399,20 +400,25 @@ def db_purge(self, purge_cutoff, purge_id):
return self.db_purge_cql(purge_cutoff, purge_id)
# end db_purge

def get_dbusage_info(self, rest_api_ip, rest_api_port):
def get_dbusage_info(self, ip, port, user, password):
"""Collects database usage information from all db nodes
Returns:
A dictionary with db node name as key and db usage in % as value
"""

to_return = {}
try:
uve_url = "http://" + rest_api_ip + ":" + str(rest_api_port) + "/analytics/uves/database-nodes?cfilt=DatabaseUsageInfo"
node_dburls = json.loads(urllib2.urlopen(uve_url).read())
uve_url = "http://" + ip + ":" + str(port) + \
"/analytics/uves/database-nodes?cfilt=DatabaseUsageInfo"
data = OpServerUtils.get_url_http(uve_url, user, password)
node_dburls = json.loads(data.text)

for node_dburl in node_dburls:
# calculate disk usage percentage for analytics in each cassandra node
db_uve_state = json.loads(urllib2.urlopen(node_dburl['href']).read())
# calculate disk usage percentage for analytics in each
# cassandra node
db_uve_data = OpServerUtils.get_url_http(node_dburl['href'],
user, password)
db_uve_state = json.loads(db_uve_data.text)
db_usage_in_perc = (100*
float(db_uve_state['DatabaseUsageInfo']['database_usage'][0]['analytics_db_size_1k'])/
float(db_uve_state['DatabaseUsageInfo']['database_usage'][0]['disk_space_available_1k'] +
Expand Down
26 changes: 14 additions & 12 deletions src/opserver/opserver.py
Expand Up @@ -47,7 +47,8 @@
ModuleCategoryMap, Module2NodeType, NodeTypeNames, ModuleIds,\
INSTANCE_ID_DEFAULT, COLLECTOR_DISCOVERY_SERVICE_NAME,\
ANALYTICS_API_SERVER_DISCOVERY_SERVICE_NAME, ALARM_GENERATOR_SERVICE_NAME, \
OpServerAdminPort, CLOUD_ADMIN_ROLE
OpServerAdminPort, CLOUD_ADMIN_ROLE, APIAAAModes, \
AAA_MODE_CLOUD_ADMIN, AAA_MODE_NO_AUTH
from sandesh.viz.constants import _TABLES, _OBJECT_TABLES,\
_OBJECT_TABLE_SCHEMA, _OBJECT_TABLE_COLUMN_VALUES, \
_STAT_TABLES, STAT_OBJECTID_FIELD, STAT_VT_PREFIX, \
Expand Down Expand Up @@ -814,7 +815,7 @@ def _parse_args(self, args_str=' '.join(sys.argv[1:])):
'partitions' : 15,
'sandesh_send_rate_limit': SandeshSystem. \
get_sandesh_send_rate_limit(),
'multi_tenancy' : False,
'aaa_mode' : AAA_MODE_CLOUD_ADMIN,
'api_server' : '127.0.0.1:8082',
'admin_port' : OpServerAdminPort,
'cloud_admin_role' : CLOUD_ADMIN_ROLE,
Expand Down Expand Up @@ -848,9 +849,6 @@ def _parse_args(self, args_str=' '.join(sys.argv[1:])):
config.read(args.conf_file)
if 'DEFAULTS' in config.sections():
defaults.update(dict(config.items("DEFAULTS")))
if 'multi_tenancy' in config.options('DEFAULTS'):
defaults['multi_tenancy'] = config.getboolean(
'DEFAULTS', 'multi_tenancy')
if 'REDIS' in config.sections():
redis_opts.update(dict(config.items('REDIS')))
if 'DISCOVERY' in config.sections():
Expand Down Expand Up @@ -946,8 +944,8 @@ def _parse_args(self, args_str=' '.join(sys.argv[1:])):
help="Sandesh send rate limit in messages/sec")
parser.add_argument("--cloud_admin_role",
help="Name of cloud-admin role")
parser.add_argument("--multi_tenancy", action="store_true",
help="Validate resource permissions (implies token validation)")
parser.add_argument("--aaa_mode", choices=APIAAAModes,
help="AAA mode")
parser.add_argument("--auth_host",
help="IP address of keystone server")
parser.add_argument("--auth_protocol",
Expand Down Expand Up @@ -983,7 +981,7 @@ def _parse_args(self, args_str=' '.join(sys.argv[1:])):
self._args.auth_host, self._args.auth_port)
auth_conf_info['api_server_use_ssl'] = False
auth_conf_info['cloud_admin_access_only'] = \
self._args.multi_tenancy
False if self._args.aaa_mode == AAA_MODE_NO_AUTH else True
auth_conf_info['cloud_admin_role'] = self._args.cloud_admin_role
auth_conf_info['admin_port'] = self._args.admin_port
api_server_info = self._args.api_server.split(':')
Expand Down Expand Up @@ -1270,8 +1268,10 @@ def _query(self, request):

if tabl == OVERLAY_TO_UNDERLAY_FLOW_MAP:
overlay_to_underlay_map = OverlayToUnderlayMapper(
request.json, self._args.host_ip,
self._args.rest_api_port, self._logger)
request.json, 'localhost',
self._args.auth_conf_info['admin_port'],
self._args.auth_conf_info['admin_user'],
self._args.auth_conf_info['admin_password'], self._logger)
try:
yield overlay_to_underlay_map.process_query()
except OverlayToUnderlayMapperError as e:
Expand Down Expand Up @@ -2096,8 +2096,10 @@ def _auto_purge(self):
while True:
trigger_purge = False
db_node_usage = self._analytics_db.get_dbusage_info(
self._args.rest_api_ip,
self._args.rest_api_port)
'localhost',
self._args.auth_conf_info['admin_port'],
self._args.auth_conf_info['admin_user'],
self._args.auth_conf_info['admin_password'])
self._logger.info("node usage:" + str(db_node_usage) )
self._logger.info("threshold:" + str(self._args.db_purge_threshold))

Expand Down
7 changes: 5 additions & 2 deletions src/opserver/overlay_to_underlay_mapper.py
Expand Up @@ -27,10 +27,12 @@ class OverlayToUnderlayMapperError(Exception):
class OverlayToUnderlayMapper(object):

def __init__(self, query_json, analytics_api_ip,
analytics_api_port, logger):
analytics_api_port, user, password, logger):
self.query_json = query_json
self._analytics_api_ip = analytics_api_ip
self._analytics_api_port = analytics_api_port
self._user = user
self._password = password
self._logger = logger
if self.query_json is not None:
self._start_time = self.query_json['start_time']
Expand Down Expand Up @@ -233,7 +235,8 @@ def _send_query(self, query):
self._logger.debug('Sending query: %s' % (query))
opserver_url = OpServerUtils.opserver_query_url(self._analytics_api_ip,
str(self._analytics_api_port))
resp = OpServerUtils.post_url_http(opserver_url, query, True)
resp = OpServerUtils.post_url_http(opserver_url, query, self._user,
self._password, True)
try:
resp = json.loads(resp)
value = resp['value']
Expand Down
27 changes: 17 additions & 10 deletions src/opserver/test/test_overlay_to_underlay_mapper.py
Expand Up @@ -235,7 +235,7 @@ def test_get_overlay_flow_data_noerror(self, mock_send_query,
overlay_to_underlay_mapper = \
OverlayToUnderlayMapper(
item['input']['overlay_to_underlay_map_query'],
None, None, logging)
None, None, None, None, logging)
self.assertEqual(item['output']['flowrecord_data'],
overlay_to_underlay_mapper._get_overlay_flow_data())
args, _ = overlay_to_underlay_mapper._send_query.call_args
Expand Down Expand Up @@ -296,7 +296,7 @@ def test_get_overlay_flow_data_raise_exception(self):

for query in queries:
overlay_to_underlay_mapper = \
OverlayToUnderlayMapper(query, None, None, logging)
OverlayToUnderlayMapper(query, None, None, None, None, logging)
self.assertRaises(_OverlayToFlowRecordFieldsNameError,
overlay_to_underlay_mapper._get_overlay_flow_data)
# end test_get_overlay_flow_data_raise_exception
Expand Down Expand Up @@ -618,7 +618,7 @@ def test_get_underlay_flow_data_noerror(self, mock_send_query,
overlay_to_underlay_mapper = \
OverlayToUnderlayMapper(
item['input']['overlay_to_underlay_map_query'],
None, None, logging)
None, None, None, None, logging)
self.assertEqual(item['output']['uflow_data'],
overlay_to_underlay_mapper._get_underlay_flow_data(
item['input']['flow_record_data']))
Expand Down Expand Up @@ -683,7 +683,7 @@ def test_get_underlay_flow_data_raise_exception(self):
for query in queries:
overlay_to_underlay_mapper = \
OverlayToUnderlayMapper(query['overlay_to_underlay_map_query'],
None, None, logging)
None, None, None, None, logging)
self.assertRaises(_UnderlayToUFlowDataFieldsNameError,
overlay_to_underlay_mapper._get_underlay_flow_data,
query['flow_record_data'])
Expand All @@ -696,6 +696,8 @@ def test_send_query_no_error(self, mock_post_url_http):
'input': {
'analytics_api_ip': '10.10.10.1',
'analytics_api_port': 8081,
'username': 'admin',
'password': 'admin123',
'query': {
'table': FLOW_TABLE,
'start_time': 'now-10m', 'end_time': 'now-5m',
Expand All @@ -714,6 +716,8 @@ def test_send_query_no_error(self, mock_post_url_http):
'input': {
'analytics_api_ip': '192.168.10.1',
'analytics_api_port': 8090,
'username': 'admin',
'password': 'admin123',
'query': {
'table': 'StatTable.UFlowData.flow',
'start_time': 1416275005000000,
Expand Down Expand Up @@ -751,11 +755,14 @@ def test_send_query_no_error(self, mock_post_url_http):
for item in input_output_list:
overlay_to_underlay_mapper = \
OverlayToUnderlayMapper(None, item['input']['analytics_api_ip'],
item['input']['analytics_api_port'], logging)
item['input']['analytics_api_port'],
item['input']['username'], item['input']['password'],
logging)
self.assertEqual(overlay_to_underlay_mapper._send_query(
item['input']['query']), item['output']['response']['value'])
OpServerUtils.post_url_http.assert_called_with(
item['output']['query_url'], item['input']['query'], True)
item['output']['query_url'], item['input']['query'],
item['input']['username'], item['input']['password'], True)
# end test_send_query_no_error

@mock.patch('opserver.overlay_to_underlay_mapper.OpServerUtils.post_url_http')
Expand Down Expand Up @@ -810,7 +817,7 @@ def test_send_query_raise_exception(self, mock_post_url_http):
for item in queries:
overlay_to_underlay_mapper = \
OverlayToUnderlayMapper(None, item['analytics_api_ip'],
item['analytics_api_port'], logging)
item['analytics_api_port'], None, None, logging)
self.assertRaises(_QueryError,
overlay_to_underlay_mapper._send_query, item['query'])
# end test_send_query_raise_exception
Expand Down Expand Up @@ -888,7 +895,7 @@ def test_send_response_no_error(self):
overlay_to_underlay_mapper = \
OverlayToUnderlayMapper(
item['input']['overlay_to_underlay_map_query'],
None, None, logging)
None, None, None, None, logging)
self.assertEqual(item['output']['underlay_response'],
json.loads(overlay_to_underlay_mapper._send_response(
item['input']['uflow_data'])))
Expand All @@ -911,7 +918,7 @@ def test_send_response_raise_exception(self):
for item in input_list:
overlay_to_underlay_mapper = \
OverlayToUnderlayMapper(item['overlay_to_underlay_map_query'],
None, None, logging)
None, None, None, None, logging)
self.assertRaises(_UnderlayToUFlowDataFieldsNameError,
overlay_to_underlay_mapper._send_response, item['uflow_data'])
# end test_send_response_raise_exception
Expand Down Expand Up @@ -956,7 +963,7 @@ def test_process_query(self, mock_get_overlay_flow_data,
[json.dumps(item['response']) for item in test_data]
for item in test_data:
overlay_to_underlay_mapper = \
OverlayToUnderlayMapper(None, None, None, logging)
OverlayToUnderlayMapper(None, None, None, None, None, logging)
self.assertEqual(item['response'],
json.loads(overlay_to_underlay_mapper.process_query()))
overlay_to_underlay_mapper._get_overlay_flow_data.called_with()
Expand Down

0 comments on commit 051a2c1

Please sign in to comment.