From a6be817f369ca6f2e30f2c3a39079e8a9133e5e9 Mon Sep 17 00:00:00 2001 From: Divakar Date: Wed, 17 Feb 2016 15:22:01 +0530 Subject: [PATCH] Correct the metadata string of fip-pool to vn The metadata mentioned in xsd for the link between floating-ip-pool to VN is "virtual-network-floating-ip-pool" and Agent is adding a rule in dependency manager for VMI as "floating-ip-pool-virtual-network". This is resulting in no InterestedEvent in dependency tracker if the link added between VN-FIP is the last link in that VMI. As a fix, metadata is corrected in Agent. Change-Id: I484099d8c843493cfcdb618a3165819f9eab5d08 closes-bug: #1544788 Handle vxlan change for local_vm peer in l2 flood. Change-Id: I3eb43c9541eec6ef3d343a8aa041b527033c24fe Closes-bug: 1529665 Add ShowRouteAggregateSummaryReq introspect command Change-Id: Ib633b018798e19c7fc73f4bebaef10283aef1a89 Related-bug: 1500698 database-purge request fails if analytics-api starts before cassandra analytics-api creates cql session only on creation of AnalyticsDb object. Therefore, if analytics-api starts before cassandra becomes active, then database-purge request fails. This patch ensures that the cql session is created upon purge request if cql session is None. Change-Id: Ic38d82cd1d614cd80af917ff205c3a63b95d6b92 Closes-Bug: #1546435 LBaaS scheduler should not use hardcoded tenant name This fix removes the hardcoded 'admin' tenant name from lbaas scheduler and reads the tenant from config. It also fixes the default admin_tenant_name in svc_monitor.py to 'admin'. 
Closes-Bug: #1543296 Change-Id: If5e558e27ef6fd81d81cf56a317f39dbac2854ce Fix error message The VM uuid should be shown instead of the VN uuid Change-Id: I56a65f5aabbc441139784e672babcf5e7508b880 Closes-Bug: #1546107 Adding UVE information for Health Check Virtual Machine Interface UVE will also carry additional information on the HealthCheck instances running on it, its state and running status Closes-Bug: 1546390 Change-Id: Ib6f2dac3e80544852359b8e7633343011b9f7a4e Agent changes to accept port add requests without IP address. This is done for both REST and thrift interface. Also add a flag to Flow info maintained by FlowStatsCollector to indicate whether a delete was enqueued for that flow or not. Change-Id: Ibe1cd81bcd71d42919995860e846383740bdfcdc Closes-Bug: #1546939 --- src/bgp/routing-instance/iroute_aggregator.h | 4 +- .../routing-instance/route_aggregate.sandesh | 16 +- .../route_aggregate_internal.sandesh | 5 + src/bgp/routing-instance/route_aggregator.cc | 24 ++- src/bgp/routing-instance/route_aggregator.h | 4 +- .../routing-instance/show_route_aggregate.cc | 190 +++++++++++++----- src/bgp/test/route_aggregator_test.cc | 39 ++-- .../scheduler/vrouter_scheduler.py | 3 +- .../svc-monitor/svc_monitor/svc_monitor.py | 2 +- src/opserver/analytics_db.py | 43 ++-- src/opserver/opserver.py | 8 +- .../openstack/instance_service_server.cc | 5 +- src/vnsw/agent/oper/health_check.cc | 7 +- src/vnsw/agent/oper/health_check.h | 2 + .../agent/oper/ifmap_dependency_manager.cc | 2 +- src/vnsw/agent/oper/interface.h | 1 + src/vnsw/agent/oper/multicast.cc | 5 + src/vnsw/agent/oper/test/test_intf.cc | 2 +- src/vnsw/agent/oper/vm_interface.cc | 7 +- src/vnsw/agent/oper/vm_interface.h | 1 + src/vnsw/agent/pkt/test/test_pkt_fip.cc | 49 ++++- src/vnsw/agent/port_ipc/port_ipc_handler.cc | 7 +- src/vnsw/agent/test/test_cmn_util.h | 1 + src/vnsw/agent/test/test_l2route.cc | 31 +++ src/vnsw/agent/test/test_util.cc | 8 +- src/vnsw/agent/uve/interface.sandesh | 9 + 
src/vnsw/agent/uve/interface_uve_table.cc | 19 ++ .../vrouter/flow_stats/flow_export_info.cc | 6 +- .../vrouter/flow_stats/flow_export_info.h | 3 + .../vrouter/flow_stats/flow_stats.sandesh | 1 + .../flow_stats/flow_stats_collector.cc | 14 +- .../vrouter/flow_stats/flow_stats_collector.h | 2 +- 32 files changed, 389 insertions(+), 131 deletions(-) diff --git a/src/bgp/routing-instance/iroute_aggregator.h b/src/bgp/routing-instance/iroute_aggregator.h index 44601017e76..f36725da8f3 100644 --- a/src/bgp/routing-instance/iroute_aggregator.h +++ b/src/bgp/routing-instance/iroute_aggregator.h @@ -23,8 +23,8 @@ class IRouteAggregator { virtual bool IsAggregateRoute(const BgpRoute *route) const = 0; virtual bool IsContributingRoute(const BgpRoute *route) const = 0; - virtual bool FillAggregateRouteInfo(RoutingInstance *ri, - AggregateRouteEntriesInfo *info) const = 0; + virtual bool FillAggregateRouteInfo(AggregateRouteEntriesInfo *info, + bool summary) const = 0; private: friend class RouteAggregatorTest; diff --git a/src/bgp/routing-instance/route_aggregate.sandesh b/src/bgp/routing-instance/route_aggregate.sandesh index 31e2b7c83da..913f76d53d7 100644 --- a/src/bgp/routing-instance/route_aggregate.sandesh +++ b/src/bgp/routing-instance/route_aggregate.sandesh @@ -9,15 +9,25 @@ struct AggregateRouteInfo { 1: string prefix; 2: bgp_peer.ShowRouteBrief aggregate_rt; 3: string nexthop; - 4: list contributors; - 5: bool deleted; + 4: bool deleted; + 5: optional list contributors; } struct AggregateRouteEntriesInfo { - 1: string ri_name; + 1: string name (link="ShowRouteAggregateReq"); 2: list aggregate_route_list; } +response sandesh ShowRouteAggregateSummaryResp { + 1: list aggregate_route_entries; + 2: optional string next_batch (link="ShowRouteAggregateSummaryReqIterate", + link_title="next_batch"); +} + +request sandesh ShowRouteAggregateSummaryReq { + 1: string search_string; +} + response sandesh ShowRouteAggregateResp { 1: list aggregate_route_entries; 2: optional 
string next_batch (link="ShowRouteAggregateReqIterate", diff --git a/src/bgp/routing-instance/route_aggregate_internal.sandesh b/src/bgp/routing-instance/route_aggregate_internal.sandesh index c95766b524a..834453ad1fb 100644 --- a/src/bgp/routing-instance/route_aggregate_internal.sandesh +++ b/src/bgp/routing-instance/route_aggregate_internal.sandesh @@ -1,6 +1,11 @@ /* * Copyright (c) 2015 Juniper Networks, Inc. All rights reserved. */ + request sandesh ShowRouteAggregateReqIterate { 1: string iterate_info; } + +request sandesh ShowRouteAggregateSummaryReqIterate { + 1: string iterate_info; +} diff --git a/src/bgp/routing-instance/route_aggregator.cc b/src/bgp/routing-instance/route_aggregator.cc index 34d27423df5..83474ba975e 100644 --- a/src/bgp/routing-instance/route_aggregator.cc +++ b/src/bgp/routing-instance/route_aggregator.cc @@ -236,7 +236,7 @@ class AggregateRoute : public ConditionMatch { return contributors_[part_id].empty(); } - void FillShowInfo(AggregateRouteInfo *info) const; + void FillShowInfo(AggregateRouteInfo *info, bool summary) const; private: RoutingInstance *routing_instance_; @@ -481,7 +481,8 @@ void AggregateRoute::set_aggregate_route(BgpRoute *aggregate) { } template -void AggregateRoute::FillShowInfo(AggregateRouteInfo *info) const { +void AggregateRoute::FillShowInfo(AggregateRouteInfo *info, + bool summary) const { BgpTable *table = bgp_table(); info->set_deleted(deleted()); info->set_prefix(aggregate_route_prefix_.ToString()); @@ -493,6 +494,9 @@ void AggregateRoute::FillShowInfo(AggregateRouteInfo *info) const { info->set_nexthop(nexthop_.to_string()); + if (summary) + return; + std::vector contributor_list; BOOST_FOREACH(const RouteList &list, contribute_route_list()) { BOOST_FOREACH(BgpRoute *rt, list) { @@ -688,18 +692,18 @@ bool RouteAggregator::IsContributingRoute(const BgpRoute *route) const { } template -bool RouteAggregator::FillAggregateRouteInfo(RoutingInstance *ri, - AggregateRouteEntriesInfo *info) const { - if 
(aggregate_route_map().empty()) return false; - - info->set_ri_name(ri->name()); +bool RouteAggregator::FillAggregateRouteInfo(AggregateRouteEntriesInfo *info, + bool summary) const { + if (aggregate_route_map().empty()) + return false; - for (typename AggregateRouteMap::const_iterator it = aggregate_route_map_.begin(); - it != aggregate_route_map_.end(); it++) { + info->set_name(rtinstance_->name()); + for (typename AggregateRouteMap::const_iterator it = + aggregate_route_map_.begin(); it != aggregate_route_map_.end(); it++) { AggregateRouteT *aggregate = static_cast(it->second.get()); AggregateRouteInfo aggregate_info; - aggregate->FillShowInfo(&aggregate_info); + aggregate->FillShowInfo(&aggregate_info, summary); info->aggregate_route_list.push_back(aggregate_info); } return true; diff --git a/src/bgp/routing-instance/route_aggregator.h b/src/bgp/routing-instance/route_aggregator.h index 98e01c6795e..bf49c383c22 100644 --- a/src/bgp/routing-instance/route_aggregator.h +++ b/src/bgp/routing-instance/route_aggregator.h @@ -199,8 +199,8 @@ class RouteAggregator : public IRouteAggregator { virtual bool IsAggregateRoute(const BgpRoute *route) const; virtual bool IsContributingRoute(const BgpRoute *route) const; - virtual bool FillAggregateRouteInfo(RoutingInstance *ri, - AggregateRouteEntriesInfo *info) const; + virtual bool FillAggregateRouteInfo(AggregateRouteEntriesInfo *info, + bool summary) const; private: class DeleteActor; diff --git a/src/bgp/routing-instance/show_route_aggregate.cc b/src/bgp/routing-instance/show_route_aggregate.cc index 53544baa9fb..e78db5a7d3d 100644 --- a/src/bgp/routing-instance/show_route_aggregate.cc +++ b/src/bgp/routing-instance/show_route_aggregate.cc @@ -4,59 +4,54 @@ #include "bgp/bgp_show_handler.h" +#include +#include + #include "bgp/bgp_server.h" #include "bgp/bgp_show_handler.h" #include "bgp/routing-instance/routing_instance.h" #include "bgp/routing-instance/route_aggregator.h" +#include 
"bgp/routing-instance/route_aggregate_internal_types.h" #include "bgp/routing-instance/route_aggregate_types.h" +using boost::assign::list_of; using std::string; using std::vector; -static bool FillRouteAggregateInfo(Address::Family family, - const string search_string, - AggregateRouteEntriesInfo &info, - RoutingInstance *rtinstance) { - const BgpTable *table = - static_cast(rtinstance->GetTable(family)); - if (!table) - return false; - if (!search_string.empty() && - (table->name().find(search_string) == string::npos) && - (search_string != "deleted" || !table->IsDeleted())) { - return false; - } - - IRouteAggregator *iroute_aggregator = rtinstance->route_aggregator(family); - if (!iroute_aggregator) - return false; - return iroute_aggregator->FillAggregateRouteInfo(rtinstance, &info); -} - -// Specialization of BgpShowHandler<>::CallbackCommon. -template <> -bool BgpShowHandler::CallbackCommon( - const BgpSandeshContext *bsc, Data *data) { - uint32_t page_limit = bsc->page_limit() ? bsc->page_limit() : kPageLimit; - uint32_t iter_limit = bsc->iter_limit() ? 
bsc->iter_limit() : kIterLimit; +static bool FillRouteAggregateInfoList(const BgpSandeshContext *bsc, + bool summary, uint32_t page_limit, uint32_t iter_limit, + const string &start_instance, const string &search_string, + vector *are_list, string *next_instance) { RoutingInstanceMgr *rim = bsc->bgp_server->routing_instance_mgr(); - RoutingInstanceMgr::const_name_iterator it = - rim->name_clower_bound(data->next_entry); + rim->name_clower_bound(start_instance); for (uint32_t iter_count = 0; it != rim->name_cend(); ++it, ++iter_count) { - RoutingInstance *rinstance = it->second; - AggregateRouteEntriesInfo info; - if (FillRouteAggregateInfo(Address::INET, data->search_string, info, - rinstance)) { - data->show_list.push_back(info); - } - if (FillRouteAggregateInfo(Address::INET6, data->search_string, info, - rinstance)) { - data->show_list.push_back(info); + RoutingInstance *rtinstance = it->second; + + vector families = + list_of(Address::INET)(Address::INET6); + BOOST_FOREACH(Address::Family family, families) { + const BgpTable *table = + static_cast(rtinstance->GetTable(family)); + if (!table) + continue; + if (!search_string.empty() && + (table->name().find(search_string) == string::npos) && + (search_string != "deleted" || !table->IsDeleted())) { + continue; + } + + IRouteAggregator *iroute_aggregator = + rtinstance->route_aggregator(family); + if (!iroute_aggregator) + continue; + AggregateRouteEntriesInfo info; + if (!iroute_aggregator->FillAggregateRouteInfo(&info, summary)) + continue; + are_list->push_back(info); } - if (data->show_list.size() >= page_limit) + if (are_list->size() >= page_limit) break; if (iter_count >= iter_limit) break; @@ -68,21 +63,76 @@ bool BgpShowHandlershow_list.size() >= page_limit; - SaveContextToData(it->second->name(), done, data); + bool done = are_list->size() >= page_limit; + *next_instance = it->second->name(); return done; } -// Specialization of BgpShowHandler<>::FillShowList. 
+// +// Specialization of BgpShowHandler<>::CallbackCommon for regular introspect. +// template <> -void BgpShowHandler::FillShowList( - ShowRouteAggregateResp *resp, - const vector &show_list) { +bool BgpShowHandler::CallbackCommon( + const BgpSandeshContext *bsc, Data *data) { + uint32_t page_limit = bsc->page_limit() ? bsc->page_limit() : kPageLimit; + uint32_t iter_limit = bsc->iter_limit() ? bsc->iter_limit() : kIterLimit; + string next_instance; + bool done = FillRouteAggregateInfoList(bsc, false, page_limit, iter_limit, + data->next_entry, data->search_string, &data->show_list, + &next_instance); + if (!next_instance.empty()) + SaveContextToData(next_instance, done, data); + return done; +} + +// +// Specialization of BgpShowHandler<>::FillShowList for regular introspect. +// +template <> +void BgpShowHandler::FillShowList( + ShowRouteAggregateResp *resp, + const vector &show_list) { resp->set_aggregate_route_entries(show_list); } +// +// Specialization of BgpShowHandler<>::CallbackCommon for summary introspect. +// +template <> +bool BgpShowHandler::CallbackCommon( + const BgpSandeshContext *bsc, Data *data) { + uint32_t page_limit = bsc->page_limit() ? bsc->page_limit() : kPageLimit; + uint32_t iter_limit = bsc->iter_limit() ? bsc->iter_limit() : kIterLimit; + string next_instance; + bool done = FillRouteAggregateInfoList(bsc, true, page_limit, iter_limit, + data->next_entry, data->search_string, &data->show_list, + &next_instance); + if (!next_instance.empty()) + SaveContextToData(next_instance, done, data); + return done; +} + +// +// Specialization of BgpShowHandler<>::FillShowList for summary introspect. +// +template <> +void BgpShowHandler::FillShowList( + ShowRouteAggregateSummaryResp *resp, + const vector &show_list) { + resp->set_aggregate_route_entries(show_list); +} + +// // Handler for ShowRouteAggregateReq. 
+// void ShowRouteAggregateReq::HandleRequest() const { RequestPipeline::PipeSpec ps(this); RequestPipeline::StageSpec s1; @@ -127,3 +177,51 @@ void ShowRouteAggregateReqIterate::HandleRequest() const { ps.stages_.push_back(s1); RequestPipeline rp(ps); } + +// +// Handler for ShowRouteAggregateSummaryReq. +// +void ShowRouteAggregateSummaryReq::HandleRequest() const { + RequestPipeline::PipeSpec ps(this); + RequestPipeline::StageSpec s1; + TaskScheduler *scheduler = TaskScheduler::GetInstance(); + + s1.taskId_ = scheduler->GetTaskId("bgp::RouteAggregate"); + s1.cbFn_ = boost::bind(&BgpShowHandler< + ShowRouteAggregateSummaryReq, + ShowRouteAggregateSummaryReqIterate, + ShowRouteAggregateSummaryResp, + AggregateRouteEntriesInfo>::Callback, _1, _2, _3, _4, _5); + s1.allocFn_ = BgpShowHandler< + ShowRouteAggregateSummaryReq, + ShowRouteAggregateSummaryReqIterate, + ShowRouteAggregateSummaryResp, + AggregateRouteEntriesInfo>::CreateData; + s1.instances_.push_back(0); + ps.stages_.push_back(s1); + RequestPipeline rp(ps); +} + +// +// Handler for ShowRouteAggregateSummaryReqIterate. 
+// +void ShowRouteAggregateSummaryReqIterate::HandleRequest() const { + RequestPipeline::PipeSpec ps(this); + RequestPipeline::StageSpec s1; + TaskScheduler *scheduler = TaskScheduler::GetInstance(); + + s1.taskId_ = scheduler->GetTaskId("bgp::RouteAggregate"); + s1.cbFn_ = boost::bind(&BgpShowHandler< + ShowRouteAggregateSummaryReq, + ShowRouteAggregateSummaryReqIterate, + ShowRouteAggregateSummaryResp, + AggregateRouteEntriesInfo>::CallbackIterate, _1, _2, _3, _4, _5); + s1.allocFn_ = BgpShowHandler< + ShowRouteAggregateSummaryReq, + ShowRouteAggregateSummaryReqIterate, + ShowRouteAggregateSummaryResp, + AggregateRouteEntriesInfo>::CreateData; + s1.instances_.push_back(0); + ps.stages_.push_back(s1); + RequestPipeline rp(ps); +} diff --git a/src/bgp/test/route_aggregator_test.cc b/src/bgp/test/route_aggregator_test.cc index 84a277cce3f..cc5fffbff55 100644 --- a/src/bgp/test/route_aggregator_test.cc +++ b/src/bgp/test/route_aggregator_test.cc @@ -379,39 +379,50 @@ class RouteAggregatorTest : public ::testing::Test { return list; } - static void ValidateShowRouteAggregationResponse(Sandesh *sandesh, - string &result, RouteAggregatorTest *self, bool empty) { - ShowRouteAggregateResp *resp = - dynamic_cast(sandesh); - TASK_UTIL_EXPECT_NE((ShowRouteAggregateResp *)NULL, resp); - self->validate_done_ = true; - - if (empty) + template + void ValidateResponse(Sandesh *sandesh, string &result, bool empty) { + RespT *resp = dynamic_cast(sandesh); + TASK_UTIL_EXPECT_NE((RespT *) NULL, resp); + + if (empty) { TASK_UTIL_EXPECT_EQ(0, resp->get_aggregate_route_entries().size()); - else + } else { TASK_UTIL_EXPECT_EQ(1, resp->get_aggregate_route_entries().size()); + } int i = 0; BOOST_FOREACH(const AggregateRouteEntriesInfo &info, resp->get_aggregate_route_entries()) { - TASK_UTIL_EXPECT_EQ(info.get_ri_name(), result); + TASK_UTIL_EXPECT_EQ(info.get_name(), result); i++; } + validate_done_ = true; } - void VerifyRouteAggregateSandesh(std::string ri_name, bool empty=false) { + 
void VerifyRouteAggregateSandesh(std::string ri_name, bool empty = false) { BgpSandeshContext sandesh_context; sandesh_context.bgp_server = bgp_server_.get(); sandesh_context.xmpp_peer_manager = NULL; Sandesh::set_client_context(&sandesh_context); - Sandesh::set_response_callback( - boost::bind(ValidateShowRouteAggregationResponse, _1, ri_name, - this, empty)); + + Sandesh::set_response_callback(boost::bind( + &RouteAggregatorTest::ValidateResponse, + this, _1, ri_name, empty)); ShowRouteAggregateReq *req = new ShowRouteAggregateReq; req->set_search_string(ri_name); validate_done_ = false; req->HandleRequest(); req->Release(); TASK_UTIL_EXPECT_EQ(true, validate_done_); + + Sandesh::set_response_callback(boost::bind( + &RouteAggregatorTest::ValidateResponse, + this, _1, ri_name, empty)); + ShowRouteAggregateSummaryReq *sreq = new ShowRouteAggregateSummaryReq; + sreq->set_search_string(ri_name); + validate_done_ = false; + sreq->HandleRequest(); + sreq->Release(); + TASK_UTIL_EXPECT_EQ(true, validate_done_); } EventManager evm_; diff --git a/src/config/svc-monitor/svc_monitor/scheduler/vrouter_scheduler.py b/src/config/svc-monitor/svc_monitor/scheduler/vrouter_scheduler.py index cd34f7b8446..1919f184ef7 100644 --- a/src/config/svc-monitor/svc_monitor/scheduler/vrouter_scheduler.py +++ b/src/config/svc-monitor/svc_monitor/scheduler/vrouter_scheduler.py @@ -57,7 +57,8 @@ def _get_az_vrouter_list(self): if not self._args.netns_availability_zone: return None az_list = self._nc.oper('availability_zones', 'list', - 'admin', detailed=True) + self._args.admin_tenant_name, + detailed=True) az_vr_list = [] for az in az_list: if self._args.netns_availability_zone not in str(az): diff --git a/src/config/svc-monitor/svc_monitor/svc_monitor.py b/src/config/svc-monitor/svc_monitor/svc_monitor.py index fbeaa4a0a78..320407a3d0e 100644 --- a/src/config/svc-monitor/svc_monitor/svc_monitor.py +++ b/src/config/svc-monitor/svc_monitor/svc_monitor.py @@ -671,7 +671,7 @@ def 
parse_args(args_str): 'auth_insecure': True, 'admin_user': 'user1', 'admin_password': 'password1', - 'admin_tenant_name': 'default-domain' + 'admin_tenant_name': 'admin' } schedops = { 'si_netns_scheduler_driver': diff --git a/src/opserver/analytics_db.py b/src/opserver/analytics_db.py index 0bcff4d87c2..a037dc0d6b1 100644 --- a/src/opserver/analytics_db.py +++ b/src/opserver/analytics_db.py @@ -67,7 +67,7 @@ def connect_db(self): if not AnalyticsDb.use_cql(): self.connect_db_thrift() else: - self.get_cql_session(COLLECTOR_KEYSPACE_CQL) + self.get_cql_session() def connect_db_thrift(self): try: @@ -105,18 +105,17 @@ def _get_sysm(self): # end _get_sysm def _get_analytics_ttls_cql(self): - ret_row = {} - if (self._session is None) : - self._logger.error("Session to %s not initialized" % \ - COLLECTOR_KEYSPACE_CQL) + session = self.get_cql_session() + if not session: return None + ret_row = {} try: ttl_query = "SELECT * FROM %s" % SYSTEM_OBJECT_TABLE.lower() - self._session.row_factory = dict_factory - rs = self._session.execute(ttl_query) + session.row_factory = dict_factory + rs = session.execute(ttl_query) for r in rs: row = r - self._session.row_factory = named_tuple_factory + session.row_factory = named_tuple_factory return (row, 0) except Exception as e: self._logger.error("Exception: analytics_start_time Failure ") @@ -141,7 +140,7 @@ def _get_analytics_ttls_thrift(self): return (ret_row, -1) return (row, 0) - def _get_analytics_ttls(self): + def get_analytics_ttls(self): ret_row = {} if (AnalyticsDb.use_cql): (row, status) = self._get_analytics_ttls_cql() @@ -169,23 +168,27 @@ def _get_analytics_ttls(self): ret_row[SYSTEM_OBJECT_GLOBAL_DATA_TTL] = row[SYSTEM_OBJECT_GLOBAL_DATA_TTL] return ret_row - # end _get_analytics_ttls + # end get_analytics_ttls def _get_analytics_start_time_cql(self): + session = self.get_cql_session() + if not session: + return None # old_row_factory is usually named_tuple_factory - old_row_factory = self._session.row_factory - 
self._session.row_factory = dict_factory + old_row_factory = session.row_factory + session.row_factory = dict_factory try: start_time_query = "SELECT * FROM %s" % \ (SYSTEM_OBJECT_TABLE) - rs = self._session.execute(start_time_query) + rs = session.execute(start_time_query) for r in rs: row = r - self._session.row_factory = old_row_factory + session.row_factory = old_row_factory return row except Exception as e: self._logger.error("Exception: analytics_start_time Failure %s" % e) return None + # end _get_analytics_start_time_cql def _get_analytics_start_time_thrift(self): try: @@ -196,7 +199,7 @@ def _get_analytics_start_time_thrift(self): self._logger.error("Exception: analytics_start_time Failure %s" % e) return None - def _get_analytics_start_time(self): + def get_analytics_start_time(self): if AnalyticsDb.use_cql() is True: row = self._get_analytics_start_time_cql() else: @@ -223,9 +226,11 @@ def _get_analytics_start_time(self): ret_row[SYSTEM_OBJECT_MSG_START_TIME] = row[SYSTEM_OBJECT_MSG_START_TIME] return ret_row - # end _get_analytics_start_time + # end get_analytics_start_time - def get_cql_session(self, keyspace): + def get_cql_session(self): + if self._session: + return self._session creds=None try: if self._cassandra_user is not None and \ @@ -239,12 +244,14 @@ def get_cql_session(self, keyspace): cql_port = '9042' cluster = Cluster(contact_points = server_list, auth_provider = creds, port = cql_port) - self._session=cluster.connect(keyspace) + self._session=cluster.connect(COLLECTOR_KEYSPACE_CQL) self._session.connection_class = GeventConnection self._session.default_consistency_level = ConsistencyLevel.LOCAL_ONE + return self._session except Exception as e: self._logger.error("Exception: get_cql_session Failure %s" % e) return None + # end get_cql_session def _update_analytics_start_time_cql(self, start_times): # The column names in SYSTEM_OBJECT_TABLE have to be encoded in "" diff --git a/src/opserver/opserver.py b/src/opserver/opserver.py index 
c89eaf735ad..b360e39d3d5 100644 --- a/src/opserver/opserver.py +++ b/src/opserver/opserver.py @@ -1784,7 +1784,7 @@ def get_purge_cutoff(self, purge_input, start_times): self._logger.error("start times:" + str(start_times)) - analytics_ttls = self._analytics_db._get_analytics_ttls() + analytics_ttls = self._analytics_db.get_analytics_ttls() analytics_time_range = min( (current_time - start_times[SYSTEM_OBJECT_START_TIME]), 60*60*1000000*analytics_ttls[SYSTEM_OBJECT_GLOBAL_DATA_TTL]) @@ -1822,7 +1822,7 @@ def process_purge_request(self): json.dumps(response), _ERRORS[errno.EBADMSG], {'Content-type': 'application/json'}) - start_times = self._analytics_db._get_analytics_start_time() + start_times = self._analytics_db.get_analytics_start_time() if (start_times == None): self._logger.info("Failed to get the analytics start time") response = {'status': 'failed', @@ -1981,7 +1981,7 @@ def _auto_purge(self): if (trigger_purge): # trigger purge - start_times = self._analytics_db._get_analytics_start_time() + start_times = self._analytics_db.get_analytics_start_time() purge_cutoff = self.get_purge_cutoff( (100.0 - float(self._args.db_purge_level)), start_times) @@ -1995,7 +1995,7 @@ def _auto_purge(self): def _get_analytics_data_start_time(self): - analytics_start_time = (self._analytics_db._get_analytics_start_time())[SYSTEM_OBJECT_START_TIME] + analytics_start_time = (self._analytics_db.get_analytics_start_time())[SYSTEM_OBJECT_START_TIME] response = {'analytics_data_start_time': analytics_start_time} return bottle.HTTPResponse( json.dumps(response), 200, {'Content-type': 'application/json'}) diff --git a/src/vnsw/agent/openstack/instance_service_server.cc b/src/vnsw/agent/openstack/instance_service_server.cc index d2bddd28d96..60febee7114 100644 --- a/src/vnsw/agent/openstack/instance_service_server.cc +++ b/src/vnsw/agent/openstack/instance_service_server.cc @@ -562,8 +562,9 @@ void AddPortReq::HandleRequest() const { boost::system::error_code ec, ec6; Ip4Address 
ip(Ip4Address::from_string(get_ip_address(), ec)); Ip6Address ip6 = Ip6Address::from_string(get_ip6_address(), ec6); - if (((ec != 0) && (ec6 != 0)) || - (ip.is_unspecified() && ip6.is_unspecified())) { + /* Return only if wrong IP address is passed in both IPv4 and IPv6 fields. + * An IP address of all zeroes is not considered wrong/invalid */ + if ((ec != 0) && (ec6 != 0)) { resp_str += "Neither Ipv4 nor IPv6 address is correct, "; err = true; } diff --git a/src/vnsw/agent/oper/health_check.cc b/src/vnsw/agent/oper/health_check.cc index 0530408a31c..278851be331 100644 --- a/src/vnsw/agent/oper/health_check.cc +++ b/src/vnsw/agent/oper/health_check.cc @@ -156,6 +156,10 @@ void HealthCheckInstance::OnExit(InstanceTask *task, service_->table_->InstanceEventEnqueue(event); } +bool HealthCheckInstance::IsRunning() const { + return (task_.get() != NULL ? task_->is_running(): false); +} + HealthCheckInstanceEvent::HealthCheckInstanceEvent(HealthCheckInstance *inst, EventType type, const std::string &message) : @@ -212,8 +216,7 @@ bool HealthCheckService::DBEntrySandesh(Sandesh *sresp, inst_data.set_health_check_ip (it->second->ip_->destination_ip().to_string()); inst_data.set_active(it->second->active_); - inst_data.set_running(it->second->task_.get() != NULL ? 
- it->second->task_->is_running(): false); + inst_data.set_running(it->second->IsRunning()); inst_data.set_last_update_time(it->second->last_update_time_); inst_list.push_back(inst_data); it++; diff --git a/src/vnsw/agent/oper/health_check.h b/src/vnsw/agent/oper/health_check.h index c5f304d58c7..01b6cece736 100644 --- a/src/vnsw/agent/oper/health_check.h +++ b/src/vnsw/agent/oper/health_check.h @@ -115,6 +115,7 @@ struct HealthCheckInstance { // OnExit Callback for Task void OnExit(InstanceTask *task, const boost::system::error_code &ec); bool active() {return active_;} + bool IsRunning() const; // reference to health check service under // which this instance is running @@ -162,6 +163,7 @@ class HealthCheckService : AgentRefCount, void DeleteInstances(); const boost::uuids::uuid &uuid() const { return uuid_; } + const std::string &name() const { return name_; } private: friend class HealthCheckInstance; diff --git a/src/vnsw/agent/oper/ifmap_dependency_manager.cc b/src/vnsw/agent/oper/ifmap_dependency_manager.cc index 10530373fb4..0ceef461df7 100644 --- a/src/vnsw/agent/oper/ifmap_dependency_manager.cc +++ b/src/vnsw/agent/oper/ifmap_dependency_manager.cc @@ -654,7 +654,7 @@ void IFMapDependencyManager::InitializeDependencyRules(Agent *agent) { "floating-ip", true, "floating-ip-pool-floating-ip", "floating-ip-pool", false, - "floating-ip-pool-virtual-network", + "virtual-network-floating-ip-pool", "virtual-network", true)); AddDependencyPath("virtual-machine-interface", MakePath("instance-ip-virtual-machine-interface", diff --git a/src/vnsw/agent/oper/interface.h b/src/vnsw/agent/oper/interface.h index 068f6631ca2..124624ec208 100644 --- a/src/vnsw/agent/oper/interface.h +++ b/src/vnsw/agent/oper/interface.h @@ -112,6 +112,7 @@ class Interface : AgentRefCount, public AgentOperDBEntry { VrfEntry *vrf() const {return vrf_.get();} bool ipv4_active() const {return ipv4_active_;} bool ipv6_active() const {return ipv6_active_;} + bool is_hc_active() const { return 
is_hc_active_; } bool metadata_ip_active() const {return metadata_ip_active_;} bool ip_active(Address::Family family) const; bool l2_active() const {return l2_active_;} diff --git a/src/vnsw/agent/oper/multicast.cc b/src/vnsw/agent/oper/multicast.cc index 309eaf2705f..0fc4f6b4921 100644 --- a/src/vnsw/agent/oper/multicast.cc +++ b/src/vnsw/agent/oper/multicast.cc @@ -177,6 +177,11 @@ void MulticastHandler::HandleVnParametersChange(DBTablePartBase *partition, state->vrf_name_, state->vxlan_id_, data); + Ip4Address broadcast = IpAddress::from_string("255.255.255.255", + ec).to_v4(); + AddL2BroadcastRoute(all_broadcast, state->vrf_name_, vn->GetName(), + broadcast, MplsTable::kInvalidLabel, + state->vxlan_id_, 0); } //Delete or withdraw old vxlan id diff --git a/src/vnsw/agent/oper/test/test_intf.cc b/src/vnsw/agent/oper/test/test_intf.cc index ef22e5db0c1..e9c5426cfda 100644 --- a/src/vnsw/agent/oper/test/test_intf.cc +++ b/src/vnsw/agent/oper/test/test_intf.cc @@ -1640,7 +1640,7 @@ TEST_F(IntfTest, IntfActivateDeactivate_2) { // layer3 nexthop are absent //2> Add instance IP and make sure layer3 nexthop are added //3> Delete instance ip and layer3 nexthop are deleted -TEST_F(IntfTest, DISABLED_IntfActivateDeactivate_5) { +TEST_F(IntfTest, IntfActivateDeactivate_5) { struct PortInfo input[] = { {"vnet1", 1, "0.0.0.0", "00:00:00:01:01:01", 1, 1}, }; diff --git a/src/vnsw/agent/oper/vm_interface.cc b/src/vnsw/agent/oper/vm_interface.cc index 5ba5fa4e6c1..4be34d832ef 100644 --- a/src/vnsw/agent/oper/vm_interface.cc +++ b/src/vnsw/agent/oper/vm_interface.cc @@ -523,7 +523,7 @@ static void BuildVm(VmInterfaceConfigData *data, IFMapNode *node, "configuration VM UUID is", UuidToString(data->vm_uuid_), "compute VM uuid is", - UuidToString(cfg_entry->GetVnUuid())); + UuidToString(cfg_entry->GetVmUuid())); } } @@ -4387,6 +4387,11 @@ void VmInterface::DeleteHealthCheckInstance(HealthCheckInstance *hc_inst) { assert(ret != 0); } +const VmInterface::HealthCheckInstanceSet & 
+VmInterface::hc_instance_set() const { + return hc_instance_set_; +} + //////////////////////////////////////////////////////////////////////////// // VRF assign rule routines //////////////////////////////////////////////////////////////////////////// diff --git a/src/vnsw/agent/oper/vm_interface.h b/src/vnsw/agent/oper/vm_interface.h index 2a4730fec31..f8aa81be4d0 100644 --- a/src/vnsw/agent/oper/vm_interface.h +++ b/src/vnsw/agent/oper/vm_interface.h @@ -567,6 +567,7 @@ class VmInterface : public Interface { void InsertHealthCheckInstance(HealthCheckInstance *hc_inst); void DeleteHealthCheckInstance(HealthCheckInstance *hc_inst); + const HealthCheckInstanceSet &hc_instance_set() const; size_t GetFloatingIpCount() const { return floating_ip_list_.list_.size(); } bool HasServiceVlan() const { return service_vlan_list_.list_.size() != 0; } diff --git a/src/vnsw/agent/pkt/test/test_pkt_fip.cc b/src/vnsw/agent/pkt/test/test_pkt_fip.cc index 26247e3e879..fa1309ed291 100644 --- a/src/vnsw/agent/pkt/test/test_pkt_fip.cc +++ b/src/vnsw/agent/pkt/test/test_pkt_fip.cc @@ -253,32 +253,61 @@ static void Setup() { ret = false; } - // Configure Floating-IP - AddFloatingIpPool("fip-pool1", 1); + // Configure Floating-IP-1 + AddInstanceIp("instance_fixed_ip", 8, "1.1.1.10"); + client->WaitForIdle(); + + AddLink("virtual-machine-interface", "vnet1", "instance-ip", + "instance_fixed_ip"); + client->WaitForIdle(); + + AddLink("virtual-machine-interface", "vnet1", "floating-ip", "fip1"); + client->WaitForIdle(); + AddFloatingIp("fip1", 1, "2.1.1.100"); + client->WaitForIdle(); + + AddFloatingIpPool("fip-pool1", 1); + client->WaitForIdle(); + AddLink("floating-ip", "fip1", "floating-ip-pool", "fip-pool1"); - AddFloatingIp("fip_2", 2, "2.1.1.99"); - AddLink("floating-ip", "fip_2", "floating-ip-pool", "fip-pool1"); + client->WaitForIdle(); + AddLink("floating-ip-pool", "fip-pool1", "virtual-network", "default-project:vn2"); - AddLink("virtual-machine-interface", "vnet1", 
"floating-ip", "fip1"); + client->WaitForIdle(); + + // Configure Floating-IP-2 + AddLink("virtual-machine-interface", "vnet1", "floating-ip", + "fip_fixed_ip"); + client->WaitForIdle(); + + AddFloatingIp("fip_2", 2, "2.1.1.99"); + client->WaitForIdle(); + AddFloatingIpPool("fip-pool2", 2); + client->WaitForIdle(); + + AddLink("floating-ip", "fip_2", "floating-ip-pool", "fip-pool1"); + client->WaitForIdle(); + AddFloatingIp("fip_3", 3, "3.1.1.100"); + client->WaitForIdle(); + AddLink("floating-ip", "fip_3", "floating-ip-pool", "fip-pool2"); + client->WaitForIdle(); + AddLink("floating-ip-pool", "fip-pool2", "virtual-network", "default-project:vn3"); client->WaitForIdle(); + AddInstanceIp("instance_fixed_ip", 8, "1.1.1.10"); - AddLink("virtual-machine-interface", "vnet1", "instance-ip", - "instance_fixed_ip"); + client->WaitForIdle(); AddFloatingIp("fip_fixed_ip", 4, "2.1.1.101", "1.1.1.10"); AddLink("floating-ip", "fip_fixed_ip", "floating-ip-pool", "fip-pool1"); AddLink("floating-ip-pool", "fip-pool1", "virtual-network", "default-project:vn2"); - AddLink("virtual-machine-interface", "vnet1", "floating-ip", - "fip_fixed_ip"); - client->WaitForIdle(); EXPECT_TRUE(vnet[1]->HasFloatingIp(Address::INET)); if (vnet[1]->HasFloatingIp(Address::INET) == false) { diff --git a/src/vnsw/agent/port_ipc/port_ipc_handler.cc b/src/vnsw/agent/port_ipc/port_ipc_handler.cc index 42d73543689..1a3c2b39624 100644 --- a/src/vnsw/agent/port_ipc/port_ipc_handler.cc +++ b/src/vnsw/agent/port_ipc/port_ipc_handler.cc @@ -203,10 +203,13 @@ bool PortIpcHandler::AddPort(const PortIpcHandler::AddPortParams &r, intf_type = CfgIntEntry::CfgIntNameSpacePort; } boost::system::error_code ec, ec6; + /* from_string returns default constructor IP (all zeroes) when there is + * error in passed IP for both v4 and v6 */ Ip4Address ip(Ip4Address::from_string(r.ip_address, ec)); Ip6Address ip6 = Ip6Address::from_string(r.ip6_address, ec6); - if (((ec != 0) && (ec6 != 0)) || - (ip.is_unspecified() && 
ip6.is_unspecified())) { + /* We permit port-add with all zeroes IP address but not with + * invalid IP */ + if ((ec != 0) && (ec6 != 0)) { resp_str += "Neither Ipv4 nor IPv6 address is correct, "; err = true; } diff --git a/src/vnsw/agent/test/test_cmn_util.h b/src/vnsw/agent/test/test_cmn_util.h index d7f5e03e71f..2e36a952900 100644 --- a/src/vnsw/agent/test/test_cmn_util.h +++ b/src/vnsw/agent/test/test_cmn_util.h @@ -75,6 +75,7 @@ void AddNode(Agent *agent, const char *node_name, const char *name, int id, void DelNode(const char *node_name, const char *name); void DelNode(Agent *agent, const char *node_name, const char *name); void IntfSyncMsg(PortInfo *input, int id); +CfgIntEntry *CfgPortGet(boost::uuids::uuid u); void IntfCfgAddThrift(PortInfo *input, int id); void IntfCfgAdd(int intf_id, const string &name, const string ipaddr, int vm_id, int vn_id, const string &mac, uint16_t vlan, diff --git a/src/vnsw/agent/test/test_l2route.cc b/src/vnsw/agent/test/test_l2route.cc index 0337f457e96..1572e919095 100644 --- a/src/vnsw/agent/test/test_l2route.cc +++ b/src/vnsw/agent/test/test_l2route.cc @@ -1524,6 +1524,37 @@ TEST_F(RouteTest, add_local_peer_and_then_vm) { bgp_peer.reset(); } +TEST_F(RouteTest, l2_flood_vxlan_update) { + struct PortInfo input[] = { + {"vnet1", 1, "1.1.1.10", "00:00:00:01:01:01", 1, 1}, + }; + + client->Reset(); + VnAddReq(1, "vn1"); + client->WaitForIdle(); + CreateVmportEnv(input, 1); + client->WaitForIdle(); + + //Add a peer and enqueue path add in multicast route. 
+ BridgeRouteEntry *rt = L2RouteGet("vrf1", + MacAddress::FromString("ff:ff:ff:ff:ff:ff"), + Ip4Address(0)); + EXPECT_TRUE(rt->FindPath(agent_->multicast_peer())); + uint32_t vxlan_id = rt->FindPath(agent_->multicast_peer())->vxlan_id(); + EXPECT_TRUE(vxlan_id != 0); + + VxLanNetworkIdentifierMode(true); + client->WaitForIdle(); + + uint32_t new_vxlan_id = rt->FindPath(agent_->multicast_peer())->vxlan_id(); + EXPECT_TRUE(vxlan_id != new_vxlan_id); + + VxLanNetworkIdentifierMode(false); + client->WaitForIdle(); + DeleteVmportEnv(input, 1, true); + client->WaitForIdle(); +} + int main(int argc, char *argv[]) { ::testing::InitGoogleTest(&argc, argv); GETUSERARGS(); diff --git a/src/vnsw/agent/test/test_util.cc b/src/vnsw/agent/test/test_util.cc index 206baa450f5..475ba13d044 100644 --- a/src/vnsw/agent/test/test_util.cc +++ b/src/vnsw/agent/test/test_util.cc @@ -784,9 +784,15 @@ InetInterface *InetInterfaceGet(const char *ifname) { return static_cast<InetInterface *>(Agent::GetInstance()->interface_table()->FindActiveEntry(&key)); } +CfgIntEntry *CfgPortGet(boost::uuids::uuid u) { + CfgIntKey key(u); + return static_cast<CfgIntEntry *>(Agent::GetInstance()-> + interface_config_table()->Find(&key)); +} + Interface *VmPortGet(int id) { VmInterfaceKey key(AgentKey::ADD_DEL_CHANGE, MakeUuid(id), ""); - return static_cast<Interface *>(Agent::GetInstance()->interface_table()->FindActiveEntry(&key)); + return static_cast<Interface *>(Agent::GetInstance()->interface_table()->Find(&key, false)); } bool VmPortFloatingIpCount(int id, unsigned int count) { diff --git a/src/vnsw/agent/uve/interface.sandesh b/src/vnsw/agent/uve/interface.sandesh index 218475185ac..3693c9be925 100644 --- a/src/vnsw/agent/uve/interface.sandesh +++ b/src/vnsw/agent/uve/interface.sandesh @@ -38,6 +38,13 @@ struct VmInterfaceStats { 4: u64 out_bytes; } +struct VmHealthCheckInstance { + 1: string name; + 2: string uuid; + 3: bool is_running; + 4: string status; +} + /** * Sandesh definition for vm interface in agent */ @@ -65,6 +72,8 @@ struct
UveVMInterfaceAgent { 20: optional u64 in_bw_usage; 21: optional u64 out_bw_usage; 22: optional bool ip4_active; + 23: optional bool is_health_check_active; + 24: optional list<VmHealthCheckInstance> health_check_instance_list; } /** diff --git a/src/vnsw/agent/uve/interface_uve_table.cc b/src/vnsw/agent/uve/interface_uve_table.cc index beb0f4b6540..53518323328 100644 --- a/src/vnsw/agent/uve/interface_uve_table.cc +++ b/src/vnsw/agent/uve/interface_uve_table.cc @@ -3,6 +3,7 @@ */ #include +#include #include #include @@ -107,6 +108,7 @@ bool InterfaceUveTable::UveInterfaceEntry::FrameInterfaceMsg(const string &name, s_intf->set_mac_address(intf_->vm_mac()); s_intf->set_ip6_address(intf_->primary_ip6_addr().to_string()); s_intf->set_ip6_active(intf_->ipv6_active()); + s_intf->set_is_health_check_active(intf_->is_hc_active()); vector uve_fip_list; if (intf_->HasFloatingIp(Address::INET)) { @@ -132,6 +134,23 @@ bool InterfaceUveTable::UveInterfaceEntry::FrameInterfaceMsg(const string &name, } s_intf->set_floating_ips(uve_fip_list); + vector<VmHealthCheckInstance> uve_hc_list; + const VmInterface::HealthCheckInstanceSet hc_list = + intf_->hc_instance_set(); + VmInterface::HealthCheckInstanceSet::const_iterator hc_it = + hc_list.begin(); + while (hc_it != hc_list.end()) { + HealthCheckInstance *inst = (*hc_it); + VmHealthCheckInstance uve_inst; + uve_inst.set_name(inst->service_->name()); + uve_inst.set_uuid(to_string(inst->service_->uuid())); + uve_inst.set_status(inst->active() ?
"Active" : "InActive"); + uve_inst.set_is_running(inst->IsRunning()); + hc_it++; + uve_hc_list.push_back(uve_inst); + } + s_intf->set_health_check_instance_list(uve_hc_list); + s_intf->set_label(intf_->label()); s_intf->set_ip4_active(intf_->ipv4_active()); s_intf->set_l2_active(intf_->l2_active()); diff --git a/src/vnsw/agent/vrouter/flow_stats/flow_export_info.cc b/src/vnsw/agent/vrouter/flow_stats/flow_export_info.cc index d5e35854368..efbe4c4e00a 100644 --- a/src/vnsw/agent/vrouter/flow_stats/flow_export_info.cc +++ b/src/vnsw/agent/vrouter/flow_stats/flow_export_info.cc @@ -8,7 +8,8 @@ FlowExportInfo::FlowExportInfo() : flow_handle_(FlowEntry::kInvalidFlowHandle), action_info_(), vm_cfg_name_(), peer_vrouter_(), tunnel_type_(TunnelType::INVALID), underlay_source_port_(0), changed_(false), - fip_(0), fip_vmi_(AgentKey::ADD_DEL_CHANGE, nil_uuid(), ""), tcp_flags_(0) { + fip_(0), fip_vmi_(AgentKey::ADD_DEL_CHANGE, nil_uuid(), ""), tcp_flags_(0), + delete_enqueued_(false) { drop_reason_ = FlowEntry::DropReasonStr(FlowEntry::DROP_UNKNOWN); rev_flow_key_.Reset(); interface_uuid_ = boost::uuids::nil_uuid(); @@ -22,7 +23,8 @@ FlowExportInfo::FlowExportInfo(FlowEntry *fe, uint64_t setup_time) : flow_handle_(fe->flow_handle()), action_info_(fe->match_p().action_info), vm_cfg_name_(fe->data().vm_cfg_name), peer_vrouter_(fe->peer_vrouter()), tunnel_type_(fe->tunnel_type()), underlay_source_port_(0), - changed_(true), fip_(fe->fip()), fip_vmi_(fe->fip_vmi()), tcp_flags_(0) { + changed_(true), fip_(fe->fip()), fip_vmi_(fe->fip_vmi()), tcp_flags_(0), + delete_enqueued_(false) { flow_uuid_ = FlowTable::rand_gen_(); egress_uuid_ = FlowTable::rand_gen_(); FlowEntry *rflow = fe->reverse_flow_entry(); diff --git a/src/vnsw/agent/vrouter/flow_stats/flow_export_info.h b/src/vnsw/agent/vrouter/flow_stats/flow_export_info.h index 5ee1870f35c..12a74a14920 100644 --- a/src/vnsw/agent/vrouter/flow_stats/flow_export_info.h +++ b/src/vnsw/agent/vrouter/flow_stats/flow_export_info.h @@ 
-61,6 +61,8 @@ class FlowExportInfo { } bool IsEqual(const FlowExportInfo &rhs) const; void Copy(const FlowExportInfo &rhs); + void set_delete_enqueued(bool value) { delete_enqueued_ = value; } + bool delete_enqueued() const { return delete_enqueued_; } private: boost::uuids::uuid flow_uuid_; boost::uuids::uuid egress_uuid_; // used/applicable only for local flows @@ -91,6 +93,7 @@ class FlowExportInfo { boost::uuids::uuid interface_uuid_; std::string drop_reason_; uint16_t tcp_flags_; + bool delete_enqueued_; }; #endif // __AGENT_FLOW_EXPORT_INFO_H__ diff --git a/src/vnsw/agent/vrouter/flow_stats/flow_stats.sandesh b/src/vnsw/agent/vrouter/flow_stats/flow_stats.sandesh index 48283a93795..fe0a9c1c001 100644 --- a/src/vnsw/agent/vrouter/flow_stats/flow_stats.sandesh +++ b/src/vnsw/agent/vrouter/flow_stats/flow_stats.sandesh @@ -104,6 +104,7 @@ struct SandeshFlowExportInfo { 19: string fip_vmi; 20: string fip; 21: u16 underlay_source_port; + 22: bool delete_enqueued; } /** diff --git a/src/vnsw/agent/vrouter/flow_stats/flow_stats_collector.cc b/src/vnsw/agent/vrouter/flow_stats/flow_stats_collector.cc index 551c1df9b08..b338982d384 100644 --- a/src/vnsw/agent/vrouter/flow_stats/flow_stats_collector.cc +++ b/src/vnsw/agent/vrouter/flow_stats/flow_stats_collector.cc @@ -289,8 +289,10 @@ void FlowStatsCollector::UpdateStatsAndExportFlow(FlowExportInfo *info, ExportFlow(key, info, 0, 0); } -void FlowStatsCollector::FlowDeleteEnqueue(const FlowKey &key, bool rev) { +void FlowStatsCollector::FlowDeleteEnqueue(const FlowKey &key, bool rev, + FlowExportInfo *info) { agent_uve_->agent()->pkt()->get_flow_proto()->DeleteFlowRequest(key, rev); + info->set_delete_enqueued(true); } void FlowStatsCollector::UpdateFlowStatsInternal(FlowExportInfo *info, @@ -338,15 +340,12 @@ void FlowStatsCollector::UpdateAndExportInternal(FlowExportInfo *info, ExportFlow(key, info, diff_bytes, diff_pkts); } -// FIXME : Handle multiple tables bool FlowStatsCollector::Run() { FlowEntryTree::iterator 
it; FlowExportInfo *rev_info = NULL; FlowExportInfo *info = NULL; uint32_t count = 0; bool key_updation_reqd = true, deleted; - Agent *agent = agent_uve_->agent(); - FlowTable *flow_obj = agent->pkt()->flow_table(0); FlowKey key; run_counter_++; @@ -395,7 +394,7 @@ bool FlowStatsCollector::Run() { it++; } } - FlowDeleteEnqueue(key, rev_info != NULL? true : false); + FlowDeleteEnqueue(key, rev_info != NULL? true : false, info); if (rev_info) { count++; if (count == flow_count_per_pass_) { @@ -438,7 +437,7 @@ bool FlowStatsCollector::Run() { it++; } } - FlowDeleteEnqueue(key, true); + FlowDeleteEnqueue(key, true, info); if (rev_info) { count++; if (count == flow_count_per_pass_) { @@ -470,7 +469,7 @@ bool FlowStatsCollector::Run() { /* Update the flow_timer_interval and flow_count_per_pass_ based on * total flows that we have */ - uint32_t total_flows = flow_obj->Size(); + uint32_t total_flows = flow_tree_.size(); uint32_t flow_timer_interval; uint32_t age_time_millisec = flow_age_time_intvl() / 1000; @@ -1010,6 +1009,7 @@ static void FlowExportInfoToSandesh(const FlowExportInfo &value, Ip4Address ip(value.fip()); info.set_fip(ip.to_string()); info.set_underlay_source_port(value.underlay_source_port()); + info.set_delete_enqueued(value.delete_enqueued()); } void FlowStatsRecordsReq::HandleRequest() const { diff --git a/src/vnsw/agent/vrouter/flow_stats/flow_stats_collector.h b/src/vnsw/agent/vrouter/flow_stats/flow_stats_collector.h index 3d6430ab5e7..2ccae01a2aa 100644 --- a/src/vnsw/agent/vrouter/flow_stats/flow_stats_collector.h +++ b/src/vnsw/agent/vrouter/flow_stats/flow_stats_collector.h @@ -144,7 +144,7 @@ class FlowStatsCollector : public StatsCollector { bool teardown_time, uint64_t *diff_bytes, uint64_t *diff_pkts); - void FlowDeleteEnqueue(const FlowKey &key, bool rev); + void FlowDeleteEnqueue(const FlowKey &key, bool rev, FlowExportInfo *info); void EnqueueFlowMsg(); void DispatchPendingFlowMsg(); void GetFlowSandeshActionParams(const FlowAction 
&action_info,