Skip to content

Commit

Permalink
Support pagination of output for multicast manager introspect
Browse files Browse the repository at this point in the history
Limit the maximum number of entries displayed on a single page.
A next_batch link is generated if there are more entries to be
displayed.

Also limit the maximum number of entries examined in one invocation of
the callback routine.  This comes into play when there is a search
string specified and many entries don't match it.  A partial page
is saved in user-allocated data and the next invocation of the callback
appends to it.  This is repeated until there's a full page or there
are no more entries in the table.

The following changes are implemented:

- Move code from bgp_sandesh.cc to bgp_show_multicast_manager.cc
- Use class template BgpShowHandler to avoid code duplication
- Implement iteration limit to avoid hogging CPU from introspect
- Reuse type parameterized tests to also cover multicast manager
- Remove older unit test which was limited in scope
- Sprinkle const as required

Change-Id: I190cfbaaa58ac52ce63fb6bd916c040ab100e742
Closes-Bug: 1479428
  • Loading branch information
Nischal Sheth committed Aug 14, 2015
1 parent b7bbed1 commit fcc0f5c
Show file tree
Hide file tree
Showing 12 changed files with 262 additions and 240 deletions.
1 change: 1 addition & 0 deletions src/bgp/SConscript
Expand Up @@ -60,6 +60,7 @@ libbgp = env.Library('bgp',
'bgp_session.cc',
'bgp_show_config.cc',
'bgp_show_evpn_table.cc',
'bgp_show_multicast_manager.cc',
'bgp_show_neighbor.cc',
'bgp_show_route_summary.cc',
'bgp_show_routing_instance.cc',
Expand Down
14 changes: 14 additions & 0 deletions src/bgp/bgp_multicast.cc
Expand Up @@ -1013,3 +1013,17 @@ LifetimeActor *McastTreeManager::deleter() {
return deleter_.get();
}

//
// Return the LifetimeActor for the McastTreeManager.
// Const version, for read-only callers (e.g. introspect code that only
// needs to inspect deletion state).
//
const LifetimeActor *McastTreeManager::deleter() const {
return deleter_.get();
}

//
// Return true if the McastTreeManager is deleted.
// Delegates to the DeleteActor's IsDeleted().
//
bool McastTreeManager::deleted() const {
return deleter_->IsDeleted();
}
13 changes: 10 additions & 3 deletions src/bgp/bgp_multicast.h
Expand Up @@ -312,8 +312,8 @@ class McastManagerPartition {
BgpServer *server();
McastTreeManager *tree_manager() const { return tree_manager_; }

bool empty() { return sg_list_.empty(); }
size_t size() { return sg_list_.size(); }
bool empty() const { return sg_list_.empty(); }
size_t size() const { return sg_list_.size(); }

private:
friend class BgpMulticastTest;
Expand Down Expand Up @@ -370,6 +370,9 @@ class McastTreeManager {
public:
static const int kDegree = 4;

typedef std::vector<McastManagerPartition *> PartitionList;
typedef PartitionList::const_iterator const_iterator;

enum NodeLevel {
LevelFirst = 0,
LevelNative = 0,
Expand All @@ -380,6 +383,9 @@ class McastTreeManager {
explicit McastTreeManager(ErmVpnTable *table);
virtual ~McastTreeManager();

const_iterator begin() const { return partitions_.begin(); }
const_iterator end() const { return partitions_.end(); }

virtual void Initialize();
virtual void Terminate();

Expand All @@ -395,13 +401,14 @@ class McastTreeManager {
void RetryDelete();

LifetimeActor *deleter();
const LifetimeActor *deleter() const;
bool deleted() const;

private:
friend class BgpMulticastTest;
friend class ShowMulticastManagerDetailHandler;

class DeleteActor;
typedef std::vector<McastManagerPartition *> PartitionList;

void AllocPartitions();
void FreePartitions();
Expand Down
4 changes: 4 additions & 0 deletions src/bgp/bgp_peer.sandesh
Expand Up @@ -294,10 +294,14 @@ request sandesh ShowEvpnTableSummaryReq {
// Summary of one multicast tree manager, one entry per ErmVpn table in
// the ShowMulticastManagerResp listing.
struct ShowMulticastManager {
// Table name; links to the detail request for this manager.
1: string name (link="ShowMulticastManagerDetailReq");
// Total number of multicast trees summed across all table partitions.
2: u32 total_trees;
// True if the manager is marked deleted.
3: bool deleted;
// When the manager was deleted; NOTE(review): format/meaning set by the
// producer - confirm against the code that fills this field.
4: string deleted_at;
}

// Response for ShowMulticastManagerReq. The next_batch link is present
// only when more entries remain beyond the current page.
response sandesh ShowMulticastManagerResp {
// One entry per multicast manager on this page.
1: list<ShowMulticastManager> managers;
// Link used by the introspect UI to fetch the next page, if any.
2: optional string next_batch (link="ShowMulticastManagerReqIterate",
link_title="next_batch");
}

request sandesh ShowMulticastManagerReq {
Expand Down
4 changes: 4 additions & 0 deletions src/bgp/bgp_peer_internal.sandesh
Expand Up @@ -58,6 +58,10 @@ request sandesh ShowEvpnTableSummaryReqIterate {
1: string iterate_info;
}

// Internal request used to fetch the next page of multicast managers.
request sandesh ShowMulticastManagerReqIterate {
// Encoded continuation state identifying where the next page starts.
1: string iterate_info;
}

// Response with the detailed list of multicast trees for one manager.
response sandesh ShowMulticastManagerDetailResp {
1: list<bgp_peer.ShowMulticastTree> trees;
}
Expand Down
143 changes: 1 addition & 142 deletions src/bgp/bgp_sandesh.cc
Expand Up @@ -763,147 +763,6 @@ void ClearBgpNeighborReq::HandleRequest() const {
RequestPipeline rp(ps);
}

//
// Handler for the ShowMulticastManagerReq introspect request.
//
// Implemented as static callbacks driven by a two-stage RequestPipeline
// (set up in ShowMulticastManagerReq::HandleRequest):
// - Stage 1 (CallbackS1) runs once per DB partition and records the
//   multicast tree count of each ErmVpn table for that partition.
// - Stage 2 (CallbackS2) sums the per-partition counts and sends the
//   ShowMulticastManagerResp.
//
class ShowMulticastManagerHandler {
public:
// Map key for per-table data - tables are identified by name.
struct MulticastManagerDataKey {
string routing_table;
bool operator<(const MulticastManagerDataKey &rhs) const {
return (routing_table < rhs.routing_table);
}
};

// Per-pipeline-instance data for stage 1: tree counts seen in this
// instance's partition, keyed by table.
struct MulticastManagerData : public RequestPipeline::InstData {
typedef map<MulticastManagerDataKey, uint32_t> Map;
Map table_map;
};

// Allocate the per-instance data for a pipeline stage.
static RequestPipeline::InstData *CreateData(int stage) {
return (new MulticastManagerData);
}

// Record the tree count of the given table's partition inst_id into
// the stage 1 per-instance data.
static void FillMulticastManagerStats(MulticastManagerData *data,
ErmVpnTable *table, int inst_id) {
MulticastManagerDataKey key;
key.routing_table = table->name();
McastTreeManager *tm = table->GetTreeManager();
McastManagerPartition *partition = tm->GetPartition(inst_id);
data->table_map[key] = partition->size();
}

// Stage 1 callback - invoked once per stage instance, each mapped to
// one DB partition (inst_id). Walks all routing instances and records
// the tree count of each ErmVpn table for this partition. Tables whose
// name does not contain the optional search string are skipped.
static bool CallbackS1(const Sandesh *sr,
const RequestPipeline::PipeSpec ps,
int stage, int instNum,
RequestPipeline::InstData *data) {
int inst_id = ps.stages_[stage].instances_[instNum];

MulticastManagerData *mydata =
static_cast<MulticastManagerData *>(data);
const ShowMulticastManagerReq *req =
static_cast<const ShowMulticastManagerReq *>(ps.snhRequest_.get());
const string &search_string = req->get_search_string();
BgpSandeshContext *bsc =
static_cast<BgpSandeshContext *>(req->client_context());
RoutingInstanceMgr *rim = bsc->bgp_server->routing_instance_mgr();
for (RoutingInstanceMgr::name_iterator it = rim->name_begin();
it != rim->name_end(); it++) {
RoutingInstance *ri = it->second;
// The default routing instance is not shown in this listing.
if (ri->IsDefaultRoutingInstance())
continue;
ErmVpnTable *table =
static_cast<ErmVpnTable *>(ri->GetTable(Address::ERMVPN));
if (!table)
continue;
// Apply the optional substring filter on the table name.
if (!search_string.empty() &&
table->name().find(search_string) == string::npos) {
continue;
}
FillMulticastManagerStats(mydata, table, inst_id);
}

return true;
}

// Build one ShowMulticastManager entry for the given table by summing
// the per-partition counts collected in stage 1 (sd holds one
// MulticastManagerData per stage 1 instance).
static void FillMulticastManagerInfo(const RequestPipeline::StageData *sd,
vector<ShowMulticastManager> &mgr_list, ErmVpnTable *table) {
ShowMulticastManager mgr;
MulticastManagerDataKey key;
key.routing_table = table->name();
for (size_t idx = 0; idx < sd->size(); idx++) {
const MulticastManagerData &data =
static_cast<const MulticastManagerData &>(sd->at(idx));
MulticastManagerData::Map::const_iterator dit =
data.table_map.find(key);
if (dit != data.table_map.end()) {
// NOTE(review): relies on total_trees starting at 0 in the
// generated sandesh struct - confirm.
mgr.total_trees += dit->second;
}
}

mgr.set_name(table->name());
mgr_list.push_back(mgr);
}

// Stage 2 callback - runs on a single instance. Re-walks the routing
// instances with the same filtering as stage 1, aggregates the stage 1
// data for each table, and sends the response.
static bool CallbackS2(const Sandesh *sr,
const RequestPipeline::PipeSpec ps,
int stage, int instNum,
RequestPipeline::InstData *data) {
const ShowMulticastManagerReq *req =
static_cast<const ShowMulticastManagerReq *>(ps.snhRequest_.get());
const string &search_string = req->get_search_string();
BgpSandeshContext *bsc =
static_cast<BgpSandeshContext *>(req->client_context());
RoutingInstanceMgr *rim = bsc->bgp_server->routing_instance_mgr();
vector<ShowMulticastManager> mgr_list;
const RequestPipeline::StageData *sd = ps.GetStageData(0);
for (RoutingInstanceMgr::name_iterator it = rim->name_begin();
it != rim->name_end(); it++) {
RoutingInstance *ri = it->second;
if (ri->IsDefaultRoutingInstance())
continue;
ErmVpnTable *table =
static_cast<ErmVpnTable *>(ri->GetTable(Address::ERMVPN));
if (!table)
continue;
if (!search_string.empty() &&
table->name().find(search_string) == string::npos) {
continue;
}
FillMulticastManagerInfo(sd, mgr_list, table);
}


// Sandesh response objects manage their own lifetime; Response()
// sends and eventually frees resp.
ShowMulticastManagerResp *resp = new ShowMulticastManagerResp;
resp->set_managers(mgr_list);
resp->set_context(req->context());
resp->Response();
return true;
}
};

void ShowMulticastManagerReq::HandleRequest() const {
RequestPipeline::PipeSpec ps(this);
BgpSandeshContext *bsc = static_cast<BgpSandeshContext *>(client_context());

// Request pipeline has 2 stages.
// First stage to collect multicast manager stats.
// Second stage to fill stats from stage 1 and respond to the request.
RequestPipeline::StageSpec s1, s2;
TaskScheduler *scheduler = TaskScheduler::GetInstance();

s1.taskId_ = scheduler->GetTaskId("db::DBTable");
s1.allocFn_ = ShowMulticastManagerHandler::CreateData;
s1.cbFn_ = ShowMulticastManagerHandler::CallbackS1;
for (int i = 0; i < bsc->bgp_server->database()->PartitionCount(); i++) {
s1.instances_.push_back(i);
}

s2.taskId_ = scheduler->GetTaskId("bgp::ShowCommand");
s2.cbFn_ = ShowMulticastManagerHandler::CallbackS2;
s2.instances_.push_back(0);

ps.stages_ = list_of(s1)(s2);
RequestPipeline rp(ps);
}

class ShowMulticastManagerDetailHandler {
public:
struct MulticastManagerDetailData : public RequestPipeline::InstData {
Expand Down Expand Up @@ -973,7 +832,7 @@ class ShowMulticastManagerDetailHandler {
static_cast<BgpSandeshContext *>(req->client_context());
DBTableBase *table = bsc->bgp_server->database()->FindTable(req->get_name());
ErmVpnTable *mcast_table = dynamic_cast<ErmVpnTable *>(table);
if (mcast_table)
if (mcast_table && !mcast_table->IsVpnTable())
FillMulticastPartitionInfo(mydata, mcast_table, inst_id);

return true;
Expand Down

0 comments on commit fcc0f5c

Please sign in to comment.