From 08a04e6d9cda5f22825556b43ddba18c57bd0b72 Mon Sep 17 00:00:00 2001 From: Nischal Sheth Date: Thu, 13 Oct 2016 10:14:19 -0700 Subject: [PATCH] Make BgpSenderPartition::UpdatePeerQueue more efficient The old implementation used to go through all peers for a RibOut and build a bitset of send ready peers to pass to PeerDequeue. The bitset is used to decide which peers to merge with the start marker when we encounter other markers while doing peer dequeue. The new implementation determines which peers are send ready more dynamically i.e. as we encounter other markers during peer dequeue. This avoids unnecessary work in cases where we do not encounter many markers. This could be because some markers are behind the start marker for the peer or because all the peers in the start marker get blocked or because some peers are in the tail marker. Further, the new implementation determines the send ready state directly from the RibOut instead of the BgpUpdateSender since the RibOut state is more authoritative. Also treat send_ready state in PeerState as advisory and send_ready in the IPeerUpdate as authoritative. This is justified because the IPeerUpdate could become blocked while processing updates from some other BgpSenderPartition. 
Change-Id: Ib40d0a0d71c48f56e1af30962cc6f2459be4140a Partial-Bug: 1607132 --- src/bgp/bgp_peer.cc | 2 + src/bgp/bgp_peer.h | 1 + src/bgp/bgp_peer.sandesh | 1 + src/bgp/bgp_ribout.cc | 15 ++++++ src/bgp/bgp_ribout.h | 2 + src/bgp/bgp_ribout_updates.cc | 31 ++++++----- src/bgp/bgp_ribout_updates.h | 2 +- src/bgp/bgp_update_sender.cc | 74 ++++++++++++------------- src/bgp/bgp_update_sender.h | 1 - src/bgp/bgp_xmpp_channel.cc | 3 +- src/bgp/bgp_xmpp_sandesh.cc | 1 + src/bgp/ipeer.h | 2 + src/bgp/test/bgp_ribout_updates_test.h | 4 ++ src/bgp/test/bgp_update_sender_test.cc | 75 +++++++++++++------------- src/bgp/test/bgp_update_test.cc | 11 ++-- 15 files changed, 128 insertions(+), 97 deletions(-) diff --git a/src/bgp/bgp_peer.cc b/src/bgp/bgp_peer.cc index eb6d39ca163..6b55b0d3b12 100644 --- a/src/bgp/bgp_peer.cc +++ b/src/bgp/bgp_peer.cc @@ -913,6 +913,7 @@ void BgpPeer::CustomClose() { // Close this peer by closing all of it's RIBs. // void BgpPeer::Close(bool non_graceful) { + send_ready_ = true; if (membership_req_pending_ && !close_manager_->IsMembershipInUse()) { BGP_LOG_PEER(Event, this, SandeshLevel::SYS_INFO, BGP_LOG_FLAG_ALL, BGP_PEER_DIR_NA, "Close procedure deferred"); @@ -2197,6 +2198,7 @@ void BgpPeer::FillNeighborInfo(const BgpSandeshContext *bsc, bnr->set_negotiated_hold_time(state_machine_->hold_time()); bnr->set_primary_path_count(GetPrimaryPathCount()); bnr->set_task_instance(GetTaskInstance()); + bnr->set_send_ready(send_ready_); bnr->set_flap_count(peer_stats_->num_flaps()); bnr->set_flap_time(peer_stats_->last_flap()); bnr->set_auth_type( diff --git a/src/bgp/bgp_peer.h b/src/bgp/bgp_peer.h index 60e7dfc18d0..ff0daca33f9 100644 --- a/src/bgp/bgp_peer.h +++ b/src/bgp/bgp_peer.h @@ -215,6 +215,7 @@ class BgpPeer : public IPeer { virtual bool CanUseMembershipManager() const; virtual bool IsRegistrationRequired() const { return true; } virtual uint64_t GetEorSendTimerElapsedTimeUsecs() const; + virtual bool send_ready() const { return send_ready_; } 
void Close(bool non_graceful); void Clear(int subcode); diff --git a/src/bgp/bgp_peer.sandesh b/src/bgp/bgp_peer.sandesh index f880eb77f03..41cc5836981 100644 --- a/src/bgp/bgp_peer.sandesh +++ b/src/bgp/bgp_peer.sandesh @@ -78,6 +78,7 @@ struct BgpNeighborResp { 48: bool passive; 55: bool as_override; 61: string private_as_action; + 62: bool send_ready; 50: u32 peer_port; 51: string transport_address; 4: string local_address; // local ip address and port diff --git a/src/bgp/bgp_ribout.cc b/src/bgp/bgp_ribout.cc index b7eac5c8ffc..41bbc3e07b8 100644 --- a/src/bgp/bgp_ribout.cc +++ b/src/bgp/bgp_ribout.cc @@ -20,6 +20,7 @@ #include "bgp/bgp_table.h" #include "bgp/bgp_update.h" #include "bgp/bgp_update_sender.h" +#include "bgp/ipeer.h" #include "bgp/routing-instance/routing_instance.h" #include "db/db.h" @@ -385,6 +386,20 @@ bool RibOut::IsActive(IPeerUpdate *peer) const { return (index < 0 ? false : active_peerset_.test(index)); } +// +// Build the subset of given RibPeerSet in this RibOut that are send ready. +// +void RibOut::BuildSendReadyBitSet(const RibPeerSet &peerset, + RibPeerSet *mready) const { + for (size_t bit = peerset.find_first(); bit != RibPeerSet::npos; + bit = peerset.find_next(bit)) { + IPeerUpdate *peer = GetPeer(bit); + if (peer->send_ready()) { + mready->set(bit); + } + } +} + // // Return the number of peers this route has been advertised to. 
// diff --git a/src/bgp/bgp_ribout.h b/src/bgp/bgp_ribout.h index 4c6a86ffef8..5ef86cb5ae4 100644 --- a/src/bgp/bgp_ribout.h +++ b/src/bgp/bgp_ribout.h @@ -262,6 +262,8 @@ class RibOut { bool IsRegistered(IPeerUpdate *peer); void Deactivate(IPeerUpdate *peer); bool IsActive(IPeerUpdate *peer) const; + void BuildSendReadyBitSet(const RibPeerSet &peerset, + RibPeerSet *mready) const; IPeerUpdate *GetPeer(int index) const; int GetPeerIndex(IPeerUpdate *peer) const; diff --git a/src/bgp/bgp_ribout_updates.cc b/src/bgp/bgp_ribout_updates.cc index 97a7ce2c084..92647741fe3 100644 --- a/src/bgp/bgp_ribout_updates.cc +++ b/src/bgp/bgp_ribout_updates.cc @@ -296,13 +296,13 @@ bool RibOutUpdates::TailDequeue(int queue_id, const RibPeerSet &msync, // blocked parameter is populated with the set of peers that are send blocked. // bool RibOutUpdates::PeerDequeue(int queue_id, IPeerUpdate *peer, - const RibPeerSet &mready, RibPeerSet *blocked) { CHECK_CONCURRENCY("bgp::SendUpdate"); stats_[queue_id].peer_dequeue_count_++; UpdateQueue *queue = queue_vec_[queue_id]; - UpdateMarker *start_marker = queue->GetMarker(ribout_->GetPeerIndex(peer)); + int peer_idx = ribout_->GetPeerIndex(peer); + UpdateMarker *start_marker = queue->GetMarker(peer_idx); // We're done if this is the same as the tail marker. Updates will be // built subsequently via TailDequeue. @@ -311,16 +311,14 @@ bool RibOutUpdates::PeerDequeue(int queue_id, IPeerUpdate *peer, return true; } - // Get the encapsulator for the first RouteUpdate. Even if there's no - // RouteUpdate, we should find another marker or the tail marker. - UpdateEntry *upentry; - RouteUpdatePtr update = - monitor_->GetNextEntry(queue_id, start_marker, &upentry); - assert(upentry); - - // At least one peer in the start marker i.e. the peer for which we are - // called must be send ready. - assert(start_marker->members.intersects(mready)); + // We're done if the lead peer is not send ready. 
This can happen if + // the peer got blocked when processing updates in another partition. + RibPeerSet mready; + ribout_->BuildSendReadyBitSet(start_marker->members, &mready); + if (!mready.test(peer_idx)) { + blocked->set(peer_idx); + return false; + } // Split out any peers from the marker that are not send ready. Note that // this updates the RibPeerSet in the marker. @@ -331,6 +329,13 @@ bool RibOutUpdates::PeerDequeue(int queue_id, IPeerUpdate *peer, queue->MarkerSplit(start_marker, notready); } + // Get the encapsulator for the first RouteUpdate. Even if there's no + // RouteUpdate, we should find another marker or the tail marker. + UpdateEntry *upentry; + RouteUpdatePtr update = + monitor_->GetNextEntry(queue_id, start_marker, &upentry); + assert(upentry); + // Update loop. Keep going till we reach the tail marker or till all the // peers get blocked. RibPeerSet members = start_marker->members; @@ -370,7 +375,7 @@ bool RibOutUpdates::PeerDequeue(int queue_id, IPeerUpdate *peer, // with the marker that is being processed for dequeue. Note // that this updates the RibPeerSet in the marker. RibPeerSet mmove; - mmove.BuildIntersection(marker->members, mready); + ribout_->BuildSendReadyBitSet(marker->members, &mmove); if (!mmove.empty()) { stats_[queue_id].marker_merge_count_++; queue->MarkerMerge(start_marker, marker, mmove); diff --git a/src/bgp/bgp_ribout_updates.h b/src/bgp/bgp_ribout_updates.h index 39a30d636d9..f26d6424109 100644 --- a/src/bgp/bgp_ribout_updates.h +++ b/src/bgp/bgp_ribout_updates.h @@ -72,7 +72,7 @@ class RibOutUpdates { virtual bool TailDequeue(int queue_id, const RibPeerSet &msync, RibPeerSet *blocked, RibPeerSet *unsync); virtual bool PeerDequeue(int queue_id, IPeerUpdate *peer, - const RibPeerSet &mready, RibPeerSet *blocked); + RibPeerSet *blocked); // Enqueue a marker at the head of the queue with this bit set. 
bool QueueJoin(int queue_id, int bit); diff --git a/src/bgp/bgp_update_sender.cc b/src/bgp/bgp_update_sender.cc index 3029c80c983..27c91d9bac0 100644 --- a/src/bgp/bgp_update_sender.cc +++ b/src/bgp/bgp_update_sender.cc @@ -46,11 +46,17 @@ struct BgpSenderPartition::PeerRibState { // to implement regular and circular iterator classes that are used to walk // through all the RibState entries for a peer. // -// A PeerState maintains the in_sync and send_ready state for the IPeer. An -// IPeer/PeerState is considered to be send_ready when the underlying socket -// is writable. It is considered to be in_sync if it's send_ready and the -// marker for the IPeer has merged with the tail marker for all QueueIds in -// all RiBOuts that the IPeer is subscribed. +// A PeerState maintains the in_sync and send_ready state for the IPeerUpdate. +// +// The PeerState is considered to be send_ready when the underlying socket is +// writable. Note that the send_ready state in the PeerState may be out of +// date with the actual socket state because the socket could have got blocked +// when writing from another partition. Hence IPeerUpdate::send_ready() is the +// more authoritative source. +// +// The PeerState is considered to be in_sync if it's send_ready and the marker +// for the IPeerUpdate has merged with the tail marker for all QueueIds in +// all RiBOuts for which the IPeerUpdate is subscribed. // // The PeerState keeps count of the number of active RibOuts for each QueueId. 
// A (RibOut, QueueId) pair is considered to be active if the PeerState isn't @@ -645,31 +651,20 @@ void BgpSenderPartition::BuildSyncBitSet(const RibOut *ribout, RibState *rs, for (RibState::iterator it = rs->begin(peer_state_imap_); it != rs->end(peer_state_imap_); ++it) { - const PeerState *ps = it.operator->(); - if (ps->in_sync()) { - int rix = ribout->GetPeerIndex(ps->peer()); - msync->set(rix); - } - } -} + PeerState *ps = it.operator->(); -// -// Build the RibPeerSet of IPeers for the RibOut that are send ready. Note -// that we need to use bit indices that are specific to the RibOut, not the -// ones from the BgpSenderPartition. -// -void BgpSenderPartition::BuildSendReadyBitSet(RibOut *ribout, - RibPeerSet *mready) { - CHECK_CONCURRENCY("bgp::SendUpdate"); - - RibState *rs = rib_state_imap_.Find(ribout); - assert(rs != NULL); - for (RibState::iterator it = rs->begin(peer_state_imap_); - it != rs->end(peer_state_imap_); ++it) { - const PeerState *ps = it.operator->(); - if (ps->send_ready()) { - int rix = ribout->GetPeerIndex(ps->peer()); - mready->set(rix); + // If the PeerState is in sync but the IPeerUpdate is not send ready + // then update the sync and send ready state in the PeerState. Note + // that the RibOut queue for the PeerState will get marked active via + // the call to SetQueueActive from UpdateRibOut. + if (ps->in_sync()) { + if (ps->peer()->send_ready()) { + int rix = ribout->GetPeerIndex(ps->peer()); + msync->set(rix); + } else { + ps->clear_sync(); + ps->set_send_ready(false); + } } } } @@ -830,19 +825,11 @@ bool BgpSenderPartition::UpdatePeerQueue(IPeerUpdate *peer, PeerState *ps, if (!BitIsSet(it.peer_rib_state().qactive, queue_id)) continue; - RibOut *ribout = it.operator->(); - - // Build the send ready bitset. This includes all send ready peers - // for the ribout so that we can potentially merge other peers as - // we move forward in processing the update queue. 
- RibPeerSet send_ready; - BuildSendReadyBitSet(ribout, &send_ready); - // Drain the queue till we can do no more. + RibOut *ribout = it.operator->(); RibOutUpdates *updates = ribout->updates(index_); RibPeerSet blocked; - bool done = updates->PeerDequeue(queue_id, peer, send_ready, &blocked); - assert(send_ready.Contains(blocked)); + bool done = updates->PeerDequeue(queue_id, peer, &blocked); // Process blocked mask. RibState *rs = it.rib_state(); @@ -875,11 +862,20 @@ bool BgpSenderPartition::UpdatePeerQueue(IPeerUpdate *peer, PeerState *ps, void BgpSenderPartition::UpdatePeer(IPeerUpdate *peer) { CHECK_CONCURRENCY("bgp::SendUpdate"); + // Bail if the PeerState is not send ready. PeerState *ps = peer_state_imap_.Find(peer); if (!ps->send_ready()) { return; } + // Update the PeerState and bail if the IPeerUpdate is not send ready. + // This happens if the IPeerUpdate gets blocked while processing some + // other partition. + if (!peer->send_ready()) { + ps->set_send_ready(false); + return; + } + // Go through all queues and drain them if there's anything on them. 
for (int queue_id = RibOutUpdates::QCOUNT - 1; queue_id >= 0; --queue_id) { if (ps->QueueCount(queue_id) == 0) { diff --git a/src/bgp/bgp_update_sender.h b/src/bgp/bgp_update_sender.h index 29fa0bd9c01..5d47888e9d6 100644 --- a/src/bgp/bgp_update_sender.h +++ b/src/bgp/bgp_update_sender.h @@ -122,7 +122,6 @@ class BgpSenderPartition { bool UpdatePeerQueue(IPeerUpdate *peer, PeerState *ps, int queue_id); void BuildSyncBitSet(const RibOut *ribout, RibState *rs, RibPeerSet *msync); - void BuildSendReadyBitSet(RibOut *ribout, RibPeerSet *mready); void SetQueueActive(const RibOut *ribout, RibState *rs, int queue_id, const RibPeerSet &munsync); diff --git a/src/bgp/bgp_xmpp_channel.cc b/src/bgp/bgp_xmpp_channel.cc index 3b98b157984..f34d8b3c395 100644 --- a/src/bgp/bgp_xmpp_channel.cc +++ b/src/bgp/bgp_xmpp_channel.cc @@ -364,7 +364,7 @@ class BgpXmppChannel::XmppPeer : public IPeer { parent_->MembershipRequestCallback(table); } - bool send_ready() const { return send_ready_; } + virtual bool send_ready() const { return send_ready_; } private: void WriteReadyCb(const boost::system::error_code &ec) { @@ -439,6 +439,7 @@ bool BgpXmppChannel::XmppPeer::SendUpdate(const uint8_t *msg, size_t msgsize, } void BgpXmppChannel::XmppPeer::Close(bool non_graceful) { + send_ready_ = true; parent_->set_peer_closed(true); if (server_ == NULL) return; diff --git a/src/bgp/bgp_xmpp_sandesh.cc b/src/bgp/bgp_xmpp_sandesh.cc index e2f776abac8..288c0f837f4 100644 --- a/src/bgp/bgp_xmpp_sandesh.cc +++ b/src/bgp/bgp_xmpp_sandesh.cc @@ -49,6 +49,7 @@ static void FillXmppNeighborInfo(BgpNeighborResp *bnr, bnr->set_primary_path_count(bx_channel->Peer()->GetPrimaryPathCount()); bnr->set_task_instance(connection->GetTaskInstance()); bnr->set_auth_type(connection->GetXmppAuthenticationType()); + bnr->set_send_ready(bx_channel->Peer()->send_ready()); bnr->set_flap_count(bx_channel->Peer()->peer_stats()->num_flaps()); bnr->set_flap_time(bx_channel->Peer()->peer_stats()->last_flap()); if (summary) diff 
--git a/src/bgp/ipeer.h b/src/bgp/ipeer.h index 3b34931fba9..fe8114b5137 100644 --- a/src/bgp/ipeer.h +++ b/src/bgp/ipeer.h @@ -20,6 +20,8 @@ class PeerCloseManager; class IPeerUpdate { public: virtual ~IPeerUpdate() { } + virtual bool send_ready() const { return true; } + // Printable name virtual const std::string &ToString() const = 0; diff --git a/src/bgp/test/bgp_ribout_updates_test.h b/src/bgp/test/bgp_ribout_updates_test.h index 68332d111fa..8183c868b09 100644 --- a/src/bgp/test/bgp_ribout_updates_test.h +++ b/src/bgp/test/bgp_ribout_updates_test.h @@ -75,6 +75,8 @@ class BgpTestPeer : public IPeerUpdate { int update_count() const { return count_; } void clear_update_count() { count_ = 0; } bool send_block() const { return send_block_; } + void clear_send_block() { send_block_ = false; } + virtual bool send_ready() const { return !send_block_; } private: int index_; @@ -355,6 +357,7 @@ class RibOutUpdatesTest : public ::testing::Test { void SetPeerUnblockNow(int idx) { ConcurrencyScope scope("bgp::SendReadyTask"); ASSERT_TRUE(idx < (int) peers_.size()); + peers_[idx]->clear_send_block(); spartition_->PeerSendReady(peers_[idx]); } @@ -363,6 +366,7 @@ class RibOutUpdatesTest : public ::testing::Test { ASSERT_TRUE(start_idx <= end_idx); ASSERT_TRUE(end_idx < (int) peers_.size()); for (int idx = start_idx; idx <= end_idx; idx++) { + peers_[idx]->clear_send_block(); spartition_->PeerSendReady(peers_[idx]); } } diff --git a/src/bgp/test/bgp_update_sender_test.cc b/src/bgp/test/bgp_update_sender_test.cc index 9664941d328..cdf6ecca0fd 100644 --- a/src/bgp/test/bgp_update_sender_test.cc +++ b/src/bgp/test/bgp_update_sender_test.cc @@ -39,8 +39,7 @@ class RibOutUpdatesMock : public RibOutUpdates { MOCK_METHOD4(TailDequeue, bool(int, const RibPeerSet &, RibPeerSet *, RibPeerSet *)); - MOCK_METHOD4(PeerDequeue, bool(int, IPeerUpdate *, - const RibPeerSet &, RibPeerSet *)); + MOCK_METHOD3(PeerDequeue, bool(int, IPeerUpdate *, RibPeerSet *)); }; static const int 
kPeerCount = 4; @@ -95,7 +94,7 @@ class BgpUpdateSenderTest : public ::testing::Test { SchedulerStart(); EXPECT_CALL(*updates_[0], TailDequeue(_, _, _, _)).Times(0); - EXPECT_CALL(*updates_[0], PeerDequeue(_, _, _, _)).Times(0); + EXPECT_CALL(*updates_[0], PeerDequeue(_, _, _)).Times(0); } virtual void TearDown() { @@ -563,7 +562,7 @@ TEST_F(BgpUpdateSenderTest, PeerDequeueBasic1a) { // the peers in sync. for (int idx = 0; idx < kPeerCount; idx++) { EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QUPDATE, peers_[idx], peerset, + PeerDequeue(RibOutUpdates::QUPDATE, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); @@ -617,7 +616,7 @@ TEST_F(BgpUpdateSenderTest, PeerDequeueBasic1b) { if (idx % 2 == 0) continue; EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QUPDATE, peers_[idx], odd_peerset, + PeerDequeue(RibOutUpdates::QUPDATE, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); @@ -668,7 +667,7 @@ TEST_F(BgpUpdateSenderTest, PeerDequeueBasic1c) { RibPeerSet peer0; BuildPeerSet(peer0, 0, 0, 0); EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QUPDATE, peers_[0], peer0, + PeerDequeue(RibOutUpdates::QUPDATE, peers_[0], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); @@ -719,10 +718,10 @@ TEST_F(BgpUpdateSenderTest, PeerDequeueBasic1d) { RibPeerSet peerbit; BuildPeerSet(peerbit, 0, idx); EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QUPDATE, peers_[idx], _, + PeerDequeue(RibOutUpdates::QUPDATE, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(peerbit), Return(false))); + .WillOnce(DoAll(SetArgPointee<2>(peerbit), Return(false))); } // Expect no calls to TailDequeue since none of the peers are in sync. 
@@ -778,7 +777,7 @@ TEST_F(BgpUpdateSenderTest, PeerDequeueBasic2a) { for (int qid = RibOutUpdates::QFIRST; qid < RibOutUpdates::QCOUNT; qid++) { for (int idx = 0; idx < kPeerCount; idx++) { EXPECT_CALL(*updates_[0], - PeerDequeue(qid, peers_[idx], peerset, + PeerDequeue(qid, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); @@ -850,7 +849,7 @@ TEST_F(BgpUpdateSenderTest, PeerDequeueBasic2b) { if (idx % 2 == 1) continue; EXPECT_CALL(*updates_[0], - PeerDequeue(qid, peers_[idx], even_peerset, + PeerDequeue(qid, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); @@ -922,15 +921,15 @@ TEST_F(BgpUpdateSenderTest, PeerDequeueBasic2d) { RibPeerSet peerbit; BuildPeerSet(peerbit, 0, idx); EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QUPDATE, peers_[idx], _, + PeerDequeue(RibOutUpdates::QUPDATE, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QBULK, peers_[idx], _, + PeerDequeue(RibOutUpdates::QBULK, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(peerbit), Return(false))); + .WillOnce(DoAll(SetArgPointee<2>(peerbit), Return(false))); } // Expect no calls to TailDequeue since none of the peers are in sync. 
@@ -983,7 +982,7 @@ TEST_F(BgpUpdateSenderTest, PeerDequeueBasic3a) { if (idx % 2 == 0) continue; EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QUPDATE, peers_[idx], peerset, + PeerDequeue(RibOutUpdates::QUPDATE, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); @@ -1046,7 +1045,7 @@ TEST_F(BgpUpdateSenderTest, PeerDequeueBasic3b) { if (idx % 2 == 1) continue; EXPECT_CALL(*updates_[0], - PeerDequeue(qid, peers_[idx], peerset, + PeerDequeue(qid, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); @@ -1104,14 +1103,14 @@ TEST_F(BgpUpdateSenderTest, PeerDequeueAlreadyBlocked) { // Expect PeerDequeue to be called for peer 0. Block both peers and // return false. EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QUPDATE, peers_[0], peerset, + PeerDequeue(RibOutUpdates::QUPDATE, peers_[0], Property(&RibPeerSet::empty, true))) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(peer01), Return(false))); + .WillOnce(DoAll(SetArgPointee<2>(peer01), Return(false))); // Expect PeerDequeue to be not called for peer 1. EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QUPDATE, peers_[1], _, + PeerDequeue(RibOutUpdates::QUPDATE, peers_[1], Property(&RibPeerSet::empty, true))) .Times(0); @@ -1176,14 +1175,14 @@ TEST_F(BgpUpdateSenderPeerDequeueBlocks1, BlockFirstQid) { // Expect PeerDequeue to be called for peer 0 for QUPDATE. Block the peer // and return false. EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QUPDATE, peers_[0], peerset, + PeerDequeue(RibOutUpdates::QUPDATE, peers_[0], Property(&RibPeerSet::empty, true))) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(peer0), Return(false))); + .WillOnce(DoAll(SetArgPointee<2>(peer0), Return(false))); // Expect PeerDequeue to be not called for QBULK. 
EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QBULK, peers_[0], _, + PeerDequeue(RibOutUpdates::QBULK, peers_[0], Property(&RibPeerSet::empty, true))) .Times(0); @@ -1209,15 +1208,15 @@ TEST_F(BgpUpdateSenderPeerDequeueBlocks1, BlockLastQid1) { // Expect PeerDequeue to be called for peer 0 for QUPDATE and QBULK. Block // the peer and return false when processing QBULK. EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QUPDATE, peers_[0], peerset, + PeerDequeue(RibOutUpdates::QUPDATE, peers_[0], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QBULK, peers_[0], peerset, + PeerDequeue(RibOutUpdates::QBULK, peers_[0], Property(&RibPeerSet::empty, true))) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(peer0), Return(false))); + .WillOnce(DoAll(SetArgPointee<2>(peer0), Return(false))); // Unblock peer 0. SchedulerStop(); @@ -1244,15 +1243,15 @@ TEST_F(BgpUpdateSenderPeerDequeueBlocks1, BlockLastQid2) { // Expect PeerDequeue to be called for peer 0 for QUPDATE and QBULK. Block // the peer and return true when processing QBULK. EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QUPDATE, peers_[0], peerset, + PeerDequeue(RibOutUpdates::QUPDATE, peers_[0], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); EXPECT_CALL(*updates_[0], - PeerDequeue(RibOutUpdates::QBULK, peers_[0], peerset, + PeerDequeue(RibOutUpdates::QBULK, peers_[0], Property(&RibPeerSet::empty, true))) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(peer0), Return(true))); + .WillOnce(DoAll(SetArgPointee<2>(peer0), Return(true))); // Unblock peer 0. 
SchedulerStop(); @@ -1301,7 +1300,7 @@ class BgpUpdateSenderMultiRibOutTest : public BgpUpdateSenderTest { for (int ro_idx = 0; ro_idx < kRiboutCount; ro_idx++) { Mock::VerifyAndClearExpectations(updates_[ro_idx]); EXPECT_CALL(*updates_[ro_idx], TailDequeue(_, _, _, _)).Times(0); - EXPECT_CALL(*updates_[ro_idx], PeerDequeue(_, _, _, _)).Times(0); + EXPECT_CALL(*updates_[ro_idx], PeerDequeue(_, _, _)).Times(0); } } }; @@ -1650,7 +1649,7 @@ TEST_F(BgpUpdateSenderMultiRibOutTest, PeerDequeueBasic1a) { RibPeerSet peerset; BuildPeerSet(peerset, ro_idx, 0, kPeerCount-1); EXPECT_CALL(*updates_[ro_idx], - PeerDequeue(qid, peers_[idx], peerset, + PeerDequeue(qid, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); @@ -1734,7 +1733,7 @@ TEST_F(BgpUpdateSenderMultiRibOutTest, PeerDequeueBasic1b) { RibPeerSet peerset; BuildPeerSet(peerset, ro_idx, 0, kPeerCount-1); EXPECT_CALL(*updates_[ro_idx], - PeerDequeue(qid, peers_[idx], peerset, + PeerDequeue(qid, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); @@ -1818,10 +1817,10 @@ TEST_F(BgpUpdateSenderMultiRibOutTest, PeerDequeueBlocks1a) { BuildPeerSet(peerbit, 0, idx); BuildPeerSet(peerset, 0, idx, kPeerCount-1); EXPECT_CALL(*updates_[0], - PeerDequeue(qid, peers_[idx], peerset, + PeerDequeue(qid, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(peerbit), Return(false))); + .WillOnce(DoAll(SetArgPointee<2>(peerbit), Return(false))); } // Expect PeerDequeue to be not called for any peer for the rest of the @@ -1829,7 +1828,7 @@ TEST_F(BgpUpdateSenderMultiRibOutTest, PeerDequeueBlocks1a) { for (int idx = 0; idx < kPeerCount; idx++) { for (int ro_idx = 1; ro_idx < kRiboutCount; ro_idx++) { EXPECT_CALL(*updates_[0], - PeerDequeue(qid, peers_[idx], _, + PeerDequeue(qid, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(0); } @@ -1851,7 +1850,7 @@ TEST_F(BgpUpdateSenderMultiRibOutTest, PeerDequeueBlocks1a) { 
RibPeerSet peerset; BuildPeerSet(peerset, ro_idx, 0, kPeerCount-1); EXPECT_CALL(*updates_[ro_idx], - PeerDequeue(qid, peers_[idx], peerset, + PeerDequeue(qid, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); @@ -1935,7 +1934,7 @@ TEST_F(BgpUpdateSenderMultiRibOutTest, PeerDequeueBlocks1b) { BuildPeerSet(peerset, ro_idx, idx, kPeerCount-1); if (ro_idx != kRiboutCount - 1) { EXPECT_CALL(*updates_[ro_idx], - PeerDequeue(qid, peers_[idx], peerset, + PeerDequeue(qid, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); @@ -1943,10 +1942,10 @@ TEST_F(BgpUpdateSenderMultiRibOutTest, PeerDequeueBlocks1b) { RibPeerSet peerbit; BuildPeerSet(peerbit, ro_idx, idx); EXPECT_CALL(*updates_[ro_idx], - PeerDequeue(qid, peers_[idx], peerset, + PeerDequeue(qid, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) - .WillOnce(DoAll(SetArgPointee<3>(peerbit), + .WillOnce(DoAll(SetArgPointee<2>(peerbit), Return(false))); } } @@ -1967,7 +1966,7 @@ TEST_F(BgpUpdateSenderMultiRibOutTest, PeerDequeueBlocks1b) { RibPeerSet peerset; BuildPeerSet(peerset, kRiboutCount-1, 0, kPeerCount-1); EXPECT_CALL(*updates_[kRiboutCount-1], - PeerDequeue(qid, peers_[idx], peerset, + PeerDequeue(qid, peers_[idx], Property(&RibPeerSet::empty, true))) .Times(1) .WillOnce(Return(true)); diff --git a/src/bgp/test/bgp_update_test.cc b/src/bgp/test/bgp_update_test.cc index be86d31ded0..4d689b51f4a 100644 --- a/src/bgp/test/bgp_update_test.cc +++ b/src/bgp/test/bgp_update_test.cc @@ -51,7 +51,7 @@ static int gbl_index; class BgpTestPeer : public IPeerUpdate { public: - BgpTestPeer() : index_(gbl_index++), count_(0) { + BgpTestPeer() : index_(gbl_index++), count_(0), send_block_(false) { std::ostringstream repr; repr << "Peer" << index_; to_str_ = repr.str(); @@ -63,14 +63,15 @@ class BgpTestPeer : public IPeerUpdate { virtual bool SendUpdate(const uint8_t *msg, size_t msgsize) { count_++; - bool send_block = block_set_.find(count_) != 
block_set_.end(); - if (send_block) { + send_block_ = block_set_.find(count_) != block_set_.end(); + if (send_block_) { cond_var_.Set(); } - return !send_block; + return !send_block_; } void WriteActive(BgpUpdateSender *sender) { + send_block_ = false; sender->PeerSendReady(this); } @@ -87,12 +88,14 @@ class BgpTestPeer : public IPeerUpdate { block_set_.insert(step); } int update_count() const { return count_; } + virtual bool send_ready() const { return !send_block_; } private: int index_; std::set block_set_; int count_; Condition cond_var_; + bool send_block_; std::string to_str_; };