Remove ports that are not used by any channel after timeout
If a port is not used by any channel and has had no connection for 30 seconds, it will be removed. Note that as long as a port is used by a transport channel, it is kept alive even if it has no connections. This benefits continual gathering, because new connections can be created on such a port later when the network changes.

BUG=
R=pthatcher@webrtc.org, zhihuang@webrtc.org

Review URL: https://codereview.webrtc.org/2171183002 .

Cr-Commit-Position: refs/heads/master@{#13567}
This commit is contained in:
parent c309e0e3ea
commit a74363c998
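The change can be read as a small port-lifetime state machine: a port starts in INIT, is marked KEEP_ALIVE_UNTIL_PRUNED once a transport channel starts using it, and becomes eligible for destruction only after it is PRUNED and has had no connections for the timeout period (30 seconds by default). The following standalone C++ sketch illustrates only that rule; it is not the WebRTC implementation, and the names FakePort, ShouldDestroy() and kPortTimeoutMs are invented for illustration (the real Port schedules delayed MSG_DESTROY_IF_DEAD messages instead of polling a clock).

// Minimal sketch of the port-lifetime rule described above (illustration only,
// not the WebRTC classes shown in the diff below).
#include <cstdint>
#include <iostream>

namespace sketch {

constexpr int64_t kPortTimeoutMs = 30 * 1000;  // "no connection for 30 seconds"

enum class State { INIT, KEEP_ALIVE_UNTIL_PRUNED, PRUNED };

class FakePort {
 public:
  // Called when a transport channel starts using the port: from then on it is
  // kept even with zero connections, until it is explicitly pruned.
  void KeepAliveUntilPruned() {
    if (state_ == State::INIT) state_ = State::KEEP_ALIVE_UNTIL_PRUNED;
  }

  // Called when no channel needs the port any more (e.g. a newer allocator
  // session replaced it); it may now die once it has no connections.
  void Prune() { state_ = State::PRUNED; }

  void AddConnection() { ++connections_; }
  void RemoveConnection(int64_t now_ms) {
    if (--connections_ == 0) last_all_connections_removed_ms_ = now_ms;
  }

  // The destruction rule: only INIT or PRUNED ports with no connections and an
  // expired timeout are considered dead.
  bool ShouldDestroy(int64_t now_ms) const {
    return (state_ == State::INIT || state_ == State::PRUNED) &&
           connections_ == 0 &&
           now_ms - last_all_connections_removed_ms_ >= kPortTimeoutMs;
  }

 private:
  State state_ = State::INIT;
  int connections_ = 0;
  int64_t last_all_connections_removed_ms_ = 0;
};

}  // namespace sketch

int main() {
  sketch::FakePort port;
  port.KeepAliveUntilPruned();          // a channel is using the port
  port.AddConnection();
  port.RemoveConnection(/*now_ms=*/0);  // all connections gone at t = 0
  std::cout << port.ShouldDestroy(60000) << "\n";  // 0: kept for regathering
  port.Prune();                                    // no channel needs it now
  std::cout << port.ShouldDestroy(60000) << "\n";  // 1: dead after the timeout
  return 0;
}

Under this rule a port that a channel has taken over survives losing all of its connections, which is what lets continual gathering reuse it when the network changes; once a newer allocator session prunes it, it is destroyed after the normal timeout.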
@@ -149,6 +149,7 @@ class FakePortAllocatorSession : public PortAllocatorSession {
   std::vector<Candidate> ReadyCandidates() const override {
     return candidates_;
   }
+  void PruneAllPorts() override { port_->Prune(); }
   bool CandidatesAllocationDone() const override { return allocation_done_; }

   int port_config_count() { return port_config_count_; }
@@ -181,6 +182,7 @@ class FakePortAllocatorSession : public PortAllocatorSession {
     port->PrepareAddress();
     ready_ports_.push_back(port);
     SignalPortReady(this, port);
+    port->KeepAliveUntilPruned();
   }

   void OnPortComplete(cricket::Port* port) {
     const std::vector<Candidate>& candidates = port->Candidates();
@@ -143,14 +143,15 @@ void P2PTransportChannel::AddAllocatorSession(
       this, &P2PTransportChannel::OnCandidatesRemoved);
   session->SignalCandidatesAllocationDone.connect(
       this, &P2PTransportChannel::OnCandidatesAllocationDone);
+  if (!allocator_sessions_.empty()) {
+    allocator_session()->PruneAllPorts();
+  }
+  allocator_sessions_.push_back(std::move(session));

   // We now only want to apply new candidates that we receive to the ports
   // created by this new session because these are replacing those of the
   // previous sessions.
-  pruned_ports_.insert(pruned_ports_.end(), ports_.begin(), ports_.end());
-  ports_.clear();
-
-  allocator_sessions_.push_back(std::move(session));
+  PruneAllPorts();
 }

 void P2PTransportChannel::AddConnection(Connection* connection) {
@@ -238,7 +239,7 @@ void P2PTransportChannel::SetIceRole(IceRole ice_role) {
     for (PortInterface* port : ports_) {
       port->SetIceRole(ice_role);
     }
-    // Update role on removed ports as well, because they may still have
+    // Update role on pruned ports as well, because they may still have
     // connections alive that should be using the correct role.
     for (PortInterface* port : pruned_ports_) {
       port->SetIceRole(ice_role);
@@ -1688,7 +1689,7 @@ void P2PTransportChannel::OnPortsPruned(
     const std::vector<PortInterface*>& ports) {
   ASSERT(worker_thread_ == rtc::Thread::Current());
   for (PortInterface* port : ports) {
-    if (OnPortPruned(port)) {
+    if (PrunePort(port)) {
       LOG(INFO) << "Removed port: " << port->ToString() << " " << ports_.size()
                 << " remaining";
     }
@@ -1727,7 +1728,12 @@ void P2PTransportChannel::OnRegatherOnFailedNetworks() {
                              MSG_REGATHER_ON_FAILED_NETWORKS);
 }

-bool P2PTransportChannel::OnPortPruned(PortInterface* port) {
+void P2PTransportChannel::PruneAllPorts() {
+  pruned_ports_.insert(pruned_ports_.end(), ports_.begin(), ports_.end());
+  ports_.clear();
+}
+
+bool P2PTransportChannel::PrunePort(PortInterface* port) {
   auto it = std::find(ports_.begin(), ports_.end(), port);
   // Don't need to do anything if the port has been deleted from the port list.
   if (it == ports_.end()) {
@@ -127,9 +127,10 @@ class P2PTransportChannel : public TransportChannelImpl,
   const Connection* selected_connection() const { return selected_connection_; }
   void set_incoming_only(bool value) { incoming_only_ = value; }

-  // Note: This is only for testing purpose.
-  // |ports_| should not be changed from outside.
+  // Note: These are only for testing purpose.
+  // |ports_| and |pruned_ports| should not be changed from outside.
   const std::vector<PortInterface*>& ports() { return ports_; }
+  const std::vector<PortInterface*>& pruned_ports() { return pruned_ports_; }

   IceMode remote_ice_mode() const { return remote_ice_mode_; }

@@ -184,6 +185,7 @@ class P2PTransportChannel : public TransportChannelImpl,
     return false;
   }

+  void PruneAllPorts();
   int receiving_timeout() const { return config_.receiving_timeout; }
   int check_receiving_interval() const { return check_receiving_interval_; }

@@ -295,7 +297,7 @@ class P2PTransportChannel : public TransportChannelImpl,
   void OnPortDestroyed(PortInterface* port);
   // When pruning a port, move it from |ports_| to |pruned_ports_|.
   // Returns true if the port is found and removed from |ports_|.
-  bool OnPortPruned(PortInterface* port);
+  bool PrunePort(PortInterface* port);
   void OnRoleConflict(PortInterface* port);

   void OnConnectionStateChange(Connection* connection);
@@ -2497,6 +2497,13 @@ class P2PTransportChannelPingTest : public testing::Test,
     return static_cast<Port*>(ch->ports()[0]);
   }

+  Port* GetPrunedPort(P2PTransportChannel* ch) {
+    if (ch->pruned_ports().empty()) {
+      return nullptr;
+    }
+    return static_cast<Port*>(ch->pruned_ports()[0]);
+  }
+
   Connection* GetConnectionTo(P2PTransportChannel* ch,
                               const std::string& ip,
                               int port_num) {
@@ -3579,14 +3586,14 @@ TEST_F(P2PTransportChannelPingTest, TestIceRoleUpdatedOnPortAfterIceRestart) {
 }

 // Test that after some amount of time without receiving data, the connection
-// and port are destroyed.
-TEST_F(P2PTransportChannelPingTest, TestPortDestroyedAfterTimeout) {
+// will be destroyed. The port will only be destroyed after it is marked as
+// "pruned."
+TEST_F(P2PTransportChannelPingTest, TestPortDestroyedAfterTimeoutAndPruned) {
   rtc::ScopedFakeClock fake_clock;

   FakePortAllocator pa(rtc::Thread::Current(), nullptr);
   P2PTransportChannel ch("test channel", ICE_CANDIDATE_COMPONENT_DEFAULT, &pa);
   PrepareChannel(&ch);
-  // Only a controlled channel should expect its ports to be destroyed.
   ch.SetIceRole(ICEROLE_CONTROLLED);
   ch.MaybeStartGathering();
   ch.AddRemoteCandidate(CreateUdpCandidate(LOCAL_PORT_TYPE, "1.1.1.1", 1, 1));
@@ -3600,7 +3607,14 @@ TEST_F(P2PTransportChannelPingTest, TestPortDestroyedAfterTimeout) {
     fake_clock.AdvanceTime(rtc::TimeDelta::FromSeconds(1));
   }
   EXPECT_EQ(nullptr, GetConnectionTo(&ch, "1.1.1.1", 1));
-  EXPECT_EQ(nullptr, GetPort(&ch));
+  // Port will not be removed because it is not pruned yet.
+  PortInterface* port = GetPort(&ch);
+  ASSERT_NE(nullptr, port);
+
+  // If the session prunes all ports, the port will be destroyed.
+  ch.allocator_session()->PruneAllPorts();
+  EXPECT_EQ_SIMULATED_WAIT(nullptr, GetPort(&ch), 1, fake_clock);
+  EXPECT_EQ_SIMULATED_WAIT(nullptr, GetPrunedPort(&ch), 1, fake_clock);
 }

 class P2PTransportChannelMostLikelyToWorkFirstTest
@@ -198,6 +198,8 @@ void Port::Construct() {
   network_->SignalTypeChanged.connect(this, &Port::OnNetworkTypeChanged);
   network_cost_ = network_->GetCost();

+  thread_->PostDelayed(RTC_FROM_HERE, timeout_delay_, this,
+                       MSG_DESTROY_IF_DEAD);
   LOG_J(LS_INFO, this) << "Port created with network cost " << network_cost_;
 }

@@ -644,9 +646,25 @@ void Port::SendBindingErrorResponse(StunMessage* request,
                        << " to " << addr.ToSensitiveString();
 }

+void Port::KeepAliveUntilPruned() {
+  // If it is pruned, we won't bring it up again.
+  if (state_ == State::INIT) {
+    state_ = State::KEEP_ALIVE_UNTIL_PRUNED;
+  }
+}
+
+void Port::Prune() {
+  state_ = State::PRUNED;
+  thread_->Post(RTC_FROM_HERE, this, MSG_DESTROY_IF_DEAD);
+}
+
 void Port::OnMessage(rtc::Message *pmsg) {
-  ASSERT(pmsg->message_id == MSG_CHECK_DEAD);
-  if (dead()) {
+  ASSERT(pmsg->message_id == MSG_DESTROY_IF_DEAD);
+  bool dead =
+      (state_ == State::INIT || state_ == State::PRUNED) &&
+      connections_.empty() &&
+      rtc::TimeMillis() - last_time_all_connections_removed_ >= timeout_delay_;
+  if (dead) {
     Destroy();
   }
 }
@@ -699,22 +717,18 @@ void Port::OnConnectionDestroyed(Connection* conn) {
   connections_.erase(iter);
   HandleConnectionDestroyed(conn);

-  // On the controlled side, ports time out after all connections fail.
+  // Ports time out after all connections fail if it is not marked as
+  // "keep alive until pruned."
   // Note: If a new connection is added after this message is posted, but it
   // fails and is removed before kPortTimeoutDelay, then this message will
   // not cause the Port to be destroyed.
-  if (ice_role_ == ICEROLE_CONTROLLED && connections_.empty()) {
+  if (connections_.empty()) {
     last_time_all_connections_removed_ = rtc::TimeMillis();
-    thread_->PostDelayed(RTC_FROM_HERE, timeout_delay_, this, MSG_CHECK_DEAD);
+    thread_->PostDelayed(RTC_FROM_HERE, timeout_delay_, this,
+                         MSG_DESTROY_IF_DEAD);
   }
 }

-bool Port::dead() const {
-  return ice_role_ == ICEROLE_CONTROLLED && connections_.empty() &&
-         rtc::TimeMillis() - last_time_all_connections_removed_ >=
-             timeout_delay_;
-}
-
 void Port::Destroy() {
   ASSERT(connections_.empty());
   LOG_J(LS_INFO, this) << "Port deleted";
@@ -122,6 +122,12 @@ typedef std::set<rtc::SocketAddress> ServerAddresses;
 class Port : public PortInterface, public rtc::MessageHandler,
              public sigslot::has_slots<> {
  public:
+  // INIT: The state when a port is just created.
+  // KEEP_ALIVE_UNTIL_PRUNED: A port should not be destroyed even if no
+  // connection is using it.
+  // PRUNED: It will be destroyed if no connection is using it for a period of
+  // 30 seconds.
+  enum class State { INIT, KEEP_ALIVE_UNTIL_PRUNED, PRUNED };
   Port(rtc::Thread* thread,
        const std::string& type,
        rtc::PacketSocketFactory* factory,
@@ -153,6 +159,12 @@ class Port : public PortInterface, public rtc::MessageHandler,
   virtual bool SharedSocket() const { return shared_socket_; }
   void ResetSharedSocket() { shared_socket_ = false; }

+  // Should not destroy the port even if no connection is using it. Called when
+  // a port is ready to use.
+  void KeepAliveUntilPruned();
+  // Allows a port to be destroyed if no connection is using it.
+  void Prune();
+
   // The thread on which this port performs its I/O.
   rtc::Thread* thread() { return thread_; }

@@ -297,7 +309,7 @@ class Port : public PortInterface, public rtc::MessageHandler,
   int16_t network_cost() const { return network_cost_; }

  protected:
-  enum { MSG_CHECK_DEAD = 0, MSG_FIRST_AVAILABLE };
+  enum { MSG_DESTROY_IF_DEAD = 0, MSG_FIRST_AVAILABLE };

   virtual void UpdateNetworkCost();

@@ -354,10 +366,6 @@ class Port : public PortInterface, public rtc::MessageHandler,
   // Called when one of our connections deletes itself.
   void OnConnectionDestroyed(Connection* conn);

-  // Whether this port is dead, and hence, should be destroyed on the controlled
-  // side.
-  bool dead() const;
-
   void OnNetworkTypeChanged(const rtc::Network* network);

   rtc::Thread* thread_;
@@ -396,6 +404,7 @@ class Port : public PortInterface, public rtc::MessageHandler,
   // (WiFi. vs. Cellular). It takes precedence over the priority when
   // comparing two connections.
   uint16_t network_cost_;
+  State state_ = State::INIT;
   int64_t last_time_all_connections_removed_ = 0;

   friend class Connection;
@@ -384,7 +384,7 @@ class PortTest : public testing::Test, public sigslot::has_slots<> {
         username_(rtc::CreateRandomString(ICE_UFRAG_LENGTH)),
         password_(rtc::CreateRandomString(ICE_PWD_LENGTH)),
         role_conflict_(false),
-        destroyed_(false) {
+        ports_destroyed_(0) {
     network_.AddIP(rtc::IPAddress(INADDR_ANY));
   }

@@ -755,10 +755,8 @@ class PortTest : public testing::Test, public sigslot::has_slots<> {
     port->SignalDestroyed.connect(this, &PortTest::OnDestroyed);
   }

-  void OnDestroyed(PortInterface* port) {
-    destroyed_ = true;
-  }
-  bool destroyed() const { return destroyed_; }
+  void OnDestroyed(PortInterface* port) { ++ports_destroyed_; }
+  int ports_destroyed() const { return ports_destroyed_; }

   rtc::BasicPacketSocketFactory* nat_socket_factory1() {
     return &nat_socket_factory1_;
@@ -785,7 +783,7 @@ class PortTest : public testing::Test, public sigslot::has_slots<> {
   std::string username_;
   std::string password_;
   bool role_conflict_;
-  bool destroyed_;
+  int ports_destroyed_;
 };

 void PortTest::TestConnectivity(const char* name1, Port* port1,
@@ -2567,15 +2565,21 @@ TEST_F(PortTest, TestIceLiteConnectivity) {
   ch1.Stop();
 }

-// This test case verifies that the CONTROLLING port does not time out.
-TEST_F(PortTest, TestControllingNoTimeout) {
+// This test case verifies that both the controlling port and the controlled
+// port will time out after connectivity is lost, if they are not marked as
+// "keep alive until pruned."
+TEST_F(PortTest, TestPortTimeoutIfNotKeptAlive) {
+  rtc::ScopedFakeClock clock;
+  int timeout_delay = 100;
   UDPPort* port1 = CreateUdpPort(kLocalAddr1);
   ConnectToSignalDestroyed(port1);
-  port1->set_timeout_delay(10);  // milliseconds
+  port1->set_timeout_delay(timeout_delay);  // milliseconds
   port1->SetIceRole(cricket::ICEROLE_CONTROLLING);
   port1->SetIceTiebreaker(kTiebreaker1);

   UDPPort* port2 = CreateUdpPort(kLocalAddr2);
+  ConnectToSignalDestroyed(port2);
+  port2->set_timeout_delay(timeout_delay);  // milliseconds
   port2->SetIceRole(cricket::ICEROLE_CONTROLLED);
   port2->SetIceTiebreaker(kTiebreaker2);

@@ -2585,90 +2589,91 @@ TEST_F(PortTest, TestControllingNoTimeout) {

   // Simulate a connection that succeeds, and then is destroyed.
   StartConnectAndStopChannels(&ch1, &ch2);

-  // After the connection is destroyed, the port should not be destroyed.
-  rtc::Thread::Current()->ProcessMessages(kTimeout);
-  EXPECT_FALSE(destroyed());
+  // After the connection is destroyed, the port will be destroyed because
+  // none of them is marked as "keep alive until pruned.
+  EXPECT_EQ_SIMULATED_WAIT(2, ports_destroyed(), 110, clock);
 }

-// This test case verifies that the CONTROLLED port does time out, but only
-// after connectivity is lost and no connection was created during the timeout
-// period.
-TEST_F(PortTest, TestControlledTimeout) {
+// Test that if after all connection are destroyed, new connections are created
+// and destroyed again, ports won't be destroyed until a timeout period passes
+// after the last set of connections are all destroyed.
+TEST_F(PortTest, TestPortTimeoutAfterNewConnectionCreatedAndDestroyed) {
   rtc::ScopedFakeClock clock;
+  int timeout_delay = 100;
   UDPPort* port1 = CreateUdpPort(kLocalAddr1);
+  ConnectToSignalDestroyed(port1);
+  port1->set_timeout_delay(timeout_delay);  // milliseconds
   port1->SetIceRole(cricket::ICEROLE_CONTROLLING);
   port1->SetIceTiebreaker(kTiebreaker1);

   UDPPort* port2 = CreateUdpPort(kLocalAddr2);
   ConnectToSignalDestroyed(port2);
-  port2->set_timeout_delay(100);  // milliseconds
+  port2->set_timeout_delay(timeout_delay);  // milliseconds
   port2->SetIceRole(cricket::ICEROLE_CONTROLLED);
   port2->SetIceTiebreaker(kTiebreaker2);

-  // The connection must not be destroyed before a connection is attempted.
-  EXPECT_FALSE(destroyed());

   port1->set_component(cricket::ICE_CANDIDATE_COMPONENT_DEFAULT);
   port2->set_component(cricket::ICE_CANDIDATE_COMPONENT_DEFAULT);

   // Set up channels and ensure both ports will be deleted.
   TestChannel ch1(port1);
   TestChannel ch2(port2);

   // Simulate a connection that succeeds, and then is destroyed.
   StartConnectAndStopChannels(&ch1, &ch2);
+  SIMULATED_WAIT(ports_destroyed() > 0, 80, clock);
+  EXPECT_EQ(0, ports_destroyed());

-  SIMULATED_WAIT(false, 80, clock);
+  // Start the second set of connection and destroy them.
   ch1.CreateConnection(GetCandidate(ch2.port()));
   ch2.CreateConnection(GetCandidate(ch1.port()));

-  // ch2 creates a connection so it will not be destroyed.
-  SIMULATED_WAIT(destroyed(), 80, clock);
-  EXPECT_FALSE(destroyed());
-
-  // Even if ch2 stops now, it won't be destroyed until 100ms after the
-  // connection is destroyed.
   ch1.Stop();
   ch2.Stop();
-  SIMULATED_WAIT(destroyed(), 80, clock);
-  EXPECT_FALSE(destroyed());
-
-  // The controlled port should be destroyed after timeout.
-  EXPECT_TRUE_SIMULATED_WAIT(destroyed(), 30, clock);
+  SIMULATED_WAIT(ports_destroyed() > 0, 80, clock);
+  EXPECT_EQ(0, ports_destroyed());
+
+  // The ports on both sides should be destroyed after timeout.
+  EXPECT_TRUE_SIMULATED_WAIT(ports_destroyed() == 2, 30, clock);
 }

-// This test case verifies that if the role of a port changes from controlled
-// to controlling after all connections fail, the port will not be destroyed.
-TEST_F(PortTest, TestControlledToControllingNotDestroyed) {
+// This test case verifies that neither the controlling port nor the controlled
+// port will time out after connectivity is lost if they are marked as "keep
+// alive until pruned". They will time out after they are pruned.
+TEST_F(PortTest, TestPortNotTimeoutUntilPruned) {
+  rtc::ScopedFakeClock clock;
+  int timeout_delay = 100;
   UDPPort* port1 = CreateUdpPort(kLocalAddr1);
   ConnectToSignalDestroyed(port1);
+  port1->set_timeout_delay(timeout_delay);  // milliseconds
   port1->SetIceRole(cricket::ICEROLE_CONTROLLING);
   port1->SetIceTiebreaker(kTiebreaker1);

   UDPPort* port2 = CreateUdpPort(kLocalAddr2);
   ConnectToSignalDestroyed(port2);
-  port2->set_timeout_delay(10);  // milliseconds
+  port2->set_timeout_delay(timeout_delay);  // milliseconds
   port2->SetIceRole(cricket::ICEROLE_CONTROLLED);
   port2->SetIceTiebreaker(kTiebreaker2);

   // The connection must not be destroyed before a connection is attempted.
-  EXPECT_FALSE(destroyed());
+  EXPECT_EQ(0, ports_destroyed());

   port1->set_component(cricket::ICE_CANDIDATE_COMPONENT_DEFAULT);
   port2->set_component(cricket::ICE_CANDIDATE_COMPONENT_DEFAULT);

-  // Set up channels and ensure both ports will be deleted.
+  // Set up channels and keep the port alive.
   TestChannel ch1(port1);
   TestChannel ch2(port2);

-  // Simulate a connection that succeeds, and then is destroyed.
+  // Simulate a connection that succeeds, and then is destroyed. But ports
+  // are kept alive. Ports won't be destroyed.
   StartConnectAndStopChannels(&ch1, &ch2);
-  // Switch the role after all connections are destroyed.
-  EXPECT_TRUE_WAIT(ch2.conn() == nullptr, kTimeout);
-  port1->SetIceRole(cricket::ICEROLE_CONTROLLED);
-  port2->SetIceRole(cricket::ICEROLE_CONTROLLING);
+  port1->KeepAliveUntilPruned();
+  port2->KeepAliveUntilPruned();
+  SIMULATED_WAIT(ports_destroyed() > 0, 150, clock);
+  EXPECT_EQ(0, ports_destroyed());

-  // After the connection is destroyed, the port should not be destroyed.
-  rtc::Thread::Current()->ProcessMessages(kTimeout);
-  EXPECT_FALSE(destroyed());
+  // If they are pruned now, they will be destroyed right away.
+  port1->Prune();
+  port2->Prune();
+  // The ports on both sides should be destroyed after timeout.
+  EXPECT_TRUE_SIMULATED_WAIT(ports_destroyed() == 2, 1, clock);
 }

 TEST_F(PortTest, TestSupportsProtocol) {
@@ -189,6 +189,9 @@ class PortAllocatorSession : public sigslot::has_slots<> {
   virtual std::vector<PortInterface*> ReadyPorts() const = 0;
   virtual std::vector<Candidate> ReadyCandidates() const = 0;
   virtual bool CandidatesAllocationDone() const = 0;
+  // Marks all ports in the current session as "pruned" so that they may be
+  // destroyed if no connection is using them.
+  virtual void PruneAllPorts() {}

   sigslot::signal2<PortAllocatorSession*, PortInterface*> SignalPortReady;
   // Fires this signal when the network of the ports failed (either because the
@@ -706,6 +706,7 @@ void BasicPortAllocatorSession::OnCandidateReady(
   if (!data->pruned()) {
     LOG_J(LS_INFO, port) << "Port ready.";
     SignalPortReady(this, port);
+    port->KeepAliveUntilPruned();
   }
 }

@@ -761,6 +762,7 @@ bool BasicPortAllocatorSession::PruneTurnPorts(Port* newly_pairable_turn_port) {
         ComparePort(data.port(), best_turn_port) < 0) {
       data.set_pruned();
       pruned = true;
+      data.port()->Prune();
       if (data.port() != newly_pairable_turn_port) {
         pruned_ports.push_back(data.port());
       }
@@ -773,6 +775,12 @@ bool BasicPortAllocatorSession::PruneTurnPorts(Port* newly_pairable_turn_port) {
   return pruned;
 }

+void BasicPortAllocatorSession::PruneAllPorts() {
+  for (PortData& data : ports_) {
+    data.port()->Prune();
+  }
+}
+
 void BasicPortAllocatorSession::OnPortComplete(Port* port) {
   ASSERT(rtc::Thread::Current() == network_thread_);
   LOG_J(LS_INFO, port) << "Port completed gathering candidates.";
@@ -942,6 +950,8 @@ void BasicPortAllocatorSession::RemovePortsAndCandidates(
                   data.sequence()->network()) == networks.end()) {
       continue;
     }
+    // Prune the port so that it may be destroyed.
+    data.port()->Prune();
     ports_to_remove.push_back(data.port());
     if (data.has_pairable_candidate()) {
       GetCandidatesFromPort(data, &candidates_to_remove);
@@ -108,6 +108,7 @@ class BasicPortAllocatorSession : public PortAllocatorSession,
   std::vector<Candidate> ReadyCandidates() const override;
   bool CandidatesAllocationDone() const override;
   void RegatherOnFailedNetworks() override;
+  void PruneAllPorts() override;

  protected:
   void UpdateIceParametersInternal() override;
@@ -416,11 +416,11 @@ class BasicPortAllocatorTest : public testing::Test,
               std::find(ready_ports.begin(), ready_ports.end(), port));
   }
   void OnPortsPruned(PortAllocatorSession* ses,
-                     const std::vector<PortInterface*>& ports_pruned) {
-    LOG(LS_INFO) << "Number of ports pruned: " << ports_pruned.size();
+                     const std::vector<PortInterface*>& pruned_ports) {
+    LOG(LS_INFO) << "Number of ports pruned: " << pruned_ports.size();
     auto ready_ports = ses->ReadyPorts();
     auto new_end = ports_.end();
-    for (PortInterface* port : ports_pruned) {
+    for (PortInterface* port : pruned_ports) {
       new_end = std::remove(ports_.begin(), new_end, port);
       // Make sure the pruned port is not in ReadyPorts.
       EXPECT_EQ(ready_ports.end(),