Remove BasicPortAllocator::EnableProtocol.

I'm not sure why we ever had this in the first place, and it confuses
people on a nearly weekly basis, so let's get rid of it. The protocols
are enabled right after the corresponding gathering phase runs, so the only
real effect the check has is to produce confusing log messages (first
"candidate not signaled because protocol not enabled", then "protocol
enabled, signaling candidate" right afterwards).

BUG=None

Review-Url: https://codereview.webrtc.org/3018483002
Cr-Commit-Position: refs/heads/master@{#19873}
commit 1c5e6d0a3f
parent 7f1563facf
Author: deadbeef
Date: 2017-09-15 17:46:56 -07:00 (committed by Commit Bot)
4 changed files with 12 additions and 79 deletions
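
To make the "confusing log messages" concrete, here is a minimal, self-contained toy model of the old behavior. This is not WebRTC code; the type name is invented and the log strings only mirror the ones in the diff below. Because ports were created and the protocol was enabled within the same synchronous allocation phase, any candidate that surfaced during port creation was first held back and then signaled a moment later:

#include <iostream>
#include <string>
#include <vector>

// Toy model of the removed "protocol enabled" gate (illustrative names only).
struct ToyAllocatorSession {
  std::vector<std::string> held_back;  // candidates blocked by the gate
  bool udp_enabled = false;

  void OnCandidateReady(const std::string& c) {
    if (!udp_enabled) {
      std::cout << "Not yet signaling candidate because protocol is not yet "
                   "enabled.\n";
      held_back.push_back(c);
      return;
    }
    std::cout << "Signaling candidate: " << c << "\n";
  }

  void EnableUdp() {
    udp_enabled = true;
    for (const auto& c : held_back)
      std::cout << "Signaling candidate because protocol was enabled: " << c << "\n";
    held_back.clear();
  }

  // The old PHASE_UDP handler ran both steps back to back in the same
  // synchronous phase, so the two log lines always appeared together.
  void RunUdpPhase() {
    OnCandidateReady("host udp candidate");  // surfaces during port creation
    EnableUdp();
  }
};

int main() {
  ToyAllocatorSession session;
  session.RunUdpPhase();
  return 0;
}

With EnableProtocol gone, that gate disappears: a ready candidate that passes the candidate filter is signaled immediately, as the BasicPortAllocatorSession::OnCandidateReady hunk below shows.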

@@ -448,6 +448,12 @@ class PortAllocator : public sigslot::has_slots<> {
void set_max_ipv6_networks(int networks) { max_ipv6_networks_ = networks; }
int max_ipv6_networks() { return max_ipv6_networks_; }
// Delay between different candidate gathering phases (UDP, TURN, TCP).
// Defaults to 1 second, but PeerConnection sets it to 50ms.
// TODO(deadbeef): Get rid of this. Its purpose is to avoid sending too many
// STUN transactions at once, but that's already happening if you configure
// multiple STUN servers or have multiple network interfaces. We should
// implement some global pacing logic instead if that's our goal.
uint32_t step_delay() const { return step_delay_; }
void set_step_delay(uint32_t delay) { step_delay_ = delay; }
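
The TODO above is about pacing rather than about this change, but for context: the step delay is a plain setter on the allocator, so an embedder can opt into the faster pacing that PeerConnection uses. A minimal sketch, assuming the kMinimumStepDelay constant (50 ms) that I believe this header defines, and an include path that matched the tree at the time:

#include "p2p/base/portallocator.h"  // include path is an assumption

// Sketch: lower the default 1 s step delay to the 50 ms minimum before the
// allocator starts gathering, mirroring what PeerConnection does.
void UseFastGatheringPacing(cricket::PortAllocator* allocator) {
  allocator->set_step_delay(cricket::kMinimumStepDelay);  // 50 ms
}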

@@ -43,9 +43,8 @@ enum {
const int PHASE_UDP = 0;
const int PHASE_RELAY = 1;
const int PHASE_TCP = 2;
const int PHASE_SSLTCP = 3;
const int kNumPhases = 4;
const int kNumPhases = 3;
// Gets protocol priority: UDP > TCP > SSLTCP == TLS.
int GetProtocolPriority(cricket::ProtocolType protocol) {
@@ -408,11 +407,6 @@ void BasicPortAllocatorSession::GetCandidatesFromPort(
if (!CheckCandidateFilter(candidate)) {
continue;
}
ProtocolType pvalue;
if (!StringToProto(candidate.protocol().c_str(), &pvalue) ||
!data.sequence()->ProtocolEnabled(pvalue)) {
continue;
}
candidates->push_back(SanitizeRelatedAddress(candidate));
}
}
@@ -817,18 +811,10 @@ void BasicPortAllocatorSession::OnCandidateReady(
}
}
ProtocolType pvalue;
bool candidate_protocol_enabled =
StringToProto(c.protocol().c_str(), &pvalue) &&
data->sequence()->ProtocolEnabled(pvalue);
if (data->ready() && CheckCandidateFilter(c) && candidate_protocol_enabled) {
if (data->ready() && CheckCandidateFilter(c)) {
std::vector<Candidate> candidates;
candidates.push_back(SanitizeRelatedAddress(c));
SignalCandidatesReady(this, candidates);
} else if (!candidate_protocol_enabled) {
LOG(LS_INFO)
<< "Not yet signaling candidate because protocol is not yet enabled.";
} else {
LOG(LS_INFO) << "Discarding candidate because it doesn't match filter.";
}
@@ -925,36 +911,6 @@ void BasicPortAllocatorSession::OnPortError(Port* port) {
MaybeSignalCandidatesAllocationDone();
}
void BasicPortAllocatorSession::OnProtocolEnabled(AllocationSequence* seq,
ProtocolType proto) {
std::vector<Candidate> candidates;
for (std::vector<PortData>::iterator it = ports_.begin();
it != ports_.end(); ++it) {
if (it->sequence() != seq)
continue;
const std::vector<Candidate>& potentials = it->port()->Candidates();
for (size_t i = 0; i < potentials.size(); ++i) {
if (!CheckCandidateFilter(potentials[i])) {
continue;
}
ProtocolType pvalue;
bool candidate_protocol_enabled =
StringToProto(potentials[i].protocol().c_str(), &pvalue) &&
pvalue == proto;
if (candidate_protocol_enabled) {
LOG(LS_INFO) << "Signaling candidate because protocol was enabled: "
<< potentials[i].ToSensitiveString();
candidates.push_back(potentials[i]);
}
}
}
if (!candidates.empty()) {
SignalCandidatesReady(this, candidates);
}
}
bool BasicPortAllocatorSession::CheckCandidateFilter(const Candidate& c) const {
uint32_t filter = candidate_filter_;
@@ -1189,9 +1145,7 @@ void AllocationSequence::OnMessage(rtc::Message* msg) {
RTC_DCHECK(rtc::Thread::Current() == session_->network_thread());
RTC_DCHECK(msg->message_id == MSG_ALLOCATION_PHASE);
const char* const PHASE_NAMES[kNumPhases] = {
"Udp", "Relay", "Tcp", "SslTcp"
};
const char* const PHASE_NAMES[kNumPhases] = {"Udp", "Relay", "Tcp"};
// Perform all of the phases in the current step.
LOG_J(LS_INFO, network_) << "Allocation Phase="
@@ -1201,7 +1155,6 @@ void AllocationSequence::OnMessage(rtc::Message* msg) {
case PHASE_UDP:
CreateUDPPorts();
CreateStunPorts();
EnableProtocol(PROTO_UDP);
break;
case PHASE_RELAY:
@@ -1210,12 +1163,7 @@ void AllocationSequence::OnMessage(rtc::Message* msg) {
case PHASE_TCP:
CreateTCPPorts();
EnableProtocol(PROTO_TCP);
break;
case PHASE_SSLTCP:
state_ = kCompleted;
EnableProtocol(PROTO_SSLTCP);
break;
default:
@@ -1235,22 +1183,6 @@ void AllocationSequence::OnMessage(rtc::Message* msg) {
}
}
void AllocationSequence::EnableProtocol(ProtocolType proto) {
if (!ProtocolEnabled(proto)) {
protocols_.push_back(proto);
session_->OnProtocolEnabled(this, proto);
}
}
bool AllocationSequence::ProtocolEnabled(ProtocolType proto) const {
for (ProtocolList::const_iterator it = protocols_.begin();
it != protocols_.end(); ++it) {
if (*it == proto)
return true;
}
return false;
}
void AllocationSequence::CreateUDPPorts() {
if (IsFlagSet(PORTALLOCATOR_DISABLE_UDP)) {
LOG(LS_VERBOSE) << "AllocationSequence: UDP ports disabled, skipping.";

@@ -329,9 +329,6 @@ class AllocationSequence : public rtc::MessageHandler,
// MessageHandler
void OnMessage(rtc::Message* msg);
void EnableProtocol(ProtocolType proto);
bool ProtocolEnabled(ProtocolType proto) const;
// Signal from AllocationSequence, when it's done with allocating ports.
// This signal is useful, when port allocation fails which doesn't result
// in any candidates. Using this signal BasicPortAllocatorSession can send
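
For readers unfamiliar with the sigslot pattern these classes rely on, a signal like the one described in the comment above is wired up roughly as follows. This is a generic sketch: the class names, the signal name, and the include path are illustrative assumptions, not the actual declarations from this header.

#include "rtc_base/sigslot.h"  // WebRTC's bundled sigslot; path is an assumption

// Generic sketch of the sigslot wiring used throughout this code.
class ToySequence {
 public:
  sigslot::signal1<ToySequence*> SignalDone;  // fired even when no candidates resulted
  void Finish() { SignalDone(this); }
};

class ToySession : public sigslot::has_slots<> {
 public:
  void Watch(ToySequence* seq) {
    // Bind the signal to a member function on this object.
    seq->SignalDone.connect(this, &ToySession::OnSequenceDone);
  }
  void OnSequenceDone(ToySequence* /*seq*/) {
    // The real session would note that this sequence finished and, once every
    // sequence has reported in, raise its own "allocation done" signal.
  }
};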

@@ -961,17 +961,15 @@ TEST_F(BasicPortAllocatorTest, TestGetAllPortsWithOneSecondStepDelay) {
session_->StartGettingPorts();
ASSERT_EQ_SIMULATED_WAIT(2U, candidates_.size(), 1000, fake_clock);
EXPECT_EQ(2U, ports_.size());
ASSERT_EQ_SIMULATED_WAIT(4U, candidates_.size(), 2000, fake_clock);
ASSERT_EQ_SIMULATED_WAIT(6U, candidates_.size(), 2000, fake_clock);
EXPECT_EQ(3U, ports_.size());
EXPECT_PRED4(HasCandidate, candidates_, "relay", "udp", kRelayUdpIntAddr);
EXPECT_PRED4(HasCandidate, candidates_, "relay", "udp", kRelayUdpExtAddr);
ASSERT_EQ_SIMULATED_WAIT(6U, candidates_.size(), 1500, fake_clock);
EXPECT_PRED4(HasCandidate, candidates_, "relay", "tcp", kRelayTcpIntAddr);
EXPECT_PRED4(HasCandidate, candidates_, "local", "tcp", kClientAddr);
EXPECT_EQ(4U, ports_.size());
ASSERT_EQ_SIMULATED_WAIT(7U, candidates_.size(), 2000, fake_clock);
EXPECT_PRED4(HasCandidate, candidates_, "relay", "ssltcp",
kRelaySslTcpIntAddr);
ASSERT_EQ_SIMULATED_WAIT(7U, candidates_.size(), 1500, fake_clock);
EXPECT_PRED4(HasCandidate, candidates_, "local", "tcp", kClientAddr);
EXPECT_EQ(4U, ports_.size());
EXPECT_TRUE(candidate_allocation_done_);
// If we Stop gathering now, we shouldn't get a second "done" callback.