diff --git a/modules/congestion_controller/bbr/BUILD.gn b/modules/congestion_controller/bbr/BUILD.gn index 3773a6cd09..985b578854 100644 --- a/modules/congestion_controller/bbr/BUILD.gn +++ b/modules/congestion_controller/bbr/BUILD.gn @@ -8,7 +8,37 @@ import("../../../webrtc.gni") +rtc_static_library("bbr") { + sources = [ + "bbr_factory.cc", + "bbr_factory.h", + ] + deps = [ + ":bbr_controller", + "../../../rtc_base:rtc_base_approved", + "../network_control", + ] +} + +rtc_source_set("bbr_controller") { + visibility = [ ":*" ] + sources = [ + "bbr_network_controller.cc", + "bbr_network_controller.h", + ] + deps = [ + ":data_transfer_tracker", + ":rtt_stats", + ":windowed_filter", + "../../../api:optional", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../../../rtc_base/system:fallthrough", + "../network_control", + ] +} rtc_source_set("data_transfer_tracker") { + visibility = [ ":*" ] sources = [ "data_transfer_tracker.cc", "data_transfer_tracker.h", @@ -20,6 +50,7 @@ rtc_source_set("data_transfer_tracker") { ] } rtc_source_set("rtt_stats") { + visibility = [ ":*" ] sources = [ "rtt_stats.cc", "rtt_stats.h", @@ -30,6 +61,7 @@ rtc_source_set("rtt_stats") { ] } rtc_source_set("windowed_filter") { + visibility = [ ":*" ] sources = [ "windowed_filter.h", ] @@ -38,11 +70,14 @@ if (rtc_include_tests) { rtc_source_set("bbr_unittests") { testonly = true sources = [ + "bbr_network_controller_unittest.cc", "data_transfer_tracker_unittest.cc", "rtt_stats_unittest.cc", "windowed_filter_unittest.cc", ] deps = [ + ":bbr", + ":bbr_controller", ":data_transfer_tracker", ":rtt_stats", ":windowed_filter", @@ -50,5 +85,9 @@ if (rtc_include_tests) { "../network_control", "../network_control:network_control_test", ] + if (!build_with_chromium && is_clang) { + # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163). + suppressed_configs += [ "//build/config/clang:find_bad_constructs" ] + } } } diff --git a/modules/congestion_controller/bbr/bbr_factory.cc b/modules/congestion_controller/bbr/bbr_factory.cc new file mode 100644 index 0000000000..2b7ed04377 --- /dev/null +++ b/modules/congestion_controller/bbr/bbr_factory.cc @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/congestion_controller/bbr/bbr_factory.h" +#include + +#include "modules/congestion_controller/bbr/bbr_network_controller.h" +#include "rtc_base/ptr_util.h" + +namespace webrtc { + +BbrNetworkControllerFactory::BbrNetworkControllerFactory() {} + +std::unique_ptr BbrNetworkControllerFactory::Create( + NetworkControllerObserver* observer, + NetworkControllerConfig config) { + return rtc::MakeUnique(observer, config); +} + +TimeDelta BbrNetworkControllerFactory::GetProcessInterval() const { + return TimeDelta::PlusInfinity(); +} + +} // namespace webrtc diff --git a/modules/congestion_controller/bbr/bbr_factory.h b/modules/congestion_controller/bbr/bbr_factory.h new file mode 100644 index 0000000000..8e91d9fdd3 --- /dev/null +++ b/modules/congestion_controller/bbr/bbr_factory.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. 
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_CONGESTION_CONTROLLER_BBR_BBR_FACTORY_H_
+#define MODULES_CONGESTION_CONTROLLER_BBR_BBR_FACTORY_H_
+
+#include <memory>
+
+#include "modules/congestion_controller/network_control/include/network_control.h"
+
+namespace webrtc {
+
+class BbrNetworkControllerFactory : public NetworkControllerFactoryInterface {
+ public:
+  BbrNetworkControllerFactory();
+  std::unique_ptr<NetworkControllerInterface> Create(
+      NetworkControllerObserver* observer,
+      NetworkControllerConfig config) override;
+  TimeDelta GetProcessInterval() const override;
+};
+}  // namespace webrtc
+
+#endif  // MODULES_CONGESTION_CONTROLLER_BBR_BBR_FACTORY_H_
diff --git a/modules/congestion_controller/bbr/bbr_network_controller.cc b/modules/congestion_controller/bbr/bbr_network_controller.cc
new file mode 100644
index 0000000000..40f28b95d1
--- /dev/null
+++ b/modules/congestion_controller/bbr/bbr_network_controller.cc
@@ -0,0 +1,870 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/congestion_controller/bbr/bbr_network_controller.h"
+
+#include
+#include
+#include
+#include
+
+#include "modules/congestion_controller/network_control/include/network_units.h"
+#include "modules/congestion_controller/network_control/include/network_units_to_string.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/system/fallthrough.h"
+
+namespace webrtc {
+namespace bbr {
+namespace {
+
+// If greater than zero, mean RTT variation is multiplied by the specified
+// factor and added to the congestion window limit.
+const double kBbrRttVariationWeight = 0.0f;
+
+// Congestion window gain for QUIC BBR during PROBE_BW phase.
+const double kProbeBWCongestionWindowGain = 2.0f;
+
+// The maximum packet size of any QUIC packet, based on ethernet's max size,
+// minus the IP and UDP headers. IPv6 has a 40 byte header, UDP adds an
+// additional 8 bytes. This is a total overhead of 48 bytes. Ethernet's
+// max packet size is 1500 bytes, 1500 - 48 = 1452.
+const DataSize kMaxPacketSize = DataSize::bytes(1452);
+
+// Default maximum packet size used in the Linux TCP implementation.
+// Used in QUIC for congestion window computations in bytes.
+const DataSize kDefaultTCPMSS = DataSize::bytes(1460);
+// Constants based on TCP defaults.
+const DataSize kMaxSegmentSize = kDefaultTCPMSS;
+// The minimum CWND to ensure delayed acks don't reduce bandwidth measurements.
+// Does not inflate the pacing rate.
+const DataSize kMinimumCongestionWindow = DataSize::bytes(1000);
+
+// The gain used for the slow start, equal to 2/ln(2).
+const double kHighGain = 2.885f;
+// The gain used in STARTUP after loss has been detected.
+// 1.5 is enough to allow for 25% exogenous loss and still observe a 25% growth
+// in measured bandwidth.
+const double kStartupAfterLossGain = 1.5;
+// The gain used to drain the queue after the slow start.
+const double kDrainGain = 1.f / kHighGain; + +// The length of the gain cycle. +const size_t kGainCycleLength = 8; +// The size of the bandwidth filter window, in round-trips. +const BbrRoundTripCount kBandwidthWindowSize = kGainCycleLength + 2; + +// The time after which the current min_rtt value expires. +const TimeDelta kMinRttExpiry = TimeDelta::seconds(10); +// The minimum time the connection can spend in PROBE_RTT mode. +const TimeDelta kProbeRttTime = TimeDelta::ms(200); +// If the bandwidth does not increase by the factor of |kStartupGrowthTarget| +// within |kRoundTripsWithoutGrowthBeforeExitingStartup| rounds, the connection +// will exit the STARTUP mode. +const double kStartupGrowthTarget = 1.25; +// Coefficient of target congestion window to use when basing PROBE_RTT on BDP. +const double kModerateProbeRttMultiplier = 0.75; +// Coefficient to determine if a new RTT is sufficiently similar to min_rtt that +// we don't need to enter PROBE_RTT. +const double kSimilarMinRttThreshold = 1.125; + +const TimeDelta kInitialRtt = TimeDelta::ms(200); +const DataRate kInitialBandwidth = DataRate::kbps(300); + +const TimeDelta kMaxRtt = TimeDelta::ms(1000); +const DataRate kMaxBandwidth = DataRate::kbps(5000); + +const DataSize kInitialCongestionWindow = kInitialRtt * kInitialBandwidth; +const DataSize kDefaultMaxCongestionWindow = kMaxRtt * kMaxBandwidth; + +static std::string ModeToString(BbrNetworkController::Mode mode) { + switch (mode) { + case BbrNetworkController::STARTUP: + return "STARTUP"; + case BbrNetworkController::DRAIN: + return "DRAIN"; + case BbrNetworkController::PROBE_BW: + return "PROBE_BW"; + case BbrNetworkController::PROBE_RTT: + return "PROBE_RTT"; + } + return "???"; +} +} // namespace + +BbrNetworkController::BbrControllerConfig +BbrNetworkController::BbrControllerConfig::DefaultConfig() { + BbrControllerConfig config; + config.probe_bw_pacing_gain_offset = 0.25; + config.encoder_rate_gain = 1; + config.encoder_rate_gain_in_probe_rtt = 1; + config.exit_startup_rtt_threshold_ms = 0; + config.probe_rtt_congestion_window_gain = 0.75; + config.exit_startup_on_loss = true; + config.num_startup_rtts = 3; + config.rate_based_recovery = false; + config.max_aggregation_bytes_multiplier = 0; + config.slower_startup = false; + config.rate_based_startup = false; + config.fully_drain_queue = false; + config.initial_conservation_in_startup = CONSERVATION; + config.max_ack_height_window_multiplier = 1; + config.probe_rtt_based_on_bdp = false; + config.probe_rtt_skipped_if_similar_rtt = false; + config.probe_rtt_disabled_if_app_limited = false; + + return config; +} + +BbrNetworkController::DebugState::DebugState(const BbrNetworkController& sender) + : mode(sender.mode_), + max_bandwidth(sender.max_bandwidth_.GetBest()), + round_trip_count(sender.round_trip_count_), + gain_cycle_index(sender.cycle_current_offset_), + congestion_window(sender.congestion_window_), + is_at_full_bandwidth(sender.is_at_full_bandwidth_), + bandwidth_at_last_round(sender.bandwidth_at_last_round_), + rounds_without_bandwidth_gain(sender.rounds_without_bandwidth_gain_), + min_rtt(sender.min_rtt_), + min_rtt_timestamp(sender.min_rtt_timestamp_), + recovery_state(sender.recovery_state_), + recovery_window(sender.recovery_window_), + last_sample_is_app_limited(sender.last_sample_is_app_limited_), + end_of_app_limited_phase(sender.end_of_app_limited_phase_) {} + +BbrNetworkController::DebugState::DebugState(const DebugState& state) = default; + 
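+// Illustrative usage sketch (not part of this patch; variable names are
+// placeholders): the controller defined below is normally obtained through
+// BbrNetworkControllerFactory and then driven by transport feedback, e.g.:
+//
+//   BbrNetworkControllerFactory factory;
+//   std::unique_ptr<NetworkControllerInterface> controller =
+//       factory.Create(&observer, config);
+//   controller->OnTransportPacketsFeedback(feedback);
+//
+// Each feedback update may trigger OnTargetTransferRate(), OnPacerConfig()
+// and OnCongestionWindow() calls on the observer (see SignalUpdatedRates()).
+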
+BbrNetworkController::BbrNetworkController(NetworkControllerObserver* observer, + NetworkControllerConfig config) + : observer_(observer), + random_(10), + max_bandwidth_(kBandwidthWindowSize, DataRate::Zero(), 0), + default_bandwidth_(kInitialBandwidth), + max_ack_height_(kBandwidthWindowSize, DataSize::Zero(), 0), + congestion_window_(kInitialCongestionWindow), + initial_congestion_window_(kInitialCongestionWindow), + max_congestion_window_(kDefaultMaxCongestionWindow), + congestion_window_gain_constant_(kProbeBWCongestionWindowGain), + rtt_variance_weight_(kBbrRttVariationWeight), + recovery_window_(max_congestion_window_) { + config_ = BbrControllerConfig::DefaultConfig(); + if (config.starting_bandwidth.IsFinite()) + default_bandwidth_ = config.starting_bandwidth; + constraints_ = config.constraints; + Reset(); + EnterStartupMode(); + SignalUpdatedRates(config.constraints.at_time); +} + +BbrNetworkController::~BbrNetworkController() {} + +void BbrNetworkController::Reset() { + round_trip_count_ = 0; + rounds_without_bandwidth_gain_ = 0; + is_at_full_bandwidth_ = false; + last_update_state_.mode = Mode::STARTUP; + last_update_state_.bandwidth = DataRate(); + last_update_state_.rtt = TimeDelta(); + last_update_state_.pacing_rate = DataRate(); + last_update_state_.target_rate = DataRate(); + last_update_state_.probing_for_bandwidth = false; + EnterStartupMode(); +} + +void BbrNetworkController::SignalUpdatedRates(Timestamp at_time) { + DataRate bandwidth = BandwidthEstimate(); + if (bandwidth.IsZero()) + bandwidth = default_bandwidth_; + TimeDelta rtt = GetMinRtt(); + DataRate pacing_rate = PacingRate(); + DataRate target_rate = bandwidth; + if (mode_ == PROBE_RTT) + target_rate = bandwidth * config_.encoder_rate_gain_in_probe_rtt; + else + target_rate = bandwidth * config_.encoder_rate_gain; + target_rate = std::min(target_rate, pacing_rate); + + if (constraints_) { + target_rate = std::min(target_rate, constraints_->max_data_rate); + target_rate = std::max(target_rate, constraints_->min_data_rate); + } + bool probing_for_bandwidth = IsProbingForMoreBandwidth(); + if (last_update_state_.mode == mode_ && + last_update_state_.bandwidth == bandwidth && + last_update_state_.rtt == rtt && + last_update_state_.pacing_rate == pacing_rate && + last_update_state_.target_rate == target_rate && + last_update_state_.probing_for_bandwidth == probing_for_bandwidth) + return; + last_update_state_.mode = mode_; + last_update_state_.bandwidth = bandwidth; + last_update_state_.rtt = rtt; + last_update_state_.pacing_rate = pacing_rate; + last_update_state_.target_rate = target_rate; + last_update_state_.probing_for_bandwidth = probing_for_bandwidth; + + RTC_LOG(LS_INFO) << "RateUpdate, mode: " << ModeToString(mode_) + << ", bw: " << ToString(bandwidth) + << ", min_rtt: " << ToString(rtt) + << ", last_rtt: " << ToString(last_rtt_) + << ", pacing_rate: " << ToString(pacing_rate) + << ", target_rate: " << ToString(target_rate) + << ", Probing:" << probing_for_bandwidth + << ", pacing_gain: " << pacing_gain_; + + TargetTransferRate target_rate_msg; + target_rate_msg.network_estimate.at_time = at_time; + target_rate_msg.network_estimate.bandwidth = bandwidth; + target_rate_msg.network_estimate.round_trip_time = rtt; + + // TODO(srte): Fill in fields below with proper values. 
+ target_rate_msg.network_estimate.loss_rate_ratio = 0; + target_rate_msg.network_estimate.bwe_period = TimeDelta::Zero(); + + target_rate_msg.target_rate = target_rate; + target_rate_msg.at_time = at_time; + observer_->OnTargetTransferRate(target_rate_msg); + + PacerConfig pacer_config; + // A small time window ensures an even pacing rate. + pacer_config.time_window = rtt * 0.25; + pacer_config.data_window = pacer_config.time_window * pacing_rate; + + if (IsProbingForMoreBandwidth()) + pacer_config.pad_window = pacer_config.data_window; + else + pacer_config.pad_window = DataSize::Zero(); + + pacer_config.at_time = at_time; + observer_->OnPacerConfig(pacer_config); + + CongestionWindow congestion_window; + congestion_window.data_window = GetCongestionWindow(); + observer_->OnCongestionWindow(congestion_window); +} + +void BbrNetworkController::OnNetworkAvailability(NetworkAvailability msg) { + Reset(); + rtt_stats_.OnConnectionMigration(); + SignalUpdatedRates(msg.at_time); +} + +void BbrNetworkController::OnNetworkRouteChange(NetworkRouteChange msg) { + constraints_ = msg.constraints; + Reset(); + if (msg.starting_rate.IsFinite()) + default_bandwidth_ = msg.starting_rate; + rtt_stats_.OnConnectionMigration(); + SignalUpdatedRates(msg.at_time); +} + +void BbrNetworkController::OnProcessInterval(ProcessInterval) {} + +void BbrNetworkController::OnStreamsConfig(StreamsConfig msg) {} + +void BbrNetworkController::OnTargetRateConstraints(TargetRateConstraints msg) { + constraints_ = msg; + SignalUpdatedRates(msg.at_time); +} + +bool BbrNetworkController::InSlowStart() const { + return mode_ == STARTUP; +} + +void BbrNetworkController::OnSentPacket(SentPacket msg) { + last_send_time_ = msg.send_time; + if (!aggregation_epoch_start_time_.IsInitialized()) { + aggregation_epoch_start_time_ = msg.send_time; + } +} + +bool BbrNetworkController::CanSend(DataSize bytes_in_flight) { + return bytes_in_flight < GetCongestionWindow(); +} + +DataRate BbrNetworkController::PacingRate() const { + if (pacing_rate_.IsZero()) { + return kHighGain * initial_congestion_window_ / GetMinRtt(); + } + return pacing_rate_; +} + +DataRate BbrNetworkController::BandwidthEstimate() const { + return max_bandwidth_.GetBest(); +} + +DataSize BbrNetworkController::GetCongestionWindow() const { + if (mode_ == PROBE_RTT) { + return ProbeRttCongestionWindow(); + } + + if (InRecovery() && !config_.rate_based_recovery && + !(config_.rate_based_startup && mode_ == STARTUP)) { + return std::min(congestion_window_, recovery_window_); + } + + return congestion_window_; +} + +double BbrNetworkController::GetPacingGain(int round_offset) const { + if (round_offset == 0) + return 1 + config_.probe_bw_pacing_gain_offset; + else if (round_offset == 1) + return 1 - config_.probe_bw_pacing_gain_offset; + else + return 1; +} + +bool BbrNetworkController::InRecovery() const { + return recovery_state_ != NOT_IN_RECOVERY; +} + +bool BbrNetworkController::IsProbingForMoreBandwidth() const { + return (mode_ == PROBE_BW && pacing_gain_ > 1) || mode_ == STARTUP; +} + +void BbrNetworkController::OnTransportPacketsFeedback( + TransportPacketsFeedback msg) { + Timestamp feedback_recv_time = msg.feedback_time; + rtc::Optional last_sent_packet = + msg.PacketsWithFeedback().back().sent_packet; + if (!last_sent_packet.has_value()) { + RTC_LOG(LS_WARNING) << "Last ack packet not in history, no RTT update"; + } else { + Timestamp send_time = last_sent_packet->send_time; + TimeDelta send_delta = feedback_recv_time - send_time; + 
rtt_stats_.UpdateRtt(send_delta, TimeDelta::Zero(), feedback_recv_time); + } + + DataSize bytes_in_flight = msg.data_in_flight; + DataSize total_acked_size = DataSize::Zero(); + + bool is_round_start = false; + bool min_rtt_expired = false; + + std::vector acked_packets = msg.ReceivedWithSendInfo(); + std::vector lost_packets = msg.LostWithSendInfo(); + // Input the new data into the BBR model of the connection. + if (!acked_packets.empty()) { + for (const PacketResult& packet : acked_packets) { + const SentPacket& sent_packet = *packet.sent_packet; + send_ack_tracker_.AddSample(sent_packet.size, sent_packet.send_time, + msg.feedback_time); + total_acked_size += sent_packet.size; + } + Timestamp last_acked_send_time = + acked_packets.rbegin()->sent_packet->send_time; + is_round_start = UpdateRoundTripCounter(last_acked_send_time); + UpdateBandwidth(msg.feedback_time, acked_packets); + // Min rtt will be the rtt for the last packet, since all packets are acked + // at the same time. + Timestamp last_send_time = acked_packets.back().sent_packet->send_time; + min_rtt_expired = UpdateMinRtt(msg.feedback_time, last_send_time); + UpdateRecoveryState(last_acked_send_time, !lost_packets.empty(), + is_round_start); + + UpdateAckAggregationBytes(msg.feedback_time, total_acked_size); + if (max_aggregation_bytes_multiplier_ > 0) { + if (msg.data_in_flight <= + 1.25 * GetTargetCongestionWindow(pacing_gain_)) { + bytes_acked_since_queue_drained_ = DataSize::Zero(); + } else { + bytes_acked_since_queue_drained_ += total_acked_size; + } + } + } + total_bytes_acked_ += total_acked_size; + + // Handle logic specific to PROBE_BW mode. + if (mode_ == PROBE_BW) { + UpdateGainCyclePhase(msg.feedback_time, msg.prior_in_flight, + !lost_packets.empty()); + } + + // Handle logic specific to STARTUP and DRAIN modes. + if (is_round_start && !is_at_full_bandwidth_) { + CheckIfFullBandwidthReached(); + } + MaybeExitStartupOrDrain(msg); + + // Handle logic specific to PROBE_RTT. + MaybeEnterOrExitProbeRtt(msg, is_round_start, min_rtt_expired); + + // Calculate number of packets acked and lost. + DataSize bytes_lost = DataSize(); + for (const PacketResult& packet : lost_packets) { + bytes_lost += packet.sent_packet->size; + } + + // After the model is updated, recalculate the pacing rate and congestion + // window. + CalculatePacingRate(); + CalculateCongestionWindow(total_acked_size); + CalculateRecoveryWindow(total_acked_size, bytes_lost, bytes_in_flight); + SignalUpdatedRates(msg.feedback_time); +} + +void BbrNetworkController::OnRemoteBitrateReport(RemoteBitrateReport msg) {} +void BbrNetworkController::OnRoundTripTimeUpdate(RoundTripTimeUpdate msg) {} +void BbrNetworkController::OnTransportLossReport(TransportLossReport msg) {} + +TimeDelta BbrNetworkController::GetMinRtt() const { + return !min_rtt_.IsZero() ? min_rtt_ + : TimeDelta::us(rtt_stats_.initial_rtt_us()); +} + +DataSize BbrNetworkController::GetTargetCongestionWindow(double gain) const { + DataSize bdp = GetMinRtt() * BandwidthEstimate(); + DataSize congestion_window = gain * bdp; + + // BDP estimate will be zero if no bandwidth samples are available yet. 
+ if (congestion_window.IsZero()) { + congestion_window = gain * initial_congestion_window_; + } + + return std::max(congestion_window, kMinimumCongestionWindow); +} + +DataSize BbrNetworkController::ProbeRttCongestionWindow() const { + if (config_.probe_rtt_based_on_bdp) { + return GetTargetCongestionWindow(kModerateProbeRttMultiplier); + } + return kMinimumCongestionWindow; +} + +void BbrNetworkController::EnterStartupMode() { + mode_ = STARTUP; + pacing_gain_ = kHighGain; + congestion_window_gain_ = kHighGain; +} + +void BbrNetworkController::EnterProbeBandwidthMode(Timestamp now) { + mode_ = PROBE_BW; + congestion_window_gain_ = congestion_window_gain_constant_; + + // Pick a random offset for the gain cycle out of {0, 2..7} range. 1 is + // excluded because in that case increased gain and decreased gain would not + // follow each other. + cycle_current_offset_ = random_.Rand(kGainCycleLength - 1); + if (cycle_current_offset_ >= 1) { + cycle_current_offset_ += 1; + } + + last_cycle_start_ = now; + pacing_gain_ = GetPacingGain(cycle_current_offset_); +} + +bool BbrNetworkController::UpdateRoundTripCounter( + Timestamp last_acked_send_time) { + if (last_acked_send_time > current_round_trip_end_) { + round_trip_count_++; + current_round_trip_end_ = last_send_time_; + return true; + } + + return false; +} + +bool BbrNetworkController::UpdateMinRtt(Timestamp ack_time, + Timestamp last_packet_send_time) { + // Note: This sample does not account for delayed acknowledgement time. This + // means that the RTT measurements here can be artificially high, especially + // on low bandwidth connections. + TimeDelta sample_rtt = ack_time - last_packet_send_time; + last_rtt_ = sample_rtt; + min_rtt_since_last_probe_rtt_ = + std::min(min_rtt_since_last_probe_rtt_, sample_rtt); + + // Do not expire min_rtt if none was ever available. + bool min_rtt_expired = + !min_rtt_.IsZero() && (ack_time > (min_rtt_timestamp_ + kMinRttExpiry)); + + if (min_rtt_expired || sample_rtt < min_rtt_ || min_rtt_.IsZero()) { + RTC_LOG(LS_INFO) << "Min RTT updated, old value: " << ToString(min_rtt_) + << ", new value: " << ToString(sample_rtt) + << ", current time: " << ToString(ack_time); + + if (ShouldExtendMinRttExpiry()) { + min_rtt_expired = false; + } else { + min_rtt_ = sample_rtt; + } + min_rtt_timestamp_ = ack_time; + // Reset since_last_probe_rtt fields. + min_rtt_since_last_probe_rtt_ = TimeDelta::PlusInfinity(); + app_limited_since_last_probe_rtt_ = false; + } + + return min_rtt_expired; +} + +void BbrNetworkController::UpdateBandwidth( + Timestamp ack_time, + const std::vector& acked_packets) { + // There are two possible maximum receive bandwidths based on the duration + // from send to ack of a packet, either including or excluding the time until + // the current ack was received. Therefore looking at the last and the first + // packet is enough. This holds if at most one feedback was received during + // the sending of the acked packets. + std::array packets = { + {acked_packets.front(), acked_packets.back()}}; + for (const PacketResult& packet : packets) { + const Timestamp& send_time = packet.sent_packet->send_time; + is_app_limited_ = send_time > end_of_app_limited_phase_; + auto result = send_ack_tracker_.GetRatesByAckTime(send_time, ack_time); + if (result.acked_data == DataSize::Zero()) + continue; + send_ack_tracker_.ClearOldSamples(send_time); + + DataRate ack_rate = result.acked_data / result.ack_timespan; + DataRate send_rate = result.send_timespan.IsZero() + ? 
DataRate::Infinity() + : result.acked_data / result.send_timespan; + DataRate bandwidth = std::min(send_rate, ack_rate); + if (!bandwidth.IsFinite()) + continue; + if (!is_app_limited_ || bandwidth > BandwidthEstimate()) { + max_bandwidth_.Update(bandwidth, round_trip_count_); + } + } +} + +bool BbrNetworkController::ShouldExtendMinRttExpiry() const { + if (config_.probe_rtt_disabled_if_app_limited && + app_limited_since_last_probe_rtt_) { + // Extend the current min_rtt if we've been app limited recently. + return true; + } + const bool min_rtt_increased_since_last_probe = + min_rtt_since_last_probe_rtt_ > min_rtt_ * kSimilarMinRttThreshold; + if (config_.probe_rtt_skipped_if_similar_rtt && + app_limited_since_last_probe_rtt_ && + !min_rtt_increased_since_last_probe) { + // Extend the current min_rtt if we've been app limited recently and an rtt + // has been measured in that time that's less than 12.5% more than the + // current min_rtt. + return true; + } + return false; +} + +void BbrNetworkController::UpdateGainCyclePhase(Timestamp now, + DataSize prior_in_flight, + bool has_losses) { + // In most cases, the cycle is advanced after an RTT passes. + bool should_advance_gain_cycling = now - last_cycle_start_ > GetMinRtt(); + + // If the pacing gain is above 1.0, the connection is trying to probe the + // bandwidth by increasing the number of bytes in flight to at least + // pacing_gain * BDP. Make sure that it actually reaches the target, as long + // as there are no losses suggesting that the buffers are not able to hold + // that much. + if (pacing_gain_ > 1.0 && !has_losses && + prior_in_flight < GetTargetCongestionWindow(pacing_gain_)) { + should_advance_gain_cycling = false; + } + + // If pacing gain is below 1.0, the connection is trying to drain the extra + // queue which could have been incurred by probing prior to it. If the number + // of bytes in flight falls down to the estimated BDP value earlier, conclude + // that the queue has been successfully drained and exit this cycle early. + if (pacing_gain_ < 1.0 && prior_in_flight <= GetTargetCongestionWindow(1)) { + should_advance_gain_cycling = true; + } + + if (should_advance_gain_cycling) { + cycle_current_offset_ = (cycle_current_offset_ + 1) % kGainCycleLength; + last_cycle_start_ = now; + // Stay in low gain mode until the target BDP is hit. + // Low gain mode will be exited immediately when the target BDP is achieved. 
+ if (config_.fully_drain_queue && pacing_gain_ < 1 && + GetPacingGain(cycle_current_offset_) == 1 && + prior_in_flight > GetTargetCongestionWindow(1)) { + return; + } + pacing_gain_ = GetPacingGain(cycle_current_offset_); + } +} + +void BbrNetworkController::CheckIfFullBandwidthReached() { + if (last_sample_is_app_limited_) { + return; + } + + DataRate target = bandwidth_at_last_round_ * kStartupGrowthTarget; + if (BandwidthEstimate() >= target) { + bandwidth_at_last_round_ = BandwidthEstimate(); + rounds_without_bandwidth_gain_ = 0; + return; + } + + rounds_without_bandwidth_gain_++; + if ((rounds_without_bandwidth_gain_ >= config_.num_startup_rtts) || + (exit_startup_on_loss_ && InRecovery())) { + is_at_full_bandwidth_ = true; + } +} + +void BbrNetworkController::MaybeExitStartupOrDrain( + const TransportPacketsFeedback& msg) { + int64_t exit_threshold_ms = config_.exit_startup_rtt_threshold_ms; + bool rtt_over_threshold = + exit_threshold_ms > 0 && (last_rtt_ - min_rtt_).ms() > exit_threshold_ms; + if (mode_ == STARTUP && (is_at_full_bandwidth_ || rtt_over_threshold)) { + if (rtt_over_threshold) + RTC_LOG(LS_INFO) << "Exiting startup due to rtt increase from: " + << ToString(min_rtt_) << " to:" << ToString(last_rtt_) + << " > " + << ToString(min_rtt_ + TimeDelta::ms(exit_threshold_ms)); + mode_ = DRAIN; + pacing_gain_ = kDrainGain; + congestion_window_gain_ = kHighGain; + } + if (mode_ == DRAIN && msg.data_in_flight <= GetTargetCongestionWindow(1)) { + EnterProbeBandwidthMode(msg.feedback_time); + } +} + +void BbrNetworkController::MaybeEnterOrExitProbeRtt( + const TransportPacketsFeedback& msg, + bool is_round_start, + bool min_rtt_expired) { + if (min_rtt_expired && mode_ != PROBE_RTT) { + mode_ = PROBE_RTT; + pacing_gain_ = 1; + // Do not decide on the time to exit PROBE_RTT until the |bytes_in_flight| + // is at the target small value. + exit_probe_rtt_at_ = Timestamp(); + RTC_LOG(LS_INFO) << "Entering RTT Probe"; + } + + if (mode_ == PROBE_RTT) { + is_app_limited_ = true; + end_of_app_limited_phase_ = last_send_time_; + + if (!exit_probe_rtt_at_.IsInitialized()) { + // If the window has reached the appropriate size, schedule exiting + // PROBE_RTT. The CWND during PROBE_RTT is kMinimumCongestionWindow, but + // we allow an extra packet since QUIC checks CWND before sending a + // packet. + if (msg.data_in_flight < ProbeRttCongestionWindow() + kMaxPacketSize) { + exit_probe_rtt_at_ = msg.feedback_time + kProbeRttTime; + probe_rtt_round_passed_ = false; + } + } else { + if (is_round_start) { + probe_rtt_round_passed_ = true; + } + if (msg.feedback_time >= exit_probe_rtt_at_ && probe_rtt_round_passed_) { + min_rtt_timestamp_ = msg.feedback_time; + RTC_LOG(LS_INFO) << "Exiting RTT Probe"; + if (!is_at_full_bandwidth_) { + EnterStartupMode(); + } else { + EnterProbeBandwidthMode(msg.feedback_time); + } + } + } + } +} + +void BbrNetworkController::UpdateRecoveryState(Timestamp last_acked_send_time, + bool has_losses, + bool is_round_start) { + // Exit recovery when there are no losses for a round. + if (has_losses) { + end_recovery_at_ = last_acked_send_time; + } + + switch (recovery_state_) { + case NOT_IN_RECOVERY: + // Enter conservation on the first loss. + if (has_losses) { + recovery_state_ = CONSERVATION; + if (mode_ == STARTUP) { + recovery_state_ = config_.initial_conservation_in_startup; + } + // This will cause the |recovery_window_| to be set to the correct + // value in CalculateRecoveryWindow(). 
+ recovery_window_ = DataSize::Zero(); + // Since the conservation phase is meant to be lasting for a whole + // round, extend the current round as if it were started right now. + current_round_trip_end_ = last_send_time_; + } + break; + + case CONSERVATION: + case MEDIUM_GROWTH: + if (is_round_start) { + recovery_state_ = GROWTH; + } + RTC_FALLTHROUGH(); + case GROWTH: + // Exit recovery if appropriate. + if (!has_losses && last_acked_send_time > end_recovery_at_) { + recovery_state_ = NOT_IN_RECOVERY; + } + + break; + } +} + +void BbrNetworkController::UpdateAckAggregationBytes( + Timestamp ack_time, + DataSize newly_acked_bytes) { + // Compute how many bytes are expected to be delivered, assuming max bandwidth + // is correct. + DataSize expected_bytes_acked = + max_bandwidth_.GetBest() * (ack_time - aggregation_epoch_start_time_); + // Reset the current aggregation epoch as soon as the ack arrival rate is less + // than or equal to the max bandwidth. + if (aggregation_epoch_bytes_ <= expected_bytes_acked) { + // Reset to start measuring a new aggregation epoch. + aggregation_epoch_bytes_ = newly_acked_bytes; + aggregation_epoch_start_time_ = ack_time; + return; + } + + // Compute how many extra bytes were delivered vs max bandwidth. + // Include the bytes most recently acknowledged to account for stretch acks. + aggregation_epoch_bytes_ += newly_acked_bytes; + max_ack_height_.Update(aggregation_epoch_bytes_ - expected_bytes_acked, + round_trip_count_); +} + +void BbrNetworkController::CalculatePacingRate() { + if (BandwidthEstimate().IsZero()) { + return; + } + + DataRate target_rate = pacing_gain_ * BandwidthEstimate(); + if (config_.rate_based_recovery && InRecovery()) { + pacing_rate_ = pacing_gain_ * max_bandwidth_.GetThirdBest(); + } + if (is_at_full_bandwidth_) { + pacing_rate_ = target_rate; + return; + } + + // Pace at the rate of initial_window / RTT as soon as RTT measurements are + // available. + if (pacing_rate_.IsZero() && !rtt_stats_.min_rtt().IsZero()) { + pacing_rate_ = initial_congestion_window_ / rtt_stats_.min_rtt(); + return; + } + // Slow the pacing rate in STARTUP once loss has ever been detected. + const bool has_ever_detected_loss = end_recovery_at_.IsInitialized(); + if (config_.slower_startup && has_ever_detected_loss) { + pacing_rate_ = kStartupAfterLossGain * BandwidthEstimate(); + return; + } + + // Do not decrease the pacing rate during the startup. + pacing_rate_ = std::max(pacing_rate_, target_rate); +} + +void BbrNetworkController::CalculateCongestionWindow(DataSize bytes_acked) { + if (mode_ == PROBE_RTT) { + return; + } + + DataSize target_window = GetTargetCongestionWindow(congestion_window_gain_); + + if (rtt_variance_weight_ > 0.f && !BandwidthEstimate().IsZero()) { + target_window += rtt_variance_weight_ * rtt_stats_.mean_deviation() * + BandwidthEstimate(); + } else if (max_aggregation_bytes_multiplier_ > 0 && is_at_full_bandwidth_) { + // Subtracting only half the bytes_acked_since_queue_drained ensures sending + // doesn't completely stop for a long period of time if the queue hasn't + // been drained recently. 
+ if (max_aggregation_bytes_multiplier_ * max_ack_height_.GetBest() > + bytes_acked_since_queue_drained_ / 2) { + target_window += + max_aggregation_bytes_multiplier_ * max_ack_height_.GetBest() - + bytes_acked_since_queue_drained_ / 2; + } + } else if (is_at_full_bandwidth_) { + target_window += max_ack_height_.GetBest(); + } + + // Instead of immediately setting the target CWND as the new one, BBR grows + // the CWND towards |target_window| by only increasing it |bytes_acked| at a + // time. + if (is_at_full_bandwidth_) { + congestion_window_ = + std::min(target_window, congestion_window_ + bytes_acked); + } else if (congestion_window_ < target_window || + total_bytes_acked_ < initial_congestion_window_) { + // If the connection is not yet out of startup phase, do not decrease the + // window. + congestion_window_ = congestion_window_ + bytes_acked; + } + + // Enforce the limits on the congestion window. + congestion_window_ = std::max(congestion_window_, kMinimumCongestionWindow); + congestion_window_ = std::min(congestion_window_, max_congestion_window_); +} + +void BbrNetworkController::CalculateRecoveryWindow(DataSize bytes_acked, + DataSize bytes_lost, + DataSize bytes_in_flight) { + if (config_.rate_based_recovery || + (config_.rate_based_startup && mode_ == STARTUP)) { + return; + } + + if (recovery_state_ == NOT_IN_RECOVERY) { + return; + } + + // Set up the initial recovery window. + if (recovery_window_.IsZero()) { + recovery_window_ = bytes_in_flight + bytes_acked; + recovery_window_ = std::max(kMinimumCongestionWindow, recovery_window_); + return; + } + + // Remove losses from the recovery window, while accounting for a potential + // integer underflow. + recovery_window_ = recovery_window_ >= bytes_lost + ? recovery_window_ - bytes_lost + : kMaxSegmentSize; + + // In CONSERVATION mode, just subtracting losses is sufficient. In GROWTH, + // release additional |bytes_acked| to achieve a slow-start-like behavior. + // In MEDIUM_GROWTH, release |bytes_acked| / 2 to split the difference. + if (recovery_state_ == GROWTH) { + recovery_window_ += bytes_acked; + } else if (recovery_state_ == MEDIUM_GROWTH) { + recovery_window_ += bytes_acked / 2; + } + + // Sanity checks. Ensure that we always allow to send at leastÅ› + // |bytes_acked| in response. + recovery_window_ = std::max(recovery_window_, bytes_in_flight + bytes_acked); + recovery_window_ = std::max(kMinimumCongestionWindow, recovery_window_); +} + +void BbrNetworkController::OnApplicationLimited(DataSize bytes_in_flight) { + if (bytes_in_flight >= GetCongestionWindow()) { + return; + } + + app_limited_since_last_probe_rtt_ = true; + + is_app_limited_ = true; + end_of_app_limited_phase_ = last_send_time_; + + RTC_LOG(LS_INFO) << "Becoming application limited. Last sent time: " + << ToString(last_send_time_) + << ", CWND: " << ToString(GetCongestionWindow()); +} +} // namespace bbr +} // namespace webrtc diff --git a/modules/congestion_controller/bbr/bbr_network_controller.h b/modules/congestion_controller/bbr/bbr_network_controller.h new file mode 100644 index 0000000000..d39d4a8904 --- /dev/null +++ b/modules/congestion_controller/bbr/bbr_network_controller.h @@ -0,0 +1,407 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// BBR (Bottleneck Bandwidth and RTT) congestion control algorithm. +// Based on the Quic BBR implementation in Chromium. + +#ifndef MODULES_CONGESTION_CONTROLLER_BBR_BBR_NETWORK_CONTROLLER_H_ +#define MODULES_CONGESTION_CONTROLLER_BBR_BBR_NETWORK_CONTROLLER_H_ + +#include +#include +#include + +#include "modules/congestion_controller/bbr/data_transfer_tracker.h" +#include "modules/congestion_controller/bbr/rtt_stats.h" +#include "modules/congestion_controller/bbr/windowed_filter.h" +#include "modules/congestion_controller/network_control/include/network_control.h" +#include "modules/congestion_controller/network_control/include/network_types.h" +#include "modules/congestion_controller/network_control/include/network_units.h" + +#include "api/optional.h" +#include "rtc_base/random.h" + +namespace webrtc { +namespace bbr { + +typedef int64_t BbrPacketCount; +typedef int64_t BbrRoundTripCount; + +// BbrSender implements BBR congestion control algorithm. BBR aims to estimate +// the current available Bottleneck Bandwidth and RTT (hence the name), and +// regulates the pacing rate and the size of the congestion window based on +// those signals. +// +// BBR relies on pacing in order to function properly. Do not use BBR when +// pacing is disabled. +class BbrNetworkController : public NetworkControllerInterface { + public: + enum Mode { + // Startup phase of the connection. + STARTUP, + // After achieving the highest possible bandwidth during the startup, lower + // the pacing rate in order to drain the queue. + DRAIN, + // Cruising mode. + PROBE_BW, + // Temporarily slow down sending in order to empty the buffer and measure + // the real minimum RTT. + PROBE_RTT, + }; + + // Indicates how the congestion control limits the amount of bytes in flight. + enum RecoveryState { + // Do not limit. + NOT_IN_RECOVERY, + // Allow an extra outstanding byte for each byte acknowledged. + CONSERVATION, + // Allow 1.5 extra outstanding bytes for each byte acknowledged. + MEDIUM_GROWTH, + // Allow two extra outstanding bytes for each byte acknowledged (slow + // start). + GROWTH + }; + + // Debug state can be exported in order to troubleshoot potential congestion + // control issues. 
+ struct DebugState { + explicit DebugState(const BbrNetworkController& sender); + DebugState(const DebugState& state); + + Mode mode; + DataRate max_bandwidth; + BbrRoundTripCount round_trip_count; + int gain_cycle_index; + DataSize congestion_window; + + bool is_at_full_bandwidth; + DataRate bandwidth_at_last_round; + BbrRoundTripCount rounds_without_bandwidth_gain; + + TimeDelta min_rtt; + Timestamp min_rtt_timestamp; + + RecoveryState recovery_state; + DataSize recovery_window; + + bool last_sample_is_app_limited; + Timestamp end_of_app_limited_phase; + }; + + BbrNetworkController(NetworkControllerObserver* observer, + NetworkControllerConfig config); + ~BbrNetworkController() override; + + // NetworkControllerInterface + void OnNetworkAvailability(NetworkAvailability msg) override; + void OnNetworkRouteChange(NetworkRouteChange msg) override; + void OnProcessInterval(ProcessInterval msg) override; + void OnSentPacket(SentPacket msg) override; + void OnStreamsConfig(StreamsConfig msg) override; + void OnTargetRateConstraints(TargetRateConstraints msg) override; + void OnTransportPacketsFeedback(TransportPacketsFeedback msg) override; + + // Part of remote bitrate estimation api, not implemented for BBR + void OnRemoteBitrateReport(RemoteBitrateReport msg) override; + void OnRoundTripTimeUpdate(RoundTripTimeUpdate msg) override; + void OnTransportLossReport(TransportLossReport msg) override; + + private: + struct BbrControllerConfig { + // Default config based on default QUIC config + static BbrControllerConfig DefaultConfig(); + + double probe_bw_pacing_gain_offset; + double encoder_rate_gain; + double encoder_rate_gain_in_probe_rtt; + // RTT delta to determine if startup should be exited due to increased RTT. + int64_t exit_startup_rtt_threshold_ms; + + double probe_rtt_congestion_window_gain; + + // Configurable in QUIC BBR: + bool exit_startup_on_loss; + // The number of RTTs to stay in STARTUP mode. Defaults to 3. + BbrRoundTripCount num_startup_rtts; + // When true, recovery is rate based rather than congestion window based. + bool rate_based_recovery; + double max_aggregation_bytes_multiplier; + // When true, pace at 1.5x and disable packet conservation in STARTUP. + bool slower_startup; + // When true, disables packet conservation in STARTUP. + bool rate_based_startup; + // If true, will not exit low gain mode until bytes_in_flight drops below + // BDP or it's time for high gain mode. + bool fully_drain_queue; + // Used as the initial packet conservation mode when first entering + // recovery. + RecoveryState initial_conservation_in_startup; + + double max_ack_height_window_multiplier; + // If true, use a CWND of 0.75*BDP during probe_rtt instead of 4 packets. + bool probe_rtt_based_on_bdp; + // If true, skip probe_rtt and update the timestamp of the existing min_rtt + // to now if min_rtt over the last cycle is within 12.5% of the current + // min_rtt. Even if the min_rtt is 12.5% too low, the 25% gain cycling and + // 2x CWND gain should overcome an overly small min_rtt. + bool probe_rtt_skipped_if_similar_rtt; + // If true, disable PROBE_RTT entirely as long as the connection was + // recently app limited. + bool probe_rtt_disabled_if_app_limited; + }; + // Containing values that when changed should trigger an update. 
+  struct UpdateState {
+    Mode mode = Mode::STARTUP;
+    DataRate bandwidth;
+    TimeDelta rtt;
+    DataRate pacing_rate;
+    DataRate target_rate;
+    bool probing_for_bandwidth = false;
+  };
+
+  void Reset();
+  void SignalUpdatedRates(Timestamp at_time);
+
+  bool InSlowStart() const;
+  bool InRecovery() const;
+  bool IsProbingForMoreBandwidth() const;
+
+  bool CanSend(DataSize bytes_in_flight);
+  DataRate PacingRate() const;
+  DataRate BandwidthEstimate() const;
+  DataSize GetCongestionWindow() const;
+
+  double GetPacingGain(int round_offset) const;
+
+  void OnApplicationLimited(DataSize bytes_in_flight);
+  // End implementation of SendAlgorithmInterface.
+
+  typedef WindowedFilter<DataRate,
+                         MaxFilter<DataRate>,
+                         BbrRoundTripCount,
+                         BbrRoundTripCount>
+      MaxBandwidthFilter;
+
+  typedef WindowedFilter<TimeDelta,
+                         MaxFilter<TimeDelta>,
+                         BbrRoundTripCount,
+                         BbrRoundTripCount>
+      MaxAckDelayFilter;
+
+  typedef WindowedFilter<DataSize,
+                         MaxFilter<DataSize>,
+                         BbrRoundTripCount,
+                         BbrRoundTripCount>
+      MaxAckHeightFilter;
+
+  // Returns the current estimate of the RTT of the connection. Outside of the
+  // edge cases, this is minimum RTT.
+  TimeDelta GetMinRtt() const;
+
+  // Computes the target congestion window using the specified gain.
+  DataSize GetTargetCongestionWindow(double gain) const;
+  // The target congestion window during PROBE_RTT.
+  DataSize ProbeRttCongestionWindow() const;
+  // Returns true if the current min_rtt should be kept and we should not enter
+  // PROBE_RTT immediately.
+  bool ShouldExtendMinRttExpiry() const;
+
+  // Enters the STARTUP mode.
+  void EnterStartupMode();
+  // Enters the PROBE_BW mode.
+  void EnterProbeBandwidthMode(Timestamp now);
+
+  // Updates the round-trip counter if a round-trip has passed. Returns true if
+  // the counter has been advanced.
+  bool UpdateRoundTripCounter(Timestamp last_acked_timestamp);
+  // Updates the current bandwidth and min_rtt estimate based on the samples
+  // for the received acknowledgements. Returns true if min_rtt has expired.
+  void UpdateBandwidth(Timestamp now,
+                       const std::vector<PacketResult>& acked_packets);
+  bool UpdateMinRtt(Timestamp ack_time, Timestamp last_packet_send_time);
+  // Updates the current gain used in PROBE_BW mode.
+  void UpdateGainCyclePhase(Timestamp now,
+                            DataSize prior_in_flight,
+                            bool has_losses);
+  // Tracks for how many round-trips the bandwidth has not increased
+  // significantly.
+  void CheckIfFullBandwidthReached();
+  // Transitions from STARTUP to DRAIN and from DRAIN to PROBE_BW if
+  // appropriate.
+  void MaybeExitStartupOrDrain(const TransportPacketsFeedback&);
+  // Decides whether to enter or exit PROBE_RTT.
+  void MaybeEnterOrExitProbeRtt(const TransportPacketsFeedback& msg,
+                                bool is_round_start,
+                                bool min_rtt_expired);
+  // Determines whether BBR needs to enter, exit or advance the state of
+  // recovery.
+  void UpdateRecoveryState(Timestamp last_acked_send_time,
+                           bool has_losses,
+                           bool is_round_start);
+
+  // Updates the ack aggregation max filter in bytes.
+  void UpdateAckAggregationBytes(Timestamp ack_time,
+                                 DataSize newly_acked_bytes);
+
+  // Determines the appropriate pacing rate for the connection.
+  void CalculatePacingRate();
+  // Determines the appropriate congestion window for the connection.
+  void CalculateCongestionWindow(DataSize bytes_acked);
+  // Determines the appropriate window that constrains the data in flight
+  // during recovery.
+ void CalculateRecoveryWindow(DataSize bytes_acked, + DataSize bytes_lost, + DataSize bytes_in_flight); + NetworkControllerObserver* observer_; + + RttStats rtt_stats_; + webrtc::Random random_; + + DataTransferTracker send_ack_tracker_; + + rtc::Optional constraints_; + + Mode mode_ = STARTUP; + + BbrControllerConfig config_; + + // The total number of congestion controlled bytes which were acknowledged. + DataSize total_bytes_acked_; + + // The total number of congestion controlled bytes sent during the connection. + DataSize total_bytes_sent_; + + // The time at which the last acknowledged packet was sent. Set to + // Timestamp::ms(0) if no valid timestamp is available. + Timestamp last_acked_packet_sent_time_ = Timestamp::ms(0); + + // The time at which the most recent packet was acknowledged. + Timestamp last_acked_packet_ack_time_ = Timestamp::ms(0); + + bool is_app_limited_ = false; + + // The packet that will be acknowledged after this one will cause the sampler + // to exit the app-limited phase. + Timestamp end_of_app_limited_phase_ = Timestamp::ms(0); + + // The number of the round trips that have occurred during the connection. + BbrRoundTripCount round_trip_count_ = 0; + + // The send time of the most recently sent packet. + Timestamp last_send_time_ = Timestamp::ms(0); + + // Acknowledgement of any packet after |current_round_trip_end_| will cause + // the round trip counter to advance. + Timestamp current_round_trip_end_ = Timestamp::ms(0); + + // The filter that tracks the maximum bandwidth over the multiple recent + // round-trips. + MaxBandwidthFilter max_bandwidth_; + + DataRate default_bandwidth_; + + // Tracks the maximum number of bytes acked faster than the sending rate. + MaxAckHeightFilter max_ack_height_; + + // The time this aggregation started and the number of bytes acked during it. + Timestamp aggregation_epoch_start_time_; + DataSize aggregation_epoch_bytes_; + + // The number of bytes acknowledged since the last time bytes in flight + // dropped below the target window. + DataSize bytes_acked_since_queue_drained_; + + // The muliplier for calculating the max amount of extra CWND to add to + // compensate for ack aggregation. + double max_aggregation_bytes_multiplier_ = 0; + + // Minimum RTT estimate. Automatically expires within 10 seconds (and + // triggers PROBE_RTT mode) if no new value is sampled during that period. + TimeDelta min_rtt_ = TimeDelta::Zero(); + TimeDelta last_rtt_ = TimeDelta::Zero(); + // The time at which the current value of |min_rtt_| was assigned. + Timestamp min_rtt_timestamp_ = Timestamp::ms(0); + + // The maximum allowed number of bytes in flight. + DataSize congestion_window_; + + // The initial value of the |congestion_window_|. + DataSize initial_congestion_window_; + + // The largest value the |congestion_window_| can achieve. + DataSize max_congestion_window_; + + // The current pacing rate of the connection. + DataRate pacing_rate_ = DataRate::Zero(); + + // The gain currently applied to the pacing rate. + double pacing_gain_ = 1; + // The gain currently applied to the congestion window. + double congestion_window_gain_ = 1; + + // The gain used for the congestion window during PROBE_BW. Latched from + // quic_bbr_cwnd_gain flag. + const double congestion_window_gain_constant_; + // The coefficient by which mean RTT variance is added to the congestion + // window. Latched from quic_bbr_rtt_variation_weight flag. 
+ const double rtt_variance_weight_; + // If true, exit startup if 1RTT has passed with no bandwidth increase and + // the connection is in recovery. + bool exit_startup_on_loss_ = false; + + // Number of round-trips in PROBE_BW mode, used for determining the current + // pacing gain cycle. + int cycle_current_offset_ = 0; + // The time at which the last pacing gain cycle was started. + Timestamp last_cycle_start_; + + // Indicates whether the connection has reached the full bandwidth mode. + bool is_at_full_bandwidth_ = false; + // Number of rounds during which there was no significant bandwidth increase. + BbrRoundTripCount rounds_without_bandwidth_gain_ = 0; + // The bandwidth compared to which the increase is measured. + DataRate bandwidth_at_last_round_ = DataRate::Zero(); + + // Time at which PROBE_RTT has to be exited. Setting it to zero indicates + // that the time is yet unknown as the number of packets in flight has not + // reached the required value. + Timestamp exit_probe_rtt_at_; + // Indicates whether a round-trip has passed since PROBE_RTT became active. + bool probe_rtt_round_passed_ = false; + + // Indicates whether the most recent bandwidth sample was marked as + // app-limited. + bool last_sample_is_app_limited_ = false; + + // Current state of recovery. + RecoveryState recovery_state_ = NOT_IN_RECOVERY; + // Receiving acknowledgement of a packet after |end_recovery_at_| will cause + // BBR to exit the recovery mode. A value after epoch indicates at least one + // loss has been detected, so it must not be set back to epoch. + Timestamp end_recovery_at_ = Timestamp::ms(0); + // A window used to limit the number of bytes in flight during loss recovery. + DataSize recovery_window_; + + bool app_limited_since_last_probe_rtt_ = false; + TimeDelta min_rtt_since_last_probe_rtt_ = TimeDelta::PlusInfinity(); + UpdateState last_update_state_; + + RTC_DISALLOW_COPY_AND_ASSIGN(BbrNetworkController); +}; + +// Used in log output +std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982) + std::ostream& os, // no-presubmit-check TODO(webrtc:8982) + const BbrNetworkController::Mode& mode); + +} // namespace bbr +} // namespace webrtc + +#endif // MODULES_CONGESTION_CONTROLLER_BBR_BBR_NETWORK_CONTROLLER_H_ diff --git a/modules/congestion_controller/bbr/bbr_network_controller_unittest.cc b/modules/congestion_controller/bbr/bbr_network_controller_unittest.cc new file mode 100644 index 0000000000..cd3f50c7ca --- /dev/null +++ b/modules/congestion_controller/bbr/bbr_network_controller_unittest.cc @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include +#include + +#include "modules/congestion_controller/bbr/bbr_factory.h" +#include "modules/congestion_controller/bbr/bbr_network_controller.h" +#include "modules/congestion_controller/network_control/test/mock_network_control.h" +#include "modules/congestion_controller/network_control/test/network_control_tester.h" +#include "test/gtest.h" + +using testing::Field; +using testing::Matcher; +using testing::AllOf; +using testing::Ge; +using testing::Le; +using testing::NiceMock; +using testing::Property; +using testing::StrictMock; +using testing::_; + +namespace webrtc { +namespace bbr { +namespace test { +namespace { + +const DataRate kInitialBitrate = DataRate::kbps(60); +const Timestamp kDefaultStartTime = Timestamp::ms(10000000); + +constexpr double kDataRateMargin = 0.3; +constexpr double kMinDataRateFactor = 1 - kDataRateMargin; +constexpr double kMaxDataRateFactor = 1 + kDataRateMargin; +inline Matcher TargetRateCloseTo(DataRate rate) { + DataRate min_data_rate = rate * kMinDataRateFactor; + DataRate max_data_rate = rate * kMaxDataRateFactor; + return Field(&TargetTransferRate::target_rate, + AllOf(Ge(min_data_rate), Le(max_data_rate))); +} + +NetworkControllerConfig InitialConfig( + int starting_bandwidth_kbps = kInitialBitrate.kbps(), + int min_data_rate_kbps = 0, + int max_data_rate_kbps = 5 * kInitialBitrate.kbps()) { + NetworkControllerConfig config; + config.constraints.at_time = kDefaultStartTime; + config.constraints.min_data_rate = DataRate::kbps(min_data_rate_kbps); + config.constraints.max_data_rate = DataRate::kbps(max_data_rate_kbps); + config.starting_bandwidth = DataRate::kbps(starting_bandwidth_kbps); + return config; +} + +NetworkRouteChange CreateRouteChange(Timestamp at_time, + DataRate start_rate, + DataRate min_rate = DataRate::Zero(), + DataRate max_rate = DataRate::Infinity()) { + NetworkRouteChange route_change; + route_change.at_time = at_time; + route_change.constraints.at_time = at_time; + route_change.constraints.min_data_rate = min_rate; + route_change.constraints.max_data_rate = max_rate; + route_change.starting_rate = start_rate; + return route_change; +} +} // namespace + +class BbrNetworkControllerTest : public ::testing::Test { + protected: + BbrNetworkControllerTest() {} + ~BbrNetworkControllerTest() override {} +}; + +TEST_F(BbrNetworkControllerTest, SendsConfigurationOnInitialization) { + StrictMock observer; + EXPECT_CALL(observer, + OnTargetTransferRate(TargetRateCloseTo(kInitialBitrate))); + EXPECT_CALL(observer, OnPacerConfig(Property(&PacerConfig::data_rate, + Ge(kInitialBitrate)))); + EXPECT_CALL(observer, + OnCongestionWindow(Field(&CongestionWindow::data_window, + Property(&DataSize::IsFinite, true)))); + + std::unique_ptr controller_; + controller_.reset(new BbrNetworkController(&observer, InitialConfig())); + testing::Mock::VerifyAndClearExpectations(&observer); +} + +TEST_F(BbrNetworkControllerTest, SendsConfigurationOnNetworkRouteChanged) { + StrictMock observer; + EXPECT_CALL(observer, OnTargetTransferRate(_)); + EXPECT_CALL(observer, OnPacerConfig(_)); + EXPECT_CALL(observer, OnCongestionWindow(_)); + std::unique_ptr controller_; + controller_.reset(new BbrNetworkController(&observer, InitialConfig())); + + DataRate new_bitrate = DataRate::bps(200000); + EXPECT_CALL(observer, OnTargetTransferRate(TargetRateCloseTo(new_bitrate))); + EXPECT_CALL(observer, OnPacerConfig(Property(&PacerConfig::data_rate, + Ge(kInitialBitrate)))); + EXPECT_CALL(observer, OnCongestionWindow(_)); + controller_->OnNetworkRouteChange( + 
CreateRouteChange(kDefaultStartTime, new_bitrate)); + testing::Mock::VerifyAndClearExpectations(&observer); +} + +// Bandwidth estimation is updated when feedbacks are received. +// Feedbacks which show an increasing delay cause the estimation to be reduced. +TEST_F(BbrNetworkControllerTest, UpdatesTargetSendRate) { + BbrNetworkControllerFactory factory; + webrtc::test::NetworkControllerTester tester(&factory, + InitialConfig(60, 0, 600)); + auto packet_producer = &webrtc::test::SimpleTargetRateProducer::ProduceNext; + + tester.RunSimulation(TimeDelta::seconds(5), TimeDelta::ms(10), + DataRate::kbps(300), TimeDelta::ms(100), + packet_producer); + EXPECT_GE(tester.GetState().target_rate->target_rate, + DataRate::kbps(300) * kMinDataRateFactor); + EXPECT_LE(tester.GetState().target_rate->target_rate, + DataRate::kbps(300) * kMaxDataRateFactor); + + tester.RunSimulation(TimeDelta::seconds(30), TimeDelta::ms(10), + DataRate::kbps(500), TimeDelta::ms(100), + packet_producer); + EXPECT_GE(tester.GetState().target_rate->target_rate, + DataRate::kbps(500) * kMinDataRateFactor); + EXPECT_LE(tester.GetState().target_rate->target_rate, + DataRate::kbps(500) * kMaxDataRateFactor); + + tester.RunSimulation(TimeDelta::seconds(30), TimeDelta::ms(10), + DataRate::kbps(100), TimeDelta::ms(200), + packet_producer); + EXPECT_GE(tester.GetState().target_rate->target_rate, + DataRate::kbps(100) * kMinDataRateFactor); + EXPECT_LE(tester.GetState().target_rate->target_rate, + DataRate::kbps(100) * kMaxDataRateFactor); +} + +} // namespace test +} // namespace bbr +} // namespace webrtc diff --git a/modules/congestion_controller/network_control/test/network_control_tester.cc b/modules/congestion_controller/network_control/test/network_control_tester.cc index 0fe6308384..8c0ffc06db 100644 --- a/modules/congestion_controller/network_control/test/network_control_tester.cc +++ b/modules/congestion_controller/network_control/test/network_control_tester.cc @@ -57,7 +57,7 @@ SentPacket SimpleTargetRateProducer::ProduceNext( return packet; } -FeedbackBasedNetworkControllerTester::FeedbackBasedNetworkControllerTester( +NetworkControllerTester::NetworkControllerTester( NetworkControllerFactoryInterface* factory, NetworkControllerConfig initial_config) : current_time_(Timestamp::seconds(100000)), @@ -67,14 +67,12 @@ FeedbackBasedNetworkControllerTester::FeedbackBasedNetworkControllerTester( process_interval_ = factory->GetProcessInterval(); } -FeedbackBasedNetworkControllerTester::~FeedbackBasedNetworkControllerTester() = - default; +NetworkControllerTester::~NetworkControllerTester() = default; -PacketResult FeedbackBasedNetworkControllerTester::SimulateSend( - SentPacket packet, - TimeDelta time_delta, - TimeDelta propagation_delay, - DataRate actual_bandwidth) { +PacketResult NetworkControllerTester::SimulateSend(SentPacket packet, + TimeDelta time_delta, + TimeDelta propagation_delay, + DataRate actual_bandwidth) { TimeDelta bandwidth_delay = packet.size / actual_bandwidth; accumulated_delay_ = std::max(accumulated_delay_ - time_delta, TimeDelta::Zero()); @@ -87,12 +85,11 @@ PacketResult FeedbackBasedNetworkControllerTester::SimulateSend( return result; } -void FeedbackBasedNetworkControllerTester::RunSimulation( - TimeDelta duration, - TimeDelta packet_interval, - DataRate actual_bandwidth, - TimeDelta propagation_delay, - PacketProducer next_packet) { +void NetworkControllerTester::RunSimulation(TimeDelta duration, + TimeDelta packet_interval, + DataRate actual_bandwidth, + TimeDelta propagation_delay, + 
PacketProducer next_packet) { Timestamp start_time = current_time_; Timestamp last_process_time = current_time_; while (current_time_ - start_time < duration) { diff --git a/modules/congestion_controller/network_control/test/network_control_tester.h b/modules/congestion_controller/network_control/test/network_control_tester.h index 8fce2a3f84..744011ed41 100644 --- a/modules/congestion_controller/network_control/test/network_control_tester.h +++ b/modules/congestion_controller/network_control/test/network_control_tester.h @@ -51,7 +51,7 @@ class NetworkControlCacher : public NetworkControllerObserver { NetworkControlState current_state_; }; -class FeedbackBasedNetworkControllerTester { +class NetworkControllerTester { public: // A PacketProducer is a function that takes a network control state, a // timestamp representing the expected send time and a time delta of the send @@ -59,10 +59,9 @@ class FeedbackBasedNetworkControllerTester { // SentPacket struct with actual send time and packet size. using PacketProducer = std::function< SentPacket(const NetworkControlState&, Timestamp, TimeDelta)>; - FeedbackBasedNetworkControllerTester( - NetworkControllerFactoryInterface* factory, - NetworkControllerConfig initial_config); - ~FeedbackBasedNetworkControllerTester(); + NetworkControllerTester(NetworkControllerFactoryInterface* factory, + NetworkControllerConfig initial_config); + ~NetworkControllerTester(); // Runs the simulations for the given duration, the PacketProducer will be // called repeatedly based on the given packet interval and the network will