Add send-side BWE to WebRtcVoiceEngine under a finch experiment.

This adds negotiation of both the transport sequence number header
extension and transport feedback. The transport sequence number is only
offered if the WebRTC-Audio-SendSideBwe Finch experiment is enabled.

TBR=mflodman@webrtc.org
BUG=webrtc:5263

Review URL: https://codereview.webrtc.org/1604563002

Cr-Commit-Position: refs/heads/master@{#11487}
This commit is contained in:
stefan 2016-02-04 04:12:24 -08:00 committed by Commit bot
parent 2ddb8bd359
commit ba4c0e45ff
11 changed files with 218 additions and 255 deletions

View File

@ -326,17 +326,17 @@ std::string DataCodec::ToString() const {
return os.str();
}
bool HasNack(const VideoCodec& codec) {
bool HasNack(const Codec& codec) {
return codec.HasFeedbackParam(
FeedbackParam(kRtcpFbParamNack, kParamValueEmpty));
}
bool HasRemb(const VideoCodec& codec) {
bool HasRemb(const Codec& codec) {
return codec.HasFeedbackParam(
FeedbackParam(kRtcpFbParamRemb, kParamValueEmpty));
}
bool HasTransportCc(const VideoCodec& codec) {
bool HasTransportCc(const Codec& codec) {
return codec.HasFeedbackParam(
FeedbackParam(kRtcpFbParamTransportCc, kParamValueEmpty));
}

View File

@ -235,9 +235,9 @@ bool FindCodecById(const std::vector<Codec>& codecs,
}
bool CodecNamesEq(const std::string& name1, const std::string& name2);
bool HasNack(const VideoCodec& codec);
bool HasRemb(const VideoCodec& codec);
bool HasTransportCc(const VideoCodec& codec);
bool HasNack(const Codec& codec);
bool HasRemb(const Codec& codec);
bool HasTransportCc(const Codec& codec);
} // namespace cricket

View File

@ -204,11 +204,6 @@ bool VerifyUniquePayloadTypes(const std::vector<AudioCodec>& codecs) {
return it == payload_types.end();
}
bool IsNackEnabled(const AudioCodec& codec) {
return codec.HasFeedbackParam(FeedbackParam(kRtcpFbParamNack,
kParamValueEmpty));
}
// Return true if codec.params[feature] == "1", false otherwise.
bool IsCodecFeatureEnabled(const AudioCodec& codec, const char* feature) {
int value;
@ -331,6 +326,8 @@ class WebRtcVoiceCodecs final {
rtc::ToString(kPreferredMaxPTime);
}
codec.SetParam(kCodecParamUseInbandFec, 1);
codec.AddFeedbackParam(
FeedbackParam(kRtcpFbParamTransportCc, kParamValueEmpty));
// TODO(hellner): Add ptime, sprop-stereo, and stereo
// when they can be set to values other than the default.
@ -415,6 +412,45 @@ class WebRtcVoiceCodecs final {
return false;
}
// Selects the preferred send codec: the first entry in |codecs| that is
// neither telephone-event nor CN and that converts to a known VoE codec.
// On success fills |voe_codec| with the matching webrtc::CodecInst and sets
// |red_payload_type| to the RED payload type when RED is in use (-1
// otherwise). Returns a pointer into |codecs| for the chosen codec, or
// nullptr if no usable codec was found.
static const AudioCodec* GetPreferredCodec(
const std::vector<AudioCodec>& codecs,
webrtc::CodecInst* voe_codec,
int* red_payload_type) {
RTC_DCHECK(voe_codec);
RTC_DCHECK(red_payload_type);
// Select the preferred send codec (the first non-telephone-event/CN codec).
for (const AudioCodec& codec : codecs) {
// Reset on every iteration: RED only applies if the codec ultimately
// chosen in this pass is a "red" entry.
*red_payload_type = -1;
if (IsCodec(codec, kDtmfCodecName) || IsCodec(codec, kCnCodecName)) {
// Skip telephone-event/CN codec, which will be handled later.
continue;
}
// We'll use the first codec in the list to actually send audio data.
// Be sure to use the payload type requested by the remote side.
// "red", for RED audio, is a special case where the actual codec to be
// used is specified in params.
const AudioCodec* found_codec = &codec;
if (IsCodec(*found_codec, kRedCodecName)) {
// Parse out the RED parameters. If we fail, just ignore RED;
// we don't support all possible params/usage scenarios.
*red_payload_type = codec.id;
found_codec = GetRedSendCodec(*found_codec, codecs);
if (!found_codec) {
continue;
}
}
// Ignore codecs we don't know about. The negotiation step should prevent
// this, but double-check to be sure.
if (!ToCodecInst(*found_codec, voe_codec)) {
LOG(LS_WARNING) << "Unknown codec " << ToString(*found_codec);
continue;
}
return found_codec;
}
return nullptr;
}
private:
static const int kMaxNumPacketSize = 6;
struct CodecPref {
@ -449,6 +485,44 @@ class WebRtcVoiceCodecs final {
voe_codec->plfreq = new_plfreq;
}
}
// Resolves the actual codec a "red" (RED audio) entry stands for. The RED
// payload type is parsed from |red_codec|'s unnamed parameter of the form
// "a/b" (only a == b is supported); if the parameter is absent, the second
// codec in |all_codecs| is used as a default. Returns the codec in
// |all_codecs| whose id matches the parsed payload type, or nullptr if the
// parameters are malformed or no codec matches.
static const AudioCodec* GetRedSendCodec(
const AudioCodec& red_codec,
const std::vector<AudioCodec>& all_codecs) {
// Get the RED encodings from the parameter with no name. This may
// change based on what is discussed on the Jingle list.
// The encoding parameter is of the form "a/b"; we only support where
// a == b. Verify this and parse out the value into red_pt.
// If the parameter value is absent (as it will be until we wire up the
// signaling of this message), use the second codec specified (i.e. the
// one after "red") as the encoding parameter.
int red_pt = -1;
std::string red_params;
CodecParameterMap::const_iterator it = red_codec.params.find("");
if (it != red_codec.params.end()) {
red_params = it->second;
std::vector<std::string> red_pts;
if (rtc::split(red_params, '/', &red_pts) != 2 ||
red_pts[0] != red_pts[1] || !rtc::FromString(red_pts[0], &red_pt)) {
LOG(LS_WARNING) << "RED params " << red_params << " not supported.";
return nullptr;
}
} else if (red_codec.params.empty()) {
LOG(LS_WARNING) << "RED params not present, using defaults";
if (all_codecs.size() > 1) {
red_pt = all_codecs[1].id;
}
}
// Try to find red_pt in |all_codecs|.
for (const AudioCodec& codec : all_codecs) {
if (codec.id == red_pt) {
return &codec;
}
}
LOG(LS_WARNING) << "RED params " << red_params << " are invalid.";
return nullptr;
}
};
const WebRtcVoiceCodecs::CodecPref WebRtcVoiceCodecs::kCodecPrefs[12] = {
@ -943,6 +1017,12 @@ RtpCapabilities WebRtcVoiceEngine::GetCapabilities() const {
capabilities.header_extensions.push_back(
RtpHeaderExtension(kRtpAbsoluteSenderTimeHeaderExtension,
kRtpAbsoluteSenderTimeHeaderExtensionDefaultId));
if (webrtc::field_trial::FindFullName("WebRTC-Audio-SendSideBwe") ==
"Enabled") {
capabilities.header_extensions.push_back(RtpHeaderExtension(
kRtpTransportSequenceNumberHeaderExtension,
kRtpTransportSequenceNumberHeaderExtensionDefaultId));
}
return capabilities;
}
@ -1218,19 +1298,21 @@ class WebRtcVoiceMediaChannel::WebRtcAudioSendStream
class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream {
public:
WebRtcAudioReceiveStream(int ch, uint32_t remote_ssrc, uint32_t local_ssrc,
bool use_combined_bwe, const std::string& sync_group,
WebRtcAudioReceiveStream(int ch,
uint32_t remote_ssrc,
uint32_t local_ssrc,
bool use_transport_cc,
const std::string& sync_group,
const std::vector<webrtc::RtpExtension>& extensions,
webrtc::Call* call)
: call_(call),
config_() {
: call_(call), config_() {
RTC_DCHECK_GE(ch, 0);
RTC_DCHECK(call);
config_.rtp.remote_ssrc = remote_ssrc;
config_.rtp.local_ssrc = local_ssrc;
config_.voe_channel_id = ch;
config_.sync_group = sync_group;
RecreateAudioReceiveStream(use_combined_bwe, extensions);
RecreateAudioReceiveStream(use_transport_cc, extensions);
}
~WebRtcAudioReceiveStream() {
@ -1241,11 +1323,11 @@ class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream {
void RecreateAudioReceiveStream(
const std::vector<webrtc::RtpExtension>& extensions) {
RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
RecreateAudioReceiveStream(config_.combined_audio_video_bwe, extensions);
RecreateAudioReceiveStream(config_.rtp.transport_cc, extensions);
}
void RecreateAudioReceiveStream(bool use_combined_bwe) {
void RecreateAudioReceiveStream(bool use_transport_cc) {
RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
RecreateAudioReceiveStream(use_combined_bwe, config_.rtp.extensions);
RecreateAudioReceiveStream(use_transport_cc, config_.rtp.extensions);
}
webrtc::AudioReceiveStream::Stats GetStats() const {
@ -1265,7 +1347,8 @@ class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream {
}
private:
void RecreateAudioReceiveStream(bool use_combined_bwe,
void RecreateAudioReceiveStream(
bool use_transport_cc,
const std::vector<webrtc::RtpExtension>& extensions) {
RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
if (stream_) {
@ -1273,7 +1356,7 @@ class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream {
stream_ = nullptr;
}
config_.rtp.extensions = extensions;
config_.combined_audio_video_bwe = use_combined_bwe;
config_.rtp.transport_cc = use_transport_cc;
RTC_DCHECK(!stream_);
stream_ = call_->CreateAudioReceiveStream(config_);
RTC_CHECK(stream_);
@ -1369,7 +1452,6 @@ bool WebRtcVoiceMediaChannel::SetRecvParameters(
it.second->RecreateAudioReceiveStream(recv_rtp_extensions_);
}
}
return true;
}
@ -1401,12 +1483,6 @@ bool WebRtcVoiceMediaChannel::SetOptions(const AudioOptions& options) {
}
}
// TODO(solenberg): Don't recreate unless options changed.
for (auto& it : recv_streams_) {
it.second->RecreateAudioReceiveStream(
options_.combined_audio_video_bwe.value_or(false));
}
LOG(LS_INFO) << "Set voice channel options. Current options: "
<< options_.ToString();
return true;
@ -1491,7 +1567,6 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
// Scan through the list to figure out the codec to use for sending, along
// with the proper configuration for VAD.
bool found_send_codec = false;
webrtc::CodecInst send_codec;
memset(&send_codec, 0, sizeof(send_codec));
@ -1499,53 +1574,33 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
bool enable_codec_fec = false;
bool enable_opus_dtx = false;
int opus_max_playback_rate = 0;
int red_payload_type = -1;
// Set send codec (the first non-telephone-event/CN codec)
for (const AudioCodec& codec : codecs) {
// Ignore codecs we don't know about. The negotiation step should prevent
// this, but double-check to be sure.
webrtc::CodecInst voe_codec;
if (!WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) {
LOG(LS_WARNING) << "Unknown codec " << ToString(codec);
continue;
}
if (IsCodec(codec, kDtmfCodecName) || IsCodec(codec, kCnCodecName)) {
// Skip telephone-event/CN codec, which will be handled later.
continue;
}
// We'll use the first codec in the list to actually send audio data.
// Be sure to use the payload type requested by the remote side.
// "red", for RED audio, is a special case where the actual codec to be
// used is specified in params.
if (IsCodec(codec, kRedCodecName)) {
// Parse out the RED parameters. If we fail, just ignore RED;
// we don't support all possible params/usage scenarios.
if (!GetRedSendCodec(codec, codecs, &send_codec)) {
continue;
}
const AudioCodec* codec = WebRtcVoiceCodecs::GetPreferredCodec(
codecs, &send_codec, &red_payload_type);
if (codec) {
if (red_payload_type != -1) {
// Enable redundant encoding of the specified codec. Treat any
// failure as a fatal internal error.
LOG(LS_INFO) << "Enabling RED on channel " << channel;
if (engine()->voe()->rtp()->SetREDStatus(channel, true, codec.id) == -1) {
LOG_RTCERR3(SetREDStatus, channel, true, codec.id);
if (engine()->voe()->rtp()->SetREDStatus(channel, true,
red_payload_type) == -1) {
LOG_RTCERR3(SetREDStatus, channel, true, red_payload_type);
return false;
}
} else {
send_codec = voe_codec;
nack_enabled = IsNackEnabled(codec);
nack_enabled = HasNack(*codec);
// For Opus as the send codec, we are to determine inband FEC, maximum
// playback rate, and opus internal dtx.
if (IsCodec(codec, kOpusCodecName)) {
GetOpusConfig(codec, &send_codec, &enable_codec_fec,
if (IsCodec(*codec, kOpusCodecName)) {
GetOpusConfig(*codec, &send_codec, &enable_codec_fec,
&opus_max_playback_rate, &enable_opus_dtx);
}
// Set packet size if the AudioCodec param kCodecParamPTime is set.
int ptime_ms = 0;
if (codec.GetParam(kCodecParamPTime, &ptime_ms)) {
if (codec->GetParam(kCodecParamPTime, &ptime_ms)) {
if (!WebRtcVoiceCodecs::SetPTimeAsPacketSize(&send_codec, ptime_ms)) {
LOG(LS_WARNING) << "Failed to set packet size for codec "
<< send_codec.plname;
@ -1553,16 +1608,13 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
}
}
}
found_send_codec = true;
break;
}
if (nack_enabled_ != nack_enabled) {
SetNack(channel, nack_enabled);
nack_enabled_ = nack_enabled;
}
if (!found_send_codec) {
if (!codec) {
LOG(LS_WARNING) << "Received empty list of codecs.";
return false;
}
@ -1710,6 +1762,25 @@ bool WebRtcVoiceMediaChannel::SetSendCodecs(
SetNack(ch.second->channel(), nack_enabled_);
}
// Check if the transport cc feedback has changed on the preferred send codec,
// and in that case reconfigure all receive streams.
webrtc::CodecInst voe_codec;
int red_payload_type;
const AudioCodec* send_codec = WebRtcVoiceCodecs::GetPreferredCodec(
send_codecs_, &voe_codec, &red_payload_type);
if (send_codec) {
bool transport_cc = HasTransportCc(*send_codec);
if (transport_cc_enabled_ != transport_cc) {
LOG(LS_INFO) << "Recreate all the receive streams because the send "
"codec has changed.";
transport_cc_enabled_ = transport_cc;
for (auto& kv : recv_streams_) {
RTC_DCHECK(kv.second != nullptr);
kv.second->RecreateAudioReceiveStream(transport_cc_enabled_);
}
}
}
return true;
}
@ -2016,10 +2087,13 @@ bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
<< " is associated with channel #" << send_channel << ".";
}
recv_streams_.insert(std::make_pair(ssrc, new WebRtcAudioReceiveStream(
channel, ssrc, receiver_reports_ssrc_,
options_.combined_audio_video_bwe.value_or(false), sp.sync_label,
recv_rtp_extensions_, call_)));
transport_cc_enabled_ =
!send_codecs_.empty() ? HasTransportCc(send_codecs_[0]) : false;
recv_streams_.insert(std::make_pair(
ssrc, new WebRtcAudioReceiveStream(channel, ssrc, receiver_reports_ssrc_,
transport_cc_enabled_, sp.sync_label,
recv_rtp_extensions_, call_)));
SetNack(channel, nack_enabled_);
SetPlayout(channel, playout_);
@ -2477,50 +2551,6 @@ int WebRtcVoiceMediaChannel::GetSendChannelId(uint32_t ssrc) const {
return -1;
}
// Resolves the codec a "red" (RED audio) entry stands for and, on success,
// fills |send_codec| with the CodecInst of the matching codec from
// |all_codecs|. Returns false if the RED parameters are malformed or the
// matching codec cannot be converted to a known VoE codec.
bool WebRtcVoiceMediaChannel::GetRedSendCodec(const AudioCodec& red_codec,
const std::vector<AudioCodec>& all_codecs, webrtc::CodecInst* send_codec) {
// Get the RED encodings from the parameter with no name. This may
// change based on what is discussed on the Jingle list.
// The encoding parameter is of the form "a/b"; we only support where
// a == b. Verify this and parse out the value into red_pt.
// If the parameter value is absent (as it will be until we wire up the
// signaling of this message), use the second codec specified (i.e. the
// one after "red") as the encoding parameter.
int red_pt = -1;
std::string red_params;
CodecParameterMap::const_iterator it = red_codec.params.find("");
if (it != red_codec.params.end()) {
red_params = it->second;
std::vector<std::string> red_pts;
if (rtc::split(red_params, '/', &red_pts) != 2 ||
red_pts[0] != red_pts[1] ||
!rtc::FromString(red_pts[0], &red_pt)) {
LOG(LS_WARNING) << "RED params " << red_params << " not supported.";
return false;
}
} else if (red_codec.params.empty()) {
LOG(LS_WARNING) << "RED params not present, using defaults";
if (all_codecs.size() > 1) {
red_pt = all_codecs[1].id;
}
}
// Try to find red_pt in |all_codecs|.
for (const AudioCodec& codec : all_codecs) {
if (codec.id == red_pt) {
// If we find the right codec, that will be the codec we pass to
// SetSendCodec, with the desired payload type.
if (WebRtcVoiceEngine::ToCodecInst(codec, send_codec)) {
return true;
} else {
break;
}
}
}
LOG(LS_WARNING) << "RED params " << red_params << " are invalid.";
return false;
}
bool WebRtcVoiceMediaChannel::SetPlayout(int channel, bool playout) {
if (playout) {
LOG(LS_INFO) << "Starting playout for channel #" << channel;

View File

@ -234,9 +234,6 @@ class WebRtcVoiceMediaChannel final : public VoiceMediaChannel,
WebRtcVoiceEngine* engine() { return engine_; }
int GetLastEngineError() { return engine()->GetLastEngineError(); }
int GetOutputLevel(int channel);
bool GetRedSendCodec(const AudioCodec& red_codec,
const std::vector<AudioCodec>& all_codecs,
webrtc::CodecInst* send_codec);
bool SetPlayout(int channel, bool playout);
void SetNack(int channel, bool nack_enabled);
bool SetSendCodec(int channel, const webrtc::CodecInst& send_codec);
@ -263,6 +260,7 @@ class WebRtcVoiceMediaChannel final : public VoiceMediaChannel,
rtc::Optional<int> dtmf_payload_type_;
bool desired_playout_ = false;
bool nack_enabled_ = false;
bool transport_cc_enabled_ = false;
bool playout_ = false;
SendFlags desired_send_ = SEND_NOTHING;
SendFlags send_ = SEND_NOTHING;

View File

@ -29,6 +29,8 @@
#include "webrtc/base/byteorder.h"
#include "webrtc/base/gunit.h"
#include "webrtc/call.h"
#include "webrtc/p2p/base/faketransportcontroller.h"
#include "webrtc/test/field_trial.h"
#include "talk/media/base/constants.h"
#include "talk/media/base/fakemediaengine.h"
#include "talk/media/base/fakenetworkinterface.h"
@ -36,7 +38,6 @@
#include "talk/media/webrtc/fakewebrtccall.h"
#include "talk/media/webrtc/fakewebrtcvoiceengine.h"
#include "talk/media/webrtc/webrtcvoiceengine.h"
#include "webrtc/p2p/base/faketransportcontroller.h"
#include "talk/session/media/channel.h"
using cricket::kRtpAudioLevelHeaderExtension;
@ -79,10 +80,13 @@ class FakeAudioSink : public webrtc::AudioSinkInterface {
class WebRtcVoiceEngineTestFake : public testing::Test {
public:
WebRtcVoiceEngineTestFake()
WebRtcVoiceEngineTestFake() : WebRtcVoiceEngineTestFake("") {}
explicit WebRtcVoiceEngineTestFake(const char* field_trials)
: call_(webrtc::Call::Config()),
engine_(new FakeVoEWrapper(&voe_)),
channel_(nullptr) {
channel_(nullptr),
override_field_trials_(field_trials) {
send_parameters_.codecs.push_back(kPcmuCodec);
recv_parameters_.codecs.push_back(kPcmuCodec);
}
@ -410,6 +414,9 @@ class WebRtcVoiceEngineTestFake : public testing::Test {
cricket::VoiceMediaChannel* channel_;
cricket::AudioSendParameters send_parameters_;
cricket::AudioRecvParameters recv_parameters_;
private:
webrtc::test::ScopedFieldTrials override_field_trials_;
};
// Tests that our stub library "works".
@ -444,6 +451,18 @@ TEST_F(WebRtcVoiceEngineTestFake, CodecPreference) {
}
}
// Verifies that the engine's built-in Opus codec is advertised with the
// transport-cc feedback parameter attached.
TEST_F(WebRtcVoiceEngineTestFake, OpusSupportsTransportCc) {
  const std::vector<cricket::AudioCodec>& codecs = engine_.codecs();
  bool opus_found = false;
  // Iterate by const reference: the original copied each AudioCodec (with
  // its params/feedback maps) on every iteration for no benefit.
  for (const cricket::AudioCodec& codec : codecs) {
    if (codec.name == "opus") {
      EXPECT_TRUE(HasTransportCc(codec));
      opus_found = true;
    }
  }
  EXPECT_TRUE(opus_found);
}
// Tests that we can find codecs by name or id, and that we interpret the
// clockrate and bitrate fields properly.
TEST_F(WebRtcVoiceEngineTestFake, FindCodec) {
@ -1287,6 +1306,29 @@ TEST_F(WebRtcVoiceEngineTestFake, ChangeOpusFecStatus) {
EXPECT_TRUE(voe_.GetCodecFEC(channel_num));
}
// Verifies that existing receive streams are recreated with
// rtp.transport_cc toggled when the preferred send codec's transport-cc
// feedback parameter changes via SetSendParameters().
TEST_F(WebRtcVoiceEngineTestFake, TransportCcCanBeEnabledAndDisabled) {
EXPECT_TRUE(SetupEngine());
cricket::AudioSendParameters send_parameters;
send_parameters.codecs.push_back(kOpusCodec);
// The test codec constant carries no feedback params, so transport-cc
// starts out disabled on the receive stream.
EXPECT_TRUE(send_parameters.codecs[0].feedback_params.params().empty());
EXPECT_TRUE(channel_->SetSendParameters(send_parameters));
cricket::AudioRecvParameters recv_parameters;
recv_parameters.codecs.push_back(kIsacCodec);
EXPECT_TRUE(channel_->SetRecvParameters(recv_parameters));
EXPECT_TRUE(
channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(kSsrc1)));
ASSERT_TRUE(call_.GetAudioReceiveStream(kSsrc1) != nullptr);
EXPECT_FALSE(
call_.GetAudioReceiveStream(kSsrc1)->GetConfig().rtp.transport_cc);
// Switching to the engine's own codec list (whose Opus entry carries the
// transport-cc feedback param) should flip transport_cc on the stream.
send_parameters.codecs = engine_.codecs();
EXPECT_TRUE(channel_->SetSendParameters(send_parameters));
ASSERT_TRUE(call_.GetAudioReceiveStream(kSsrc1) != nullptr);
EXPECT_TRUE(
call_.GetAudioReceiveStream(kSsrc1)->GetConfig().rtp.transport_cc);
}
// Test maxplaybackrate <= 8000 triggers Opus narrow band mode.
TEST_F(WebRtcVoiceEngineTestFake, SetOpusMaxPlaybackRateNb) {
EXPECT_TRUE(SetupEngineWithSendStream());
@ -1929,6 +1971,27 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsBadRED5) {
EXPECT_FALSE(voe_.GetRED(channel_num));
}
// Fixture identical to WebRtcVoiceEngineTestFake except that it runs with
// the WebRTC-Audio-SendSideBwe field trial forced to Enabled.
class WebRtcVoiceEngineWithSendSideBweTest : public WebRtcVoiceEngineTestFake {
public:
WebRtcVoiceEngineWithSendSideBweTest()
: WebRtcVoiceEngineTestFake("WebRTC-Audio-SendSideBwe/Enabled/") {}
};
// With the send-side BWE field trial enabled, GetCapabilities() must
// advertise the transport sequence number header extension with its
// default id.
TEST_F(WebRtcVoiceEngineWithSendSideBweTest,
SupportsTransportSequenceNumberHeaderExtension) {
cricket::RtpCapabilities capabilities = engine_.GetCapabilities();
ASSERT_FALSE(capabilities.header_extensions.empty());
for (const cricket::RtpHeaderExtension& extension :
capabilities.header_extensions) {
if (extension.uri == cricket::kRtpTransportSequenceNumberHeaderExtension) {
EXPECT_EQ(cricket::kRtpTransportSequenceNumberHeaderExtensionDefaultId,
extension.id);
return;
}
}
FAIL() << "Transport sequence number extension not in header-extension list.";
}
// Test support for audio level header extension.
TEST_F(WebRtcVoiceEngineTestFake, SendAudioLevelHeaderExtensions) {
TestSetSendRtpHeaderExtensions(kRtpAudioLevelHeaderExtension);
@ -2928,68 +2991,6 @@ TEST_F(WebRtcVoiceEngineTestFake, SetsSyncGroupFromSyncLabel) {
<< "SyncGroup should be set based on sync_label";
}
TEST_F(WebRtcVoiceEngineTestFake, CanChangeCombinedBweOption) {
// Test that changing the combined_audio_video_bwe option results in the
// expected state changes on an associated Call.
std::vector<uint32_t> ssrcs;
ssrcs.push_back(223);
ssrcs.push_back(224);
EXPECT_TRUE(SetupEngineWithSendStream());
cricket::WebRtcVoiceMediaChannel* media_channel =
static_cast<cricket::WebRtcVoiceMediaChannel*>(channel_);
for (uint32_t ssrc : ssrcs) {
EXPECT_TRUE(media_channel->AddRecvStream(
cricket::StreamParams::CreateLegacy(ssrc)));
}
EXPECT_EQ(2, call_.GetAudioReceiveStreams().size());
// Combined BWE should be disabled by default on newly added streams.
for (uint32_t ssrc : ssrcs) {
const auto* s = call_.GetAudioReceiveStream(ssrc);
EXPECT_NE(nullptr, s);
EXPECT_FALSE(s->GetConfig().combined_audio_video_bwe);
}
// Enable combined BWE option - now it should be set up.
send_parameters_.options.combined_audio_video_bwe = rtc::Optional<bool>(true);
EXPECT_TRUE(media_channel->SetSendParameters(send_parameters_));
for (uint32_t ssrc : ssrcs) {
const auto* s = call_.GetAudioReceiveStream(ssrc);
EXPECT_NE(nullptr, s);
EXPECT_EQ(true, s->GetConfig().combined_audio_video_bwe);
}
// Disable combined BWE option - should be disabled again.
send_parameters_.options.combined_audio_video_bwe =
rtc::Optional<bool>(false);
EXPECT_TRUE(media_channel->SetSendParameters(send_parameters_));
for (uint32_t ssrc : ssrcs) {
const auto* s = call_.GetAudioReceiveStream(ssrc);
EXPECT_NE(nullptr, s);
EXPECT_FALSE(s->GetConfig().combined_audio_video_bwe);
}
// Recreating the streams must not change how many exist on the Call.
EXPECT_EQ(2, call_.GetAudioReceiveStreams().size());
}
TEST_F(WebRtcVoiceEngineTestFake, ConfigureCombinedBweForNewRecvStreams) {
// Test that adding receive streams after enabling combined bandwidth
// estimation will correctly configure each channel.
EXPECT_TRUE(SetupEngineWithSendStream());
cricket::WebRtcVoiceMediaChannel* media_channel =
static_cast<cricket::WebRtcVoiceMediaChannel*>(channel_);
send_parameters_.options.combined_audio_video_bwe = rtc::Optional<bool>(true);
EXPECT_TRUE(media_channel->SetSendParameters(send_parameters_));
// kSsrcs4 is a fixture-provided array of test SSRCs; every stream added
// after the option was set must show up on the fake Call.
for (uint32_t ssrc : kSsrcs4) {
EXPECT_TRUE(media_channel->AddRecvStream(
cricket::StreamParams::CreateLegacy(ssrc)));
EXPECT_NE(nullptr, call_.GetAudioReceiveStream(ssrc));
}
EXPECT_EQ(arraysize(kSsrcs4), call_.GetAudioReceiveStreams().size());
}
// TODO(solenberg): Remove, once recv streams are configured through Call.
// (This is then covered by TestSetRecvRtpHeaderExtensions.)
TEST_F(WebRtcVoiceEngineTestFake, ConfiguresAudioReceiveStreamRtpExtensions) {
@ -3002,14 +3003,12 @@ TEST_F(WebRtcVoiceEngineTestFake, ConfiguresAudioReceiveStreamRtpExtensions) {
EXPECT_TRUE(SetupEngineWithSendStream());
cricket::WebRtcVoiceMediaChannel* media_channel =
static_cast<cricket::WebRtcVoiceMediaChannel*>(channel_);
send_parameters_.options.combined_audio_video_bwe = rtc::Optional<bool>(true);
EXPECT_TRUE(media_channel->SetSendParameters(send_parameters_));
for (uint32_t ssrc : ssrcs) {
EXPECT_TRUE(media_channel->AddRecvStream(
cricket::StreamParams::CreateLegacy(ssrc)));
}
// Combined BWE should be set up, but with no configured extensions.
EXPECT_EQ(2, call_.GetAudioReceiveStreams().size());
for (uint32_t ssrc : ssrcs) {
const auto* s = call_.GetAudioReceiveStream(ssrc);
@ -3061,7 +3060,6 @@ TEST_F(WebRtcVoiceEngineTestFake, DeliverAudioPacket_Call) {
EXPECT_TRUE(SetupEngineWithSendStream());
cricket::WebRtcVoiceMediaChannel* media_channel =
static_cast<cricket::WebRtcVoiceMediaChannel*>(channel_);
send_parameters_.options.combined_audio_video_bwe = rtc::Optional<bool>(true);
EXPECT_TRUE(channel_->SetSendParameters(send_parameters_));
EXPECT_TRUE(media_channel->AddRecvStream(
cricket::StreamParams::CreateLegacy(kAudioSsrc)));

View File

@ -58,6 +58,7 @@ std::string AudioReceiveStream::Config::Rtp::ToString() const {
}
}
ss << ']';
ss << ", transport_cc: " << (transport_cc ? "on" : "off");
ss << '}';
return ss.str();
}
@ -73,8 +74,6 @@ std::string AudioReceiveStream::Config::ToString() const {
if (!sync_group.empty()) {
ss << ", sync_group: " << sync_group;
}
ss << ", combined_audio_video_bwe: "
<< (combined_audio_video_bwe ? "true" : "false");
ss << '}';
return ss.str();
}
@ -119,15 +118,9 @@ AudioReceiveStream::AudioReceiveStream(
// Configure bandwidth estimation.
channel_proxy_->RegisterReceiverCongestionControlObjects(
congestion_controller->packet_router());
if (config.combined_audio_video_bwe) {
if (UseSendSideBwe(config)) {
remote_bitrate_estimator_ =
congestion_controller->GetRemoteBitrateEstimator(true);
} else {
remote_bitrate_estimator_ =
congestion_controller->GetRemoteBitrateEstimator(false);
}
RTC_DCHECK(remote_bitrate_estimator_);
if (UseSendSideBwe(config)) {
remote_bitrate_estimator_ =
congestion_controller->GetRemoteBitrateEstimator(true);
}
}
@ -176,8 +169,7 @@ bool AudioReceiveStream::DeliverRtp(const uint8_t* packet,
// bandwidth estimation. RTP timestamps has different rates for audio and
// video and shouldn't be mixed.
if (remote_bitrate_estimator_ &&
(header.extension.hasAbsoluteSendTime ||
header.extension.hasTransportSequenceNumber)) {
header.extension.hasTransportSequenceNumber) {
int64_t arrival_time_ms = TickTime::MillisecondTimestamp();
if (packet_time.timestamp >= 0)
arrival_time_ms = (packet_time.timestamp + 500) / 1000;

View File

@ -213,12 +213,12 @@ TEST(AudioReceiveStreamTest, ConfigToString) {
config.rtp.extensions.push_back(
RtpExtension(RtpExtension::kAbsSendTime, kAbsSendTimeId));
config.voe_channel_id = kChannelId;
config.combined_audio_video_bwe = true;
EXPECT_EQ(
"{rtp: {remote_ssrc: 1234, local_ssrc: 5678, extensions: [{name: "
"http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time, id: 2}]}, "
"http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time, id: 2}], "
"transport_cc: off}, "
"receive_transport: nullptr, rtcp_send_transport: nullptr, "
"voe_channel_id: 2, combined_audio_video_bwe: true}",
"voe_channel_id: 2}",
config.ToString());
}
@ -239,32 +239,8 @@ MATCHER_P(VerifyHeaderExtension, expected_extension, "") {
expected_extension.transportSequenceNumber;
}
TEST(AudioReceiveStreamTest, AudioPacketUpdatesBweWithTimestamp) {
ConfigHelper helper;
helper.config().combined_audio_video_bwe = true;
helper.SetupMockForBweFeedback(false);
internal::AudioReceiveStream recv_stream(
helper.congestion_controller(), helper.config(), helper.audio_state());
const int kAbsSendTimeValue = 1234;
std::vector<uint8_t> rtp_packet =
CreateRtpHeaderWithOneByteExtension(kAbsSendTimeId, kAbsSendTimeValue, 3);
PacketTime packet_time(5678000, 0);
const size_t kExpectedHeaderLength = 20;
RTPHeaderExtension expected_extension;
expected_extension.hasAbsoluteSendTime = true;
expected_extension.absoluteSendTime = kAbsSendTimeValue;
EXPECT_CALL(*helper.remote_bitrate_estimator(),
IncomingPacket(packet_time.timestamp / 1000,
rtp_packet.size() - kExpectedHeaderLength,
VerifyHeaderExtension(expected_extension), false))
.Times(1);
EXPECT_TRUE(
recv_stream.DeliverRtp(&rtp_packet[0], rtp_packet.size(), packet_time));
}
TEST(AudioReceiveStreamTest, AudioPacketUpdatesBweFeedback) {
ConfigHelper helper;
helper.config().combined_audio_video_bwe = true;
helper.config().rtp.transport_cc = true;
helper.SetupMockForBweFeedback(true);
internal::AudioReceiveStream recv_stream(

View File

@ -102,9 +102,6 @@ class AudioReceiveStream : public ReceiveStream {
// Call::CreateReceiveStream().
// TODO(solenberg): Use unique_ptr<> once our std lib fully supports C++11.
std::map<uint8_t, AudioDecoder*> decoder_map;
// TODO(pbos): Remove config option once combined A/V BWE is always on.
bool combined_audio_video_bwe = false;
};
virtual Stats GetStats() const = 0;

View File

@ -190,7 +190,6 @@ class BitrateEstimatorTest : public test::CallTest {
receive_config.voe_channel_id = 0;
receive_config.rtp.extensions.push_back(
RtpExtension(RtpExtension::kAbsSendTime, kASTExtensionId));
receive_config.combined_audio_video_bwe = true;
audio_receive_stream_ =
test_->receiver_call_->CreateAudioReceiveStream(receive_config);
} else {
@ -273,17 +272,6 @@ TEST_F(BitrateEstimatorTest, InstantiatesTOFPerDefaultForVideo) {
EXPECT_TRUE(receiver_log_.Wait());
}
TEST_F(BitrateEstimatorTest, ImmediatelySwitchToASTForAudio) {
video_send_config_.rtp.extensions.push_back(
RtpExtension(RtpExtension::kAbsSendTime, kASTExtensionId));
receiver_log_.PushExpectedLogLine(kSingleStreamLog);
receiver_log_.PushExpectedLogLine(kSingleStreamLog);
receiver_log_.PushExpectedLogLine("Switching to absolute send time RBE.");
receiver_log_.PushExpectedLogLine(kAbsSendTimeLog);
streams_.push_back(new Stream(this, true));
EXPECT_TRUE(receiver_log_.Wait());
}
TEST_F(BitrateEstimatorTest, ImmediatelySwitchToASTForVideo) {
video_send_config_.rtp.extensions.push_back(
RtpExtension(RtpExtension::kAbsSendTime, kASTExtensionId));
@ -295,20 +283,6 @@ TEST_F(BitrateEstimatorTest, ImmediatelySwitchToASTForVideo) {
EXPECT_TRUE(receiver_log_.Wait());
}
TEST_F(BitrateEstimatorTest, SwitchesToASTForAudio) {
receiver_log_.PushExpectedLogLine(kSingleStreamLog);
receiver_log_.PushExpectedLogLine(kSingleStreamLog);
streams_.push_back(new Stream(this, true));
EXPECT_TRUE(receiver_log_.Wait());
video_send_config_.rtp.extensions.push_back(
RtpExtension(RtpExtension::kAbsSendTime, kASTExtensionId));
receiver_log_.PushExpectedLogLine("Switching to absolute send time RBE.");
receiver_log_.PushExpectedLogLine(kAbsSendTimeLog);
streams_.push_back(new Stream(this, true));
EXPECT_TRUE(receiver_log_.Wait());
}
TEST_F(BitrateEstimatorTest, SwitchesToASTForVideo) {
video_send_config_.rtp.extensions.push_back(
RtpExtension(RtpExtension::kTOffset, kTOFExtensionId));

View File

@ -200,7 +200,6 @@ void RampUpTester::ModifyAudioConfigs(
}
for (AudioReceiveStream::Config& recv_config : *receive_configs) {
recv_config.combined_audio_video_bwe = true;
recv_config.rtp.transport_cc = transport_cc;
recv_config.rtp.extensions = send_config->rtp.extensions;
recv_config.rtp.remote_ssrc = send_config->rtp.ssrc;

View File

@ -1639,7 +1639,6 @@ class TransportFeedbackTester : public test::EndToEndTest {
(*receive_configs)[0].rtp.extensions.clear();
(*receive_configs)[0].rtp.extensions = send_config->rtp.extensions;
(*receive_configs)[0].rtp.transport_cc = feedback_enabled_;
(*receive_configs)[0].combined_audio_video_bwe = true;
}
private: