APM: Add build flag to allow building WebRTC without APM
This CL adds a build flag to allow building the non-test parts of WebRTC without the audio processing module. The CL also ensures that the WebRTC code correctly handles the case when no APM is available.

Bug: webrtc:5298
Change-Id: I5c8b5d1f7115e5cce2af4c2b5ff701fa1c54e49e
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/171509
Commit-Queue: Per Åhgren <peah@webrtc.org>
Reviewed-by: Sam Zackrisson <saza@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#31133}
Parent: 86bd33a1e7
Commit: cc73ed3e70
BUILD.gn: 4 lines changed
@@ -281,6 +281,10 @@ config("common_config") {
     defines += [ "WEBRTC_EXCLUDE_TRANSIENT_SUPPRESSOR" ]
   }
 
+  if (rtc_exclude_audio_processing_module) {
+    defines += [ "WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE" ]
+  }
+
   cflags = []
 
   if (build_with_chromium) {
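With the define in place, APM construction can be compiled out behind a preprocessor guard. A minimal, hypothetical factory sketch (not part of this CL; AudioProcessingBuilder is the stock WebRTC factory, though its exact return type has varied between releases, so treat the snippet as illustrative only):

#include "modules/audio_processing/include/audio_processing.h"

// Hypothetical helper, not from this CL: yields no APM when the module is
// excluded via rtc_exclude_audio_processing_module, so callers must tolerate
// a null pointer, as the changes below demonstrate.
webrtc::AudioProcessing* MaybeCreateApm() {
#ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE
  return nullptr;  // APM compiled out of this build.
#else
  return webrtc::AudioProcessingBuilder().Create();
#endif
}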
@@ -75,15 +75,21 @@ const NetworkStatistics kNetworkStats = {
 const AudioDecodingCallStats kAudioDecodeStats = MakeAudioDecodeStatsForTest();
 
 struct ConfigHelper {
-  ConfigHelper() : ConfigHelper(new rtc::RefCountedObject<MockAudioMixer>()) {}
+  explicit ConfigHelper(bool use_null_audio_processing)
+      : ConfigHelper(new rtc::RefCountedObject<MockAudioMixer>(),
+                     use_null_audio_processing) {}
 
-  explicit ConfigHelper(rtc::scoped_refptr<MockAudioMixer> audio_mixer)
+  ConfigHelper(rtc::scoped_refptr<MockAudioMixer> audio_mixer,
+               bool use_null_audio_processing)
       : audio_mixer_(audio_mixer) {
     using ::testing::Invoke;
 
     AudioState::Config config;
     config.audio_mixer = audio_mixer_;
-    config.audio_processing = new rtc::RefCountedObject<MockAudioProcessing>();
+    config.audio_processing =
+        use_null_audio_processing
+            ? nullptr
+            : new rtc::RefCountedObject<MockAudioProcessing>();
     config.audio_device_module =
         new rtc::RefCountedObject<testing::NiceMock<MockAudioDeviceModule>>();
     audio_state_ = AudioState::Create(config);
@@ -230,182 +236,200 @@ TEST(AudioReceiveStreamTest, ConfigToString) {
|
||||
}
|
||||
|
||||
TEST(AudioReceiveStreamTest, ConstructDestruct) {
|
||||
ConfigHelper helper;
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(use_null_audio_processing);
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioReceiveStreamTest, ReceiveRtpPacket) {
|
||||
ConfigHelper helper;
|
||||
helper.config().rtp.transport_cc = true;
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
const int kTransportSequenceNumberValue = 1234;
|
||||
std::vector<uint8_t> rtp_packet = CreateRtpHeaderWithOneByteExtension(
|
||||
kTransportSequenceNumberId, kTransportSequenceNumberValue, 2);
|
||||
constexpr int64_t packet_time_us = 5678000;
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(use_null_audio_processing);
|
||||
helper.config().rtp.transport_cc = true;
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
const int kTransportSequenceNumberValue = 1234;
|
||||
std::vector<uint8_t> rtp_packet = CreateRtpHeaderWithOneByteExtension(
|
||||
kTransportSequenceNumberId, kTransportSequenceNumberValue, 2);
|
||||
constexpr int64_t packet_time_us = 5678000;
|
||||
|
||||
RtpPacketReceived parsed_packet;
|
||||
ASSERT_TRUE(parsed_packet.Parse(&rtp_packet[0], rtp_packet.size()));
|
||||
parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000);
|
||||
RtpPacketReceived parsed_packet;
|
||||
ASSERT_TRUE(parsed_packet.Parse(&rtp_packet[0], rtp_packet.size()));
|
||||
parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000);
|
||||
|
||||
EXPECT_CALL(*helper.channel_receive(),
|
||||
OnRtpPacket(::testing::Ref(parsed_packet)));
|
||||
EXPECT_CALL(*helper.channel_receive(),
|
||||
OnRtpPacket(::testing::Ref(parsed_packet)));
|
||||
|
||||
recv_stream->OnRtpPacket(parsed_packet);
|
||||
recv_stream->OnRtpPacket(parsed_packet);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioReceiveStreamTest, ReceiveRtcpPacket) {
|
||||
ConfigHelper helper;
|
||||
helper.config().rtp.transport_cc = true;
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
std::vector<uint8_t> rtcp_packet = CreateRtcpSenderReport();
|
||||
EXPECT_CALL(*helper.channel_receive(),
|
||||
ReceivedRTCPPacket(&rtcp_packet[0], rtcp_packet.size()))
|
||||
.WillOnce(Return());
|
||||
recv_stream->DeliverRtcp(&rtcp_packet[0], rtcp_packet.size());
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(use_null_audio_processing);
|
||||
helper.config().rtp.transport_cc = true;
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
std::vector<uint8_t> rtcp_packet = CreateRtcpSenderReport();
|
||||
EXPECT_CALL(*helper.channel_receive(),
|
||||
ReceivedRTCPPacket(&rtcp_packet[0], rtcp_packet.size()))
|
||||
.WillOnce(Return());
|
||||
recv_stream->DeliverRtcp(&rtcp_packet[0], rtcp_packet.size());
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioReceiveStreamTest, GetStats) {
|
||||
ConfigHelper helper;
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
helper.SetupMockForGetStats();
|
||||
AudioReceiveStream::Stats stats = recv_stream->GetStats();
|
||||
EXPECT_EQ(kRemoteSsrc, stats.remote_ssrc);
|
||||
EXPECT_EQ(kCallStats.payload_bytes_rcvd, stats.payload_bytes_rcvd);
|
||||
EXPECT_EQ(kCallStats.header_and_padding_bytes_rcvd,
|
||||
stats.header_and_padding_bytes_rcvd);
|
||||
EXPECT_EQ(static_cast<uint32_t>(kCallStats.packetsReceived),
|
||||
stats.packets_rcvd);
|
||||
EXPECT_EQ(kCallStats.cumulativeLost, stats.packets_lost);
|
||||
EXPECT_EQ(kReceiveCodec.second.name, stats.codec_name);
|
||||
EXPECT_EQ(
|
||||
kCallStats.jitterSamples / (kReceiveCodec.second.clockrate_hz / 1000),
|
||||
stats.jitter_ms);
|
||||
EXPECT_EQ(kNetworkStats.currentBufferSize, stats.jitter_buffer_ms);
|
||||
EXPECT_EQ(kNetworkStats.preferredBufferSize,
|
||||
stats.jitter_buffer_preferred_ms);
|
||||
EXPECT_EQ(static_cast<uint32_t>(kJitterBufferDelay + kPlayoutBufferDelay),
|
||||
stats.delay_estimate_ms);
|
||||
EXPECT_EQ(static_cast<int32_t>(kSpeechOutputLevel), stats.audio_level);
|
||||
EXPECT_EQ(kTotalOutputEnergy, stats.total_output_energy);
|
||||
EXPECT_EQ(kNetworkStats.totalSamplesReceived, stats.total_samples_received);
|
||||
EXPECT_EQ(kTotalOutputDuration, stats.total_output_duration);
|
||||
EXPECT_EQ(kNetworkStats.concealedSamples, stats.concealed_samples);
|
||||
EXPECT_EQ(kNetworkStats.concealmentEvents, stats.concealment_events);
|
||||
EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferDelayMs) /
|
||||
static_cast<double>(rtc::kNumMillisecsPerSec),
|
||||
stats.jitter_buffer_delay_seconds);
|
||||
EXPECT_EQ(kNetworkStats.jitterBufferEmittedCount,
|
||||
stats.jitter_buffer_emitted_count);
|
||||
EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferTargetDelayMs) /
|
||||
static_cast<double>(rtc::kNumMillisecsPerSec),
|
||||
stats.jitter_buffer_target_delay_seconds);
|
||||
EXPECT_EQ(Q14ToFloat(kNetworkStats.currentExpandRate), stats.expand_rate);
|
||||
EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSpeechExpandRate),
|
||||
stats.speech_expand_rate);
|
||||
EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDecodedRate),
|
||||
stats.secondary_decoded_rate);
|
||||
EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDiscardedRate),
|
||||
stats.secondary_discarded_rate);
|
||||
EXPECT_EQ(Q14ToFloat(kNetworkStats.currentAccelerateRate),
|
||||
stats.accelerate_rate);
|
||||
EXPECT_EQ(Q14ToFloat(kNetworkStats.currentPreemptiveRate),
|
||||
stats.preemptive_expand_rate);
|
||||
EXPECT_EQ(kAudioDecodeStats.calls_to_silence_generator,
|
||||
stats.decoding_calls_to_silence_generator);
|
||||
EXPECT_EQ(kAudioDecodeStats.calls_to_neteq, stats.decoding_calls_to_neteq);
|
||||
EXPECT_EQ(kAudioDecodeStats.decoded_normal, stats.decoding_normal);
|
||||
EXPECT_EQ(kAudioDecodeStats.decoded_neteq_plc, stats.decoding_plc);
|
||||
EXPECT_EQ(kAudioDecodeStats.decoded_codec_plc, stats.decoding_codec_plc);
|
||||
EXPECT_EQ(kAudioDecodeStats.decoded_cng, stats.decoding_cng);
|
||||
EXPECT_EQ(kAudioDecodeStats.decoded_plc_cng, stats.decoding_plc_cng);
|
||||
EXPECT_EQ(kAudioDecodeStats.decoded_muted_output,
|
||||
stats.decoding_muted_output);
|
||||
EXPECT_EQ(kCallStats.capture_start_ntp_time_ms_,
|
||||
stats.capture_start_ntp_time_ms);
|
||||
EXPECT_EQ(kPlayoutNtpTimestampMs, stats.estimated_playout_ntp_timestamp_ms);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(use_null_audio_processing);
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
helper.SetupMockForGetStats();
|
||||
AudioReceiveStream::Stats stats = recv_stream->GetStats();
|
||||
EXPECT_EQ(kRemoteSsrc, stats.remote_ssrc);
|
||||
EXPECT_EQ(kCallStats.payload_bytes_rcvd, stats.payload_bytes_rcvd);
|
||||
EXPECT_EQ(kCallStats.header_and_padding_bytes_rcvd,
|
||||
stats.header_and_padding_bytes_rcvd);
|
||||
EXPECT_EQ(static_cast<uint32_t>(kCallStats.packetsReceived),
|
||||
stats.packets_rcvd);
|
||||
EXPECT_EQ(kCallStats.cumulativeLost, stats.packets_lost);
|
||||
EXPECT_EQ(kReceiveCodec.second.name, stats.codec_name);
|
||||
EXPECT_EQ(
|
||||
kCallStats.jitterSamples / (kReceiveCodec.second.clockrate_hz / 1000),
|
||||
stats.jitter_ms);
|
||||
EXPECT_EQ(kNetworkStats.currentBufferSize, stats.jitter_buffer_ms);
|
||||
EXPECT_EQ(kNetworkStats.preferredBufferSize,
|
||||
stats.jitter_buffer_preferred_ms);
|
||||
EXPECT_EQ(static_cast<uint32_t>(kJitterBufferDelay + kPlayoutBufferDelay),
|
||||
stats.delay_estimate_ms);
|
||||
EXPECT_EQ(static_cast<int32_t>(kSpeechOutputLevel), stats.audio_level);
|
||||
EXPECT_EQ(kTotalOutputEnergy, stats.total_output_energy);
|
||||
EXPECT_EQ(kNetworkStats.totalSamplesReceived, stats.total_samples_received);
|
||||
EXPECT_EQ(kTotalOutputDuration, stats.total_output_duration);
|
||||
EXPECT_EQ(kNetworkStats.concealedSamples, stats.concealed_samples);
|
||||
EXPECT_EQ(kNetworkStats.concealmentEvents, stats.concealment_events);
|
||||
EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferDelayMs) /
|
||||
static_cast<double>(rtc::kNumMillisecsPerSec),
|
||||
stats.jitter_buffer_delay_seconds);
|
||||
EXPECT_EQ(kNetworkStats.jitterBufferEmittedCount,
|
||||
stats.jitter_buffer_emitted_count);
|
||||
EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferTargetDelayMs) /
|
||||
static_cast<double>(rtc::kNumMillisecsPerSec),
|
||||
stats.jitter_buffer_target_delay_seconds);
|
||||
EXPECT_EQ(Q14ToFloat(kNetworkStats.currentExpandRate), stats.expand_rate);
|
||||
EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSpeechExpandRate),
|
||||
stats.speech_expand_rate);
|
||||
EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDecodedRate),
|
||||
stats.secondary_decoded_rate);
|
||||
EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDiscardedRate),
|
||||
stats.secondary_discarded_rate);
|
||||
EXPECT_EQ(Q14ToFloat(kNetworkStats.currentAccelerateRate),
|
||||
stats.accelerate_rate);
|
||||
EXPECT_EQ(Q14ToFloat(kNetworkStats.currentPreemptiveRate),
|
||||
stats.preemptive_expand_rate);
|
||||
EXPECT_EQ(kAudioDecodeStats.calls_to_silence_generator,
|
||||
stats.decoding_calls_to_silence_generator);
|
||||
EXPECT_EQ(kAudioDecodeStats.calls_to_neteq, stats.decoding_calls_to_neteq);
|
||||
EXPECT_EQ(kAudioDecodeStats.decoded_normal, stats.decoding_normal);
|
||||
EXPECT_EQ(kAudioDecodeStats.decoded_neteq_plc, stats.decoding_plc);
|
||||
EXPECT_EQ(kAudioDecodeStats.decoded_codec_plc, stats.decoding_codec_plc);
|
||||
EXPECT_EQ(kAudioDecodeStats.decoded_cng, stats.decoding_cng);
|
||||
EXPECT_EQ(kAudioDecodeStats.decoded_plc_cng, stats.decoding_plc_cng);
|
||||
EXPECT_EQ(kAudioDecodeStats.decoded_muted_output,
|
||||
stats.decoding_muted_output);
|
||||
EXPECT_EQ(kCallStats.capture_start_ntp_time_ms_,
|
||||
stats.capture_start_ntp_time_ms);
|
||||
EXPECT_EQ(kPlayoutNtpTimestampMs, stats.estimated_playout_ntp_timestamp_ms);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioReceiveStreamTest, SetGain) {
|
||||
ConfigHelper helper;
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
EXPECT_CALL(*helper.channel_receive(),
|
||||
SetChannelOutputVolumeScaling(FloatEq(0.765f)));
|
||||
recv_stream->SetGain(0.765f);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(use_null_audio_processing);
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
EXPECT_CALL(*helper.channel_receive(),
|
||||
SetChannelOutputVolumeScaling(FloatEq(0.765f)));
|
||||
recv_stream->SetGain(0.765f);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioReceiveStreamTest, StreamsShouldBeAddedToMixerOnceOnStart) {
|
||||
ConfigHelper helper1;
|
||||
ConfigHelper helper2(helper1.audio_mixer());
|
||||
auto recv_stream1 = helper1.CreateAudioReceiveStream();
|
||||
auto recv_stream2 = helper2.CreateAudioReceiveStream();
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper1(use_null_audio_processing);
|
||||
ConfigHelper helper2(helper1.audio_mixer(), use_null_audio_processing);
|
||||
auto recv_stream1 = helper1.CreateAudioReceiveStream();
|
||||
auto recv_stream2 = helper2.CreateAudioReceiveStream();
|
||||
|
||||
EXPECT_CALL(*helper1.channel_receive(), StartPlayout()).Times(1);
|
||||
EXPECT_CALL(*helper2.channel_receive(), StartPlayout()).Times(1);
|
||||
EXPECT_CALL(*helper1.channel_receive(), StopPlayout()).Times(1);
|
||||
EXPECT_CALL(*helper2.channel_receive(), StopPlayout()).Times(1);
|
||||
EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream1.get()))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream2.get()))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream1.get()))
|
||||
.Times(1);
|
||||
EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream2.get()))
|
||||
.Times(1);
|
||||
EXPECT_CALL(*helper1.channel_receive(), StartPlayout()).Times(1);
|
||||
EXPECT_CALL(*helper2.channel_receive(), StartPlayout()).Times(1);
|
||||
EXPECT_CALL(*helper1.channel_receive(), StopPlayout()).Times(1);
|
||||
EXPECT_CALL(*helper2.channel_receive(), StopPlayout()).Times(1);
|
||||
EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream1.get()))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream2.get()))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream1.get()))
|
||||
.Times(1);
|
||||
EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream2.get()))
|
||||
.Times(1);
|
||||
|
||||
recv_stream1->Start();
|
||||
recv_stream2->Start();
|
||||
recv_stream1->Start();
|
||||
recv_stream2->Start();
|
||||
|
||||
// One more should not result in any more mixer sources added.
|
||||
recv_stream1->Start();
|
||||
// One more should not result in any more mixer sources added.
|
||||
recv_stream1->Start();
|
||||
|
||||
// Stop stream before it is being destructed.
|
||||
recv_stream2->Stop();
|
||||
// Stop stream before it is being destructed.
|
||||
recv_stream2->Stop();
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioReceiveStreamTest, ReconfigureWithSameConfig) {
|
||||
ConfigHelper helper;
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
recv_stream->Reconfigure(helper.config());
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(use_null_audio_processing);
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
recv_stream->Reconfigure(helper.config());
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioReceiveStreamTest, ReconfigureWithUpdatedConfig) {
|
||||
ConfigHelper helper;
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(use_null_audio_processing);
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
|
||||
auto new_config = helper.config();
|
||||
new_config.rtp.nack.rtp_history_ms = 300 + 20;
|
||||
new_config.rtp.extensions.clear();
|
||||
new_config.rtp.extensions.push_back(
|
||||
RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId + 1));
|
||||
new_config.rtp.extensions.push_back(
|
||||
RtpExtension(RtpExtension::kTransportSequenceNumberUri,
|
||||
kTransportSequenceNumberId + 1));
|
||||
new_config.decoder_map.emplace(1, SdpAudioFormat("foo", 8000, 1));
|
||||
auto new_config = helper.config();
|
||||
new_config.rtp.nack.rtp_history_ms = 300 + 20;
|
||||
new_config.rtp.extensions.clear();
|
||||
new_config.rtp.extensions.push_back(
|
||||
RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId + 1));
|
||||
new_config.rtp.extensions.push_back(
|
||||
RtpExtension(RtpExtension::kTransportSequenceNumberUri,
|
||||
kTransportSequenceNumberId + 1));
|
||||
new_config.decoder_map.emplace(1, SdpAudioFormat("foo", 8000, 1));
|
||||
|
||||
MockChannelReceive& channel_receive = *helper.channel_receive();
|
||||
EXPECT_CALL(channel_receive, SetNACKStatus(true, 15 + 1)).Times(1);
|
||||
EXPECT_CALL(channel_receive, SetReceiveCodecs(new_config.decoder_map));
|
||||
MockChannelReceive& channel_receive = *helper.channel_receive();
|
||||
EXPECT_CALL(channel_receive, SetNACKStatus(true, 15 + 1)).Times(1);
|
||||
EXPECT_CALL(channel_receive, SetReceiveCodecs(new_config.decoder_map));
|
||||
|
||||
recv_stream->Reconfigure(new_config);
|
||||
recv_stream->Reconfigure(new_config);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioReceiveStreamTest, ReconfigureWithFrameDecryptor) {
|
||||
ConfigHelper helper;
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(use_null_audio_processing);
|
||||
auto recv_stream = helper.CreateAudioReceiveStream();
|
||||
|
||||
auto new_config_0 = helper.config();
|
||||
rtc::scoped_refptr<FrameDecryptorInterface> mock_frame_decryptor_0(
|
||||
new rtc::RefCountedObject<MockFrameDecryptor>());
|
||||
new_config_0.frame_decryptor = mock_frame_decryptor_0;
|
||||
auto new_config_0 = helper.config();
|
||||
rtc::scoped_refptr<FrameDecryptorInterface> mock_frame_decryptor_0(
|
||||
new rtc::RefCountedObject<MockFrameDecryptor>());
|
||||
new_config_0.frame_decryptor = mock_frame_decryptor_0;
|
||||
|
||||
recv_stream->Reconfigure(new_config_0);
|
||||
recv_stream->Reconfigure(new_config_0);
|
||||
|
||||
auto new_config_1 = helper.config();
|
||||
rtc::scoped_refptr<FrameDecryptorInterface> mock_frame_decryptor_1(
|
||||
new rtc::RefCountedObject<MockFrameDecryptor>());
|
||||
new_config_1.frame_decryptor = mock_frame_decryptor_1;
|
||||
new_config_1.crypto_options.sframe.require_frame_encryption = true;
|
||||
recv_stream->Reconfigure(new_config_1);
|
||||
auto new_config_1 = helper.config();
|
||||
rtc::scoped_refptr<FrameDecryptorInterface> mock_frame_decryptor_1(
|
||||
new rtc::RefCountedObject<MockFrameDecryptor>());
|
||||
new_config_1.frame_decryptor = mock_frame_decryptor_1;
|
||||
new_config_1.crypto_options.sframe.require_frame_encryption = true;
|
||||
recv_stream->Reconfigure(new_config_1);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace test
|
||||
|
||||
@@ -490,9 +490,11 @@ webrtc::AudioSendStream::Stats AudioSendStream::GetStats(
 
   stats.typing_noise_detected = audio_state()->typing_noise_detected();
   stats.ana_statistics = channel_send_->GetANAStatistics();
-  RTC_DCHECK(audio_state_->audio_processing());
-  stats.apm_statistics =
-      audio_state_->audio_processing()->GetStatistics(has_remote_tracks);
+
+  AudioProcessing* ap = audio_state_->audio_processing();
+  if (ap) {
+    stats.apm_statistics = ap->GetStatistics(has_remote_tracks);
+  }
 
   stats.report_block_datas = std::move(call_stats.report_block_datas);
 
@@ -141,11 +141,16 @@ rtc::scoped_refptr<MockAudioEncoderFactory> SetupEncoderFactoryMock() {
 }
 
 struct ConfigHelper {
-  ConfigHelper(bool audio_bwe_enabled, bool expect_set_encoder_call)
+  ConfigHelper(bool audio_bwe_enabled,
+               bool expect_set_encoder_call,
+               bool use_null_audio_processing)
       : clock_(1000000),
         task_queue_factory_(CreateDefaultTaskQueueFactory()),
         stream_config_(/*send_transport=*/nullptr),
-        audio_processing_(new rtc::RefCountedObject<MockAudioProcessing>()),
+        audio_processing_(
+            use_null_audio_processing
+                ? nullptr
+                : new rtc::RefCountedObject<MockAudioProcessing>()),
         bitrate_allocator_(&limit_observer_),
         worker_queue_(task_queue_factory_->CreateTaskQueue(
             "ConfigHelper_worker_queue",
@@ -273,7 +278,7 @@ struct ConfigHelper {
             .WillOnce(Return(true));
   }
 
-  void SetupMockForGetStats() {
+  void SetupMockForGetStats(bool use_null_audio_processing) {
     using ::testing::DoAll;
     using ::testing::SetArgPointee;
     using ::testing::SetArgReferee;
@@ -305,10 +310,13 @@ struct ConfigHelper {
     audio_processing_stats_.residual_echo_likelihood = kResidualEchoLikelihood;
     audio_processing_stats_.residual_echo_likelihood_recent_max =
         kResidualEchoLikelihoodMax;
 
-    EXPECT_CALL(*audio_processing_, GetStatistics(true))
-        .WillRepeatedly(Return(audio_processing_stats_));
+    if (!use_null_audio_processing) {
+      ASSERT_TRUE(audio_processing_);
+      EXPECT_CALL(*audio_processing_, GetStatistics(true))
+          .WillRepeatedly(Return(audio_processing_stats_));
+    }
   }
 
   TaskQueueForTest* worker() { return &worker_queue_; }
 
 private:
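The test changes that follow all use the same structure: each test body is wrapped in a loop over both APM modes, so the null-APM code paths are exercised alongside the mocked APM. A condensed skeleton of that pattern, with a placeholder test name:

// Placeholder test illustrating the loop introduced in every updated test:
// run once with a MockAudioProcessing and once with a null AudioProcessing.
TEST(AudioSendStreamTest, SomeBehavior) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(/*audio_bwe_enabled=*/false,
                        /*expect_set_encoder_call=*/true,
                        use_null_audio_processing);
    auto send_stream = helper.CreateAudioSendStream();
    // Per-test expectations and assertions are unchanged, except that
    // APM-specific checks are skipped when use_null_audio_processing is true.
  }
}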
@@ -381,235 +389,270 @@ TEST(AudioSendStreamTest, ConfigToString) {
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, ConstructDestruct) {
|
||||
ConfigHelper helper(false, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, SendTelephoneEvent) {
|
||||
ConfigHelper helper(false, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
helper.SetupMockForSendTelephoneEvent();
|
||||
EXPECT_TRUE(send_stream->SendTelephoneEvent(
|
||||
kTelephoneEventPayloadType, kTelephoneEventPayloadFrequency,
|
||||
kTelephoneEventCode, kTelephoneEventDuration));
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
helper.SetupMockForSendTelephoneEvent();
|
||||
EXPECT_TRUE(send_stream->SendTelephoneEvent(
|
||||
kTelephoneEventPayloadType, kTelephoneEventPayloadFrequency,
|
||||
kTelephoneEventCode, kTelephoneEventDuration));
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, SetMuted) {
|
||||
ConfigHelper helper(false, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(*helper.channel_send(), SetInputMute(true));
|
||||
send_stream->SetMuted(true);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(*helper.channel_send(), SetInputMute(true));
|
||||
send_stream->SetMuted(true);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, AudioBweCorrectObjectsOnChannelProxy) {
|
||||
ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/");
|
||||
ConfigHelper helper(true, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(true, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, NoAudioBweCorrectObjectsOnChannelProxy) {
|
||||
ConfigHelper helper(false, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, GetStats) {
|
||||
ConfigHelper helper(false, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
helper.SetupMockForGetStats();
|
||||
AudioSendStream::Stats stats = send_stream->GetStats(true);
|
||||
EXPECT_EQ(kSsrc, stats.local_ssrc);
|
||||
EXPECT_EQ(kCallStats.payload_bytes_sent, stats.payload_bytes_sent);
|
||||
EXPECT_EQ(kCallStats.header_and_padding_bytes_sent,
|
||||
stats.header_and_padding_bytes_sent);
|
||||
EXPECT_EQ(kCallStats.packetsSent, stats.packets_sent);
|
||||
EXPECT_EQ(kReportBlock.cumulative_num_packets_lost, stats.packets_lost);
|
||||
EXPECT_EQ(Q8ToFloat(kReportBlock.fraction_lost), stats.fraction_lost);
|
||||
EXPECT_EQ(kIsacFormat.name, stats.codec_name);
|
||||
EXPECT_EQ(static_cast<int32_t>(kReportBlock.interarrival_jitter /
|
||||
(kIsacFormat.clockrate_hz / 1000)),
|
||||
stats.jitter_ms);
|
||||
EXPECT_EQ(kCallStats.rttMs, stats.rtt_ms);
|
||||
EXPECT_EQ(0, stats.audio_level);
|
||||
EXPECT_EQ(0, stats.total_input_energy);
|
||||
EXPECT_EQ(0, stats.total_input_duration);
|
||||
EXPECT_EQ(kEchoDelayMedian, stats.apm_statistics.delay_median_ms);
|
||||
EXPECT_EQ(kEchoDelayStdDev, stats.apm_statistics.delay_standard_deviation_ms);
|
||||
EXPECT_EQ(kEchoReturnLoss, stats.apm_statistics.echo_return_loss);
|
||||
EXPECT_EQ(kEchoReturnLossEnhancement,
|
||||
stats.apm_statistics.echo_return_loss_enhancement);
|
||||
EXPECT_EQ(kDivergentFilterFraction,
|
||||
stats.apm_statistics.divergent_filter_fraction);
|
||||
EXPECT_EQ(kResidualEchoLikelihood,
|
||||
stats.apm_statistics.residual_echo_likelihood);
|
||||
EXPECT_EQ(kResidualEchoLikelihoodMax,
|
||||
stats.apm_statistics.residual_echo_likelihood_recent_max);
|
||||
EXPECT_FALSE(stats.typing_noise_detected);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
helper.SetupMockForGetStats(use_null_audio_processing);
|
||||
AudioSendStream::Stats stats = send_stream->GetStats(true);
|
||||
EXPECT_EQ(kSsrc, stats.local_ssrc);
|
||||
EXPECT_EQ(kCallStats.payload_bytes_sent, stats.payload_bytes_sent);
|
||||
EXPECT_EQ(kCallStats.header_and_padding_bytes_sent,
|
||||
stats.header_and_padding_bytes_sent);
|
||||
EXPECT_EQ(kCallStats.packetsSent, stats.packets_sent);
|
||||
EXPECT_EQ(kReportBlock.cumulative_num_packets_lost, stats.packets_lost);
|
||||
EXPECT_EQ(Q8ToFloat(kReportBlock.fraction_lost), stats.fraction_lost);
|
||||
EXPECT_EQ(kIsacFormat.name, stats.codec_name);
|
||||
EXPECT_EQ(static_cast<int32_t>(kReportBlock.interarrival_jitter /
|
||||
(kIsacFormat.clockrate_hz / 1000)),
|
||||
stats.jitter_ms);
|
||||
EXPECT_EQ(kCallStats.rttMs, stats.rtt_ms);
|
||||
EXPECT_EQ(0, stats.audio_level);
|
||||
EXPECT_EQ(0, stats.total_input_energy);
|
||||
EXPECT_EQ(0, stats.total_input_duration);
|
||||
|
||||
if (!use_null_audio_processing) {
|
||||
EXPECT_EQ(kEchoDelayMedian, stats.apm_statistics.delay_median_ms);
|
||||
EXPECT_EQ(kEchoDelayStdDev,
|
||||
stats.apm_statistics.delay_standard_deviation_ms);
|
||||
EXPECT_EQ(kEchoReturnLoss, stats.apm_statistics.echo_return_loss);
|
||||
EXPECT_EQ(kEchoReturnLossEnhancement,
|
||||
stats.apm_statistics.echo_return_loss_enhancement);
|
||||
EXPECT_EQ(kDivergentFilterFraction,
|
||||
stats.apm_statistics.divergent_filter_fraction);
|
||||
EXPECT_EQ(kResidualEchoLikelihood,
|
||||
stats.apm_statistics.residual_echo_likelihood);
|
||||
EXPECT_EQ(kResidualEchoLikelihoodMax,
|
||||
stats.apm_statistics.residual_echo_likelihood_recent_max);
|
||||
EXPECT_FALSE(stats.typing_noise_detected);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, GetStatsAudioLevel) {
|
||||
ConfigHelper helper(false, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
helper.SetupMockForGetStats();
|
||||
EXPECT_CALL(*helper.channel_send(), ProcessAndEncodeAudioForMock(_))
|
||||
.Times(AnyNumber());
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
helper.SetupMockForGetStats(use_null_audio_processing);
|
||||
EXPECT_CALL(*helper.channel_send(), ProcessAndEncodeAudioForMock(_))
|
||||
.Times(AnyNumber());
|
||||
|
||||
constexpr int kSampleRateHz = 48000;
|
||||
constexpr size_t kNumChannels = 1;
|
||||
constexpr int kSampleRateHz = 48000;
|
||||
constexpr size_t kNumChannels = 1;
|
||||
|
||||
constexpr int16_t kSilentAudioLevel = 0;
|
||||
constexpr int16_t kMaxAudioLevel = 32767; // Audio level is [0,32767].
|
||||
constexpr int kAudioFrameDurationMs = 10;
|
||||
constexpr int16_t kSilentAudioLevel = 0;
|
||||
constexpr int16_t kMaxAudioLevel = 32767; // Audio level is [0,32767].
|
||||
constexpr int kAudioFrameDurationMs = 10;
|
||||
|
||||
// Process 10 audio frames (100 ms) of silence. After this, on the next
|
||||
// (11-th) frame, the audio level will be updated with the maximum audio level
|
||||
// of the first 11 frames. See AudioLevel.
|
||||
for (size_t i = 0; i < 10; ++i) {
|
||||
send_stream->SendAudioData(CreateAudioFrame1kHzSineWave(
|
||||
kSilentAudioLevel, kAudioFrameDurationMs, kSampleRateHz, kNumChannels));
|
||||
// Process 10 audio frames (100 ms) of silence. After this, on the next
|
||||
// (11-th) frame, the audio level will be updated with the maximum audio
|
||||
// level of the first 11 frames. See AudioLevel.
|
||||
for (size_t i = 0; i < 10; ++i) {
|
||||
send_stream->SendAudioData(
|
||||
CreateAudioFrame1kHzSineWave(kSilentAudioLevel, kAudioFrameDurationMs,
|
||||
kSampleRateHz, kNumChannels));
|
||||
}
|
||||
AudioSendStream::Stats stats = send_stream->GetStats();
|
||||
EXPECT_EQ(kSilentAudioLevel, stats.audio_level);
|
||||
EXPECT_NEAR(0.0f, stats.total_input_energy, kTolerance);
|
||||
EXPECT_NEAR(0.1f, stats.total_input_duration,
|
||||
kTolerance); // 100 ms = 0.1 s
|
||||
|
||||
// Process 10 audio frames (100 ms) of maximum audio level.
|
||||
// Note that AudioLevel updates the audio level every 11th frame, processing
|
||||
// 10 frames above was needed to see a non-zero audio level here.
|
||||
for (size_t i = 0; i < 10; ++i) {
|
||||
send_stream->SendAudioData(CreateAudioFrame1kHzSineWave(
|
||||
kMaxAudioLevel, kAudioFrameDurationMs, kSampleRateHz, kNumChannels));
|
||||
}
|
||||
stats = send_stream->GetStats();
|
||||
EXPECT_EQ(kMaxAudioLevel, stats.audio_level);
|
||||
// Energy increases by energy*duration, where energy is audio level in
|
||||
// [0,1].
|
||||
EXPECT_NEAR(0.1f, stats.total_input_energy, kTolerance); // 0.1 s of max
|
||||
EXPECT_NEAR(0.2f, stats.total_input_duration,
|
||||
kTolerance); // 200 ms = 0.2 s
|
||||
}
|
||||
AudioSendStream::Stats stats = send_stream->GetStats();
|
||||
EXPECT_EQ(kSilentAudioLevel, stats.audio_level);
|
||||
EXPECT_NEAR(0.0f, stats.total_input_energy, kTolerance);
|
||||
EXPECT_NEAR(0.1f, stats.total_input_duration, kTolerance); // 100 ms = 0.1 s
|
||||
|
||||
// Process 10 audio frames (100 ms) of maximum audio level.
|
||||
// Note that AudioLevel updates the audio level every 11th frame, processing
|
||||
// 10 frames above was needed to see a non-zero audio level here.
|
||||
for (size_t i = 0; i < 10; ++i) {
|
||||
send_stream->SendAudioData(CreateAudioFrame1kHzSineWave(
|
||||
kMaxAudioLevel, kAudioFrameDurationMs, kSampleRateHz, kNumChannels));
|
||||
}
|
||||
stats = send_stream->GetStats();
|
||||
EXPECT_EQ(kMaxAudioLevel, stats.audio_level);
|
||||
// Energy increases by energy*duration, where energy is audio level in [0,1].
|
||||
EXPECT_NEAR(0.1f, stats.total_input_energy, kTolerance); // 0.1 s of max
|
||||
EXPECT_NEAR(0.2f, stats.total_input_duration, kTolerance); // 200 ms = 0.2 s
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, SendCodecAppliesAudioNetworkAdaptor) {
|
||||
ConfigHelper helper(false, true);
|
||||
helper.config().send_codec_spec =
|
||||
AudioSendStream::Config::SendCodecSpec(0, kOpusFormat);
|
||||
const std::string kAnaConfigString = "abcde";
|
||||
const std::string kAnaReconfigString = "12345";
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, true, use_null_audio_processing);
|
||||
helper.config().send_codec_spec =
|
||||
AudioSendStream::Config::SendCodecSpec(0, kOpusFormat);
|
||||
const std::string kAnaConfigString = "abcde";
|
||||
const std::string kAnaReconfigString = "12345";
|
||||
|
||||
helper.config().rtp.extensions.push_back(RtpExtension(
|
||||
RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberId));
|
||||
helper.config().audio_network_adaptor_config = kAnaConfigString;
|
||||
helper.config().rtp.extensions.push_back(RtpExtension(
|
||||
RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberId));
|
||||
helper.config().audio_network_adaptor_config = kAnaConfigString;
|
||||
|
||||
EXPECT_CALL(helper.mock_encoder_factory(), MakeAudioEncoderMock(_, _, _, _))
|
||||
.WillOnce(Invoke([&kAnaConfigString, &kAnaReconfigString](
|
||||
int payload_type, const SdpAudioFormat& format,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id,
|
||||
std::unique_ptr<AudioEncoder>* return_value) {
|
||||
auto mock_encoder = SetupAudioEncoderMock(payload_type, format);
|
||||
EXPECT_CALL(*mock_encoder,
|
||||
EnableAudioNetworkAdaptor(StrEq(kAnaConfigString), _))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(*mock_encoder,
|
||||
EnableAudioNetworkAdaptor(StrEq(kAnaReconfigString), _))
|
||||
.WillOnce(Return(true));
|
||||
*return_value = std::move(mock_encoder);
|
||||
}));
|
||||
EXPECT_CALL(helper.mock_encoder_factory(), MakeAudioEncoderMock(_, _, _, _))
|
||||
.WillOnce(Invoke([&kAnaConfigString, &kAnaReconfigString](
|
||||
int payload_type, const SdpAudioFormat& format,
|
||||
absl::optional<AudioCodecPairId> codec_pair_id,
|
||||
std::unique_ptr<AudioEncoder>* return_value) {
|
||||
auto mock_encoder = SetupAudioEncoderMock(payload_type, format);
|
||||
EXPECT_CALL(*mock_encoder,
|
||||
EnableAudioNetworkAdaptor(StrEq(kAnaConfigString), _))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(*mock_encoder,
|
||||
EnableAudioNetworkAdaptor(StrEq(kAnaReconfigString), _))
|
||||
.WillOnce(Return(true));
|
||||
*return_value = std::move(mock_encoder);
|
||||
}));
|
||||
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
|
||||
auto stream_config = helper.config();
|
||||
stream_config.audio_network_adaptor_config = kAnaReconfigString;
|
||||
auto stream_config = helper.config();
|
||||
stream_config.audio_network_adaptor_config = kAnaReconfigString;
|
||||
|
||||
send_stream->Reconfigure(stream_config);
|
||||
send_stream->Reconfigure(stream_config);
|
||||
}
|
||||
}
|
||||
|
||||
// VAD is applied when codec is mono and the CNG frequency matches the codec
|
||||
// clock rate.
|
||||
TEST(AudioSendStreamTest, SendCodecCanApplyVad) {
|
||||
ConfigHelper helper(false, false);
|
||||
helper.config().send_codec_spec =
|
||||
AudioSendStream::Config::SendCodecSpec(9, kG722Format);
|
||||
helper.config().send_codec_spec->cng_payload_type = 105;
|
||||
using ::testing::Invoke;
|
||||
std::unique_ptr<AudioEncoder> stolen_encoder;
|
||||
EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
|
||||
.WillOnce(
|
||||
Invoke([&stolen_encoder](int payload_type,
|
||||
std::unique_ptr<AudioEncoder>* encoder) {
|
||||
stolen_encoder = std::move(*encoder);
|
||||
return true;
|
||||
}));
|
||||
EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, false, use_null_audio_processing);
|
||||
helper.config().send_codec_spec =
|
||||
AudioSendStream::Config::SendCodecSpec(9, kG722Format);
|
||||
helper.config().send_codec_spec->cng_payload_type = 105;
|
||||
using ::testing::Invoke;
|
||||
std::unique_ptr<AudioEncoder> stolen_encoder;
|
||||
EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
|
||||
.WillOnce(
|
||||
Invoke([&stolen_encoder](int payload_type,
|
||||
std::unique_ptr<AudioEncoder>* encoder) {
|
||||
stolen_encoder = std::move(*encoder);
|
||||
return true;
|
||||
}));
|
||||
EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
|
||||
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
|
||||
// We cannot truly determine if the encoder created is an AudioEncoderCng. It
|
||||
// is the only reasonable implementation that will return something from
|
||||
// ReclaimContainedEncoders, though.
|
||||
ASSERT_TRUE(stolen_encoder);
|
||||
EXPECT_FALSE(stolen_encoder->ReclaimContainedEncoders().empty());
|
||||
// We cannot truly determine if the encoder created is an AudioEncoderCng.
|
||||
// It is the only reasonable implementation that will return something from
|
||||
// ReclaimContainedEncoders, though.
|
||||
ASSERT_TRUE(stolen_encoder);
|
||||
EXPECT_FALSE(stolen_encoder->ReclaimContainedEncoders().empty());
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, DoesNotPassHigherBitrateThanMaxBitrate) {
|
||||
ConfigHelper helper(false, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(
|
||||
&BitrateAllocationUpdate::target_bitrate,
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(
|
||||
*helper.channel_send(),
|
||||
OnBitrateAllocation(
|
||||
Field(&BitrateAllocationUpdate::target_bitrate,
|
||||
Eq(DataRate::BitsPerSec(helper.config().max_bitrate_bps)))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate =
|
||||
DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
|
||||
update.packet_loss_ratio = 0;
|
||||
update.round_trip_time = TimeDelta::Millis(50);
|
||||
update.bwe_period = TimeDelta::Millis(6000);
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate =
|
||||
DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
|
||||
update.packet_loss_ratio = 0;
|
||||
update.round_trip_time = TimeDelta::Millis(50);
|
||||
update.bwe_period = TimeDelta::Millis(6000);
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, SSBweTargetInRangeRespected) {
|
||||
ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/");
|
||||
ConfigHelper helper(true, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(
|
||||
*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(
|
||||
&BitrateAllocationUpdate::target_bitrate,
|
||||
Eq(DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000)))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate =
|
||||
DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000);
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(true, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(
|
||||
*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(
|
||||
&BitrateAllocationUpdate::target_bitrate,
|
||||
Eq(DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000)))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate =
|
||||
DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000);
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, SSBweFieldTrialMinRespected) {
|
||||
ScopedFieldTrials field_trials(
|
||||
"WebRTC-Audio-SendSideBwe/Enabled/"
|
||||
"WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
|
||||
ConfigHelper helper(true, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(
|
||||
*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
|
||||
Eq(DataRate::KilobitsPerSec(6)))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate = DataRate::KilobitsPerSec(1);
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(true, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(
|
||||
*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
|
||||
Eq(DataRate::KilobitsPerSec(6)))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate = DataRate::KilobitsPerSec(1);
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, SSBweFieldTrialMaxRespected) {
|
||||
ScopedFieldTrials field_trials(
|
||||
"WebRTC-Audio-SendSideBwe/Enabled/"
|
||||
"WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
|
||||
ConfigHelper helper(true, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(
|
||||
*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
|
||||
Eq(DataRate::KilobitsPerSec(64)))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate = DataRate::KilobitsPerSec(128);
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(true, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(
|
||||
*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
|
||||
Eq(DataRate::KilobitsPerSec(64)))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate = DataRate::KilobitsPerSec(128);
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, SSBweWithOverhead) {
|
||||
@@ -617,19 +660,22 @@ TEST(AudioSendStreamTest, SSBweWithOverhead) {
|
||||
"WebRTC-Audio-SendSideBwe/Enabled/"
|
||||
"WebRTC-SendSideBwe-WithOverhead/Enabled/"
|
||||
"WebRTC-Audio-LegacyOverhead/Disabled/");
|
||||
ConfigHelper helper(true, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
|
||||
send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
|
||||
const DataRate bitrate =
|
||||
DataRate::BitsPerSec(helper.config().max_bitrate_bps) + kMaxOverheadRate;
|
||||
EXPECT_CALL(*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(
|
||||
&BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate = bitrate;
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(true, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
|
||||
send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
|
||||
const DataRate bitrate =
|
||||
DataRate::BitsPerSec(helper.config().max_bitrate_bps) +
|
||||
kMaxOverheadRate;
|
||||
EXPECT_CALL(*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(
|
||||
&BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate = bitrate;
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, SSBweWithOverheadMinRespected) {
|
||||
@@ -638,18 +684,20 @@ TEST(AudioSendStreamTest, SSBweWithOverheadMinRespected) {
|
||||
"WebRTC-SendSideBwe-WithOverhead/Enabled/"
|
||||
"WebRTC-Audio-LegacyOverhead/Disabled/"
|
||||
"WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
|
||||
ConfigHelper helper(true, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
|
||||
send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
|
||||
const DataRate bitrate = DataRate::KilobitsPerSec(6) + kMinOverheadRate;
|
||||
EXPECT_CALL(*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(
|
||||
&BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate = DataRate::KilobitsPerSec(1);
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(true, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
|
||||
send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
|
||||
const DataRate bitrate = DataRate::KilobitsPerSec(6) + kMinOverheadRate;
|
||||
EXPECT_CALL(*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(
|
||||
&BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate = DataRate::KilobitsPerSec(1);
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, SSBweWithOverheadMaxRespected) {
|
||||
@@ -658,152 +706,172 @@ TEST(AudioSendStreamTest, SSBweWithOverheadMaxRespected) {
|
||||
"WebRTC-SendSideBwe-WithOverhead/Enabled/"
|
||||
"WebRTC-Audio-LegacyOverhead/Disabled/"
|
||||
"WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
|
||||
ConfigHelper helper(true, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
|
||||
send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
|
||||
const DataRate bitrate = DataRate::KilobitsPerSec(64) + kMaxOverheadRate;
|
||||
EXPECT_CALL(*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(
|
||||
&BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate = DataRate::KilobitsPerSec(128);
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(true, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
|
||||
send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
|
||||
const DataRate bitrate = DataRate::KilobitsPerSec(64) + kMaxOverheadRate;
|
||||
EXPECT_CALL(*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(
|
||||
&BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate = DataRate::KilobitsPerSec(128);
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, ProbingIntervalOnBitrateUpdated) {
|
||||
ConfigHelper helper(false, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
|
||||
EXPECT_CALL(*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(&BitrateAllocationUpdate::bwe_period,
|
||||
Eq(TimeDelta::Millis(5000)))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate =
|
||||
DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
|
||||
update.packet_loss_ratio = 0;
|
||||
update.round_trip_time = TimeDelta::Millis(50);
|
||||
update.bwe_period = TimeDelta::Millis(5000);
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
EXPECT_CALL(*helper.channel_send(),
|
||||
OnBitrateAllocation(Field(&BitrateAllocationUpdate::bwe_period,
|
||||
Eq(TimeDelta::Millis(5000)))));
|
||||
BitrateAllocationUpdate update;
|
||||
update.target_bitrate =
|
||||
DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
|
||||
update.packet_loss_ratio = 0;
|
||||
update.round_trip_time = TimeDelta::Millis(50);
|
||||
update.bwe_period = TimeDelta::Millis(5000);
|
||||
helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
|
||||
RTC_FROM_HERE);
|
||||
}
|
||||
}
|
||||
|
||||
// Test that AudioSendStream doesn't recreate the encoder unnecessarily.
|
||||
TEST(AudioSendStreamTest, DontRecreateEncoder) {
|
||||
ConfigHelper helper(false, false);
|
||||
// WillOnce is (currently) the default used by ConfigHelper if asked to set an
|
||||
// expectation for SetEncoder. Since this behavior is essential for this test
|
||||
// to be correct, it's instead set-up manually here. Otherwise a simple change
|
||||
// to ConfigHelper (say to WillRepeatedly) would silently make this test
|
||||
// useless.
|
||||
EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
|
||||
.WillOnce(Return());
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, false, use_null_audio_processing);
|
||||
// WillOnce is (currently) the default used by ConfigHelper if asked to set
|
||||
// an expectation for SetEncoder. Since this behavior is essential for this
|
||||
// test to be correct, it's instead set-up manually here. Otherwise a simple
|
||||
// change to ConfigHelper (say to WillRepeatedly) would silently make this
|
||||
// test useless.
|
||||
EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
|
||||
.WillOnce(Return());
|
||||
|
||||
EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
|
||||
EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
|
||||
|
||||
helper.config().send_codec_spec =
|
||||
AudioSendStream::Config::SendCodecSpec(9, kG722Format);
|
||||
helper.config().send_codec_spec->cng_payload_type = 105;
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
send_stream->Reconfigure(helper.config());
|
||||
helper.config().send_codec_spec =
|
||||
AudioSendStream::Config::SendCodecSpec(9, kG722Format);
|
||||
helper.config().send_codec_spec->cng_payload_type = 105;
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
send_stream->Reconfigure(helper.config());
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, ReconfigureTransportCcResetsFirst) {
|
||||
ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/");
|
||||
ConfigHelper helper(false, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
auto new_config = helper.config();
|
||||
ConfigHelper::AddBweToConfig(&new_config);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
auto new_config = helper.config();
|
||||
ConfigHelper::AddBweToConfig(&new_config);
|
||||
|
||||
EXPECT_CALL(*helper.rtp_rtcp(),
|
||||
RegisterRtpHeaderExtension(TransportSequenceNumber::kUri,
|
||||
kTransportSequenceNumberId))
|
||||
.Times(1);
|
||||
{
|
||||
::testing::InSequence seq;
|
||||
EXPECT_CALL(*helper.channel_send(), ResetSenderCongestionControlObjects())
|
||||
.Times(1);
|
||||
EXPECT_CALL(*helper.channel_send(), RegisterSenderCongestionControlObjects(
|
||||
helper.transport(), Ne(nullptr)))
|
||||
EXPECT_CALL(*helper.rtp_rtcp(),
|
||||
RegisterRtpHeaderExtension(TransportSequenceNumber::kUri,
|
||||
kTransportSequenceNumberId))
|
||||
.Times(1);
|
||||
{
|
||||
::testing::InSequence seq;
|
||||
EXPECT_CALL(*helper.channel_send(), ResetSenderCongestionControlObjects())
|
||||
.Times(1);
|
||||
EXPECT_CALL(*helper.channel_send(),
|
||||
RegisterSenderCongestionControlObjects(helper.transport(),
|
||||
Ne(nullptr)))
|
||||
.Times(1);
|
||||
}
|
||||
|
||||
send_stream->Reconfigure(new_config);
|
||||
}
|
||||
|
||||
send_stream->Reconfigure(new_config);
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, OnTransportOverheadChanged) {
|
||||
ConfigHelper helper(false, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
auto new_config = helper.config();
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
auto new_config = helper.config();
|
||||
|
||||
// CallEncoder will be called on overhead change.
|
||||
EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(1);
|
||||
// CallEncoder will be called on overhead change.
|
||||
EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(1);
|
||||
|
||||
const size_t transport_overhead_per_packet_bytes = 333;
|
||||
send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);
|
||||
const size_t transport_overhead_per_packet_bytes = 333;
|
||||
send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);
|
||||
|
||||
EXPECT_EQ(transport_overhead_per_packet_bytes,
|
||||
send_stream->TestOnlyGetPerPacketOverheadBytes());
|
||||
EXPECT_EQ(transport_overhead_per_packet_bytes,
|
||||
send_stream->TestOnlyGetPerPacketOverheadBytes());
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, OnAudioOverheadChanged) {
|
||||
ConfigHelper helper(false, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
auto new_config = helper.config();
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
auto new_config = helper.config();
|
||||
|
||||
// CallEncoder will be called on overhead change.
|
||||
EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(1);
|
||||
// CallEncoder will be called on overhead change.
|
||||
EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(1);
|
||||
|
||||
const size_t audio_overhead_per_packet_bytes = 555;
|
||||
send_stream->OnOverheadChanged(audio_overhead_per_packet_bytes);
|
||||
EXPECT_EQ(audio_overhead_per_packet_bytes,
|
||||
send_stream->TestOnlyGetPerPacketOverheadBytes());
|
||||
const size_t audio_overhead_per_packet_bytes = 555;
|
||||
send_stream->OnOverheadChanged(audio_overhead_per_packet_bytes);
|
||||
EXPECT_EQ(audio_overhead_per_packet_bytes,
|
||||
send_stream->TestOnlyGetPerPacketOverheadBytes());
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioSendStreamTest, OnAudioAndTransportOverheadChanged) {
|
||||
ConfigHelper helper(false, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
auto new_config = helper.config();
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
auto new_config = helper.config();
|
||||
|
||||
// CallEncoder will be called when each of overhead changes.
|
||||
EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(2);
|
||||
// CallEncoder will be called when each of overhead changes.
|
||||
EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(2);
|
||||
|
||||
const size_t transport_overhead_per_packet_bytes = 333;
|
||||
send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);
|
||||
const size_t transport_overhead_per_packet_bytes = 333;
|
||||
send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);
|
||||
|
||||
const size_t audio_overhead_per_packet_bytes = 555;
|
||||
send_stream->OnOverheadChanged(audio_overhead_per_packet_bytes);
|
||||
const size_t audio_overhead_per_packet_bytes = 555;
|
||||
send_stream->OnOverheadChanged(audio_overhead_per_packet_bytes);
|
||||
|
||||
EXPECT_EQ(
|
||||
transport_overhead_per_packet_bytes + audio_overhead_per_packet_bytes,
|
||||
send_stream->TestOnlyGetPerPacketOverheadBytes());
|
||||
EXPECT_EQ(
|
||||
transport_overhead_per_packet_bytes + audio_overhead_per_packet_bytes,
|
||||
send_stream->TestOnlyGetPerPacketOverheadBytes());
|
||||
}
|
||||
}
|
||||
|
||||
// Validates that reconfiguring the AudioSendStream with a frame encryptor
// correctly applies the new configuration without crashing.
|
||||
TEST(AudioSendStreamTest, ReconfigureWithFrameEncryptor) {
|
||||
ConfigHelper helper(false, true);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
auto new_config = helper.config();
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(false, true, use_null_audio_processing);
|
||||
auto send_stream = helper.CreateAudioSendStream();
|
||||
auto new_config = helper.config();
|
||||
|
||||
rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_0(
|
||||
new rtc::RefCountedObject<MockFrameEncryptor>());
|
||||
new_config.frame_encryptor = mock_frame_encryptor_0;
|
||||
EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr))).Times(1);
|
||||
send_stream->Reconfigure(new_config);
|
||||
rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_0(
|
||||
new rtc::RefCountedObject<MockFrameEncryptor>());
|
||||
new_config.frame_encryptor = mock_frame_encryptor_0;
|
||||
EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr)))
|
||||
.Times(1);
|
||||
send_stream->Reconfigure(new_config);
|
||||
|
||||
// Not updating the frame encryptor shouldn't force it to reconfigure.
|
||||
EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(_)).Times(0);
|
||||
send_stream->Reconfigure(new_config);
|
||||
// Not updating the frame encryptor shouldn't force it to reconfigure.
|
||||
EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(_)).Times(0);
|
||||
send_stream->Reconfigure(new_config);
|
||||
|
||||
// Updating frame encryptor to a new object should force a call to the proxy.
|
||||
rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_1(
|
||||
new rtc::RefCountedObject<MockFrameEncryptor>());
|
||||
new_config.frame_encryptor = mock_frame_encryptor_1;
|
||||
new_config.crypto_options.sframe.require_frame_encryption = true;
|
||||
EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr))).Times(1);
|
||||
send_stream->Reconfigure(new_config);
|
||||
// Updating frame encryptor to a new object should force a call to the
|
||||
// proxy.
|
||||
rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_1(
|
||||
new rtc::RefCountedObject<MockFrameEncryptor>());
|
||||
new_config.frame_encryptor = mock_frame_encryptor_1;
|
||||
new_config.crypto_options.sframe.require_frame_encryption = true;
|
||||
EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr)))
|
||||
.Times(1);
|
||||
send_stream->Reconfigure(new_config);
|
||||
}
|
||||
}
|
||||
} // namespace test
|
||||
} // namespace webrtc
|
||||
|
||||
@ -41,7 +41,6 @@ AudioState::~AudioState() {
|
||||
}
|
||||
|
||||
AudioProcessing* AudioState::audio_processing() {
|
||||
RTC_DCHECK(config_.audio_processing);
|
||||
return config_.audio_processing.get();
|
||||
}
|
||||
|
||||
|
||||
@ -31,10 +31,14 @@ constexpr int kSampleRate = 16000;
|
||||
constexpr int kNumberOfChannels = 1;
|
||||
|
||||
struct ConfigHelper {
|
||||
ConfigHelper() : audio_mixer(AudioMixerImpl::Create()) {
|
||||
explicit ConfigHelper(bool use_null_audio_processing)
|
||||
: audio_mixer(AudioMixerImpl::Create()) {
|
||||
audio_state_config.audio_mixer = audio_mixer;
|
||||
audio_state_config.audio_processing =
|
||||
new rtc::RefCountedObject<testing::NiceMock<MockAudioProcessing>>();
|
||||
use_null_audio_processing
|
||||
? nullptr
|
||||
: new rtc::RefCountedObject<
|
||||
testing::NiceMock<MockAudioProcessing>>();
|
||||
audio_state_config.audio_device_module =
|
||||
new rtc::RefCountedObject<MockAudioDeviceModule>();
|
||||
}
|
||||
@ -88,162 +92,183 @@ std::vector<uint32_t> ComputeChannelLevels(AudioFrame* audio_frame) {
|
||||
} // namespace
|
||||
|
||||
TEST(AudioStateTest, Create) {
|
||||
ConfigHelper helper;
|
||||
auto audio_state = AudioState::Create(helper.config());
|
||||
EXPECT_TRUE(audio_state.get());
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(use_null_audio_processing);
|
||||
auto audio_state = AudioState::Create(helper.config());
|
||||
EXPECT_TRUE(audio_state.get());
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioStateTest, ConstructDestruct) {
|
||||
ConfigHelper helper;
|
||||
rtc::scoped_refptr<internal::AudioState> audio_state(
|
||||
new rtc::RefCountedObject<internal::AudioState>(helper.config()));
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(use_null_audio_processing);
|
||||
rtc::scoped_refptr<internal::AudioState> audio_state(
|
||||
new rtc::RefCountedObject<internal::AudioState>(helper.config()));
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioStateTest, RecordedAudioArrivesAtSingleStream) {
|
||||
ConfigHelper helper;
|
||||
rtc::scoped_refptr<internal::AudioState> audio_state(
|
||||
new rtc::RefCountedObject<internal::AudioState>(helper.config()));
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(use_null_audio_processing);
|
||||
rtc::scoped_refptr<internal::AudioState> audio_state(
|
||||
new rtc::RefCountedObject<internal::AudioState>(helper.config()));
|
||||
|
||||
MockAudioSendStream stream;
|
||||
audio_state->AddSendingStream(&stream, 8000, 2);
|
||||
MockAudioSendStream stream;
|
||||
audio_state->AddSendingStream(&stream, 8000, 2);
|
||||
|
||||
EXPECT_CALL(
|
||||
stream,
|
||||
SendAudioDataForMock(::testing::AllOf(
|
||||
::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(8000)),
|
||||
::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(2u)))))
|
||||
.WillOnce(
|
||||
// Verify that channels are not swapped by default.
|
||||
::testing::Invoke([](AudioFrame* audio_frame) {
|
||||
auto levels = ComputeChannelLevels(audio_frame);
|
||||
EXPECT_LT(0u, levels[0]);
|
||||
EXPECT_EQ(0u, levels[1]);
|
||||
}));
|
||||
MockAudioProcessing* ap =
|
||||
static_cast<MockAudioProcessing*>(audio_state->audio_processing());
|
||||
EXPECT_CALL(*ap, set_stream_delay_ms(0));
|
||||
EXPECT_CALL(*ap, set_stream_key_pressed(false));
|
||||
EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_)));
|
||||
EXPECT_CALL(
|
||||
stream,
|
||||
SendAudioDataForMock(::testing::AllOf(
|
||||
::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(8000)),
|
||||
::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(2u)))))
|
||||
.WillOnce(
|
||||
// Verify that channels are not swapped by default.
|
||||
::testing::Invoke([](AudioFrame* audio_frame) {
|
||||
auto levels = ComputeChannelLevels(audio_frame);
|
||||
EXPECT_LT(0u, levels[0]);
|
||||
EXPECT_EQ(0u, levels[1]);
|
||||
}));
|
||||
MockAudioProcessing* ap = use_null_audio_processing
|
||||
? nullptr
|
||||
: static_cast<MockAudioProcessing*>(
|
||||
audio_state->audio_processing());
|
||||
if (ap) {
|
||||
EXPECT_CALL(*ap, set_stream_delay_ms(0));
|
||||
EXPECT_CALL(*ap, set_stream_key_pressed(false));
|
||||
EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_)));
|
||||
}
|
||||
|
||||
constexpr int kSampleRate = 16000;
|
||||
constexpr size_t kNumChannels = 2;
|
||||
auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
|
||||
uint32_t new_mic_level = 667;
|
||||
audio_state->audio_transport()->RecordedDataIsAvailable(
|
||||
&audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
|
||||
kSampleRate, 0, 0, 0, false, new_mic_level);
|
||||
EXPECT_EQ(667u, new_mic_level);
|
||||
constexpr int kSampleRate = 16000;
|
||||
constexpr size_t kNumChannels = 2;
|
||||
auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
|
||||
uint32_t new_mic_level = 667;
|
||||
audio_state->audio_transport()->RecordedDataIsAvailable(
|
||||
&audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
|
||||
kSampleRate, 0, 0, 0, false, new_mic_level);
|
||||
EXPECT_EQ(667u, new_mic_level);
|
||||
|
||||
audio_state->RemoveSendingStream(&stream);
|
||||
audio_state->RemoveSendingStream(&stream);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioStateTest, RecordedAudioArrivesAtMultipleStreams) {
|
||||
ConfigHelper helper;
|
||||
rtc::scoped_refptr<internal::AudioState> audio_state(
|
||||
new rtc::RefCountedObject<internal::AudioState>(helper.config()));
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(use_null_audio_processing);
|
||||
rtc::scoped_refptr<internal::AudioState> audio_state(
|
||||
new rtc::RefCountedObject<internal::AudioState>(helper.config()));
|
||||
|
||||
MockAudioSendStream stream_1;
|
||||
MockAudioSendStream stream_2;
|
||||
audio_state->AddSendingStream(&stream_1, 8001, 2);
|
||||
audio_state->AddSendingStream(&stream_2, 32000, 1);
|
||||
MockAudioSendStream stream_1;
|
||||
MockAudioSendStream stream_2;
|
||||
audio_state->AddSendingStream(&stream_1, 8001, 2);
|
||||
audio_state->AddSendingStream(&stream_2, 32000, 1);
|
||||
|
||||
EXPECT_CALL(
|
||||
stream_1,
|
||||
SendAudioDataForMock(::testing::AllOf(
|
||||
::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(16000)),
|
||||
::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u)))))
|
||||
.WillOnce(
|
||||
// Verify that there is output signal.
|
||||
::testing::Invoke([](AudioFrame* audio_frame) {
|
||||
auto levels = ComputeChannelLevels(audio_frame);
|
||||
EXPECT_LT(0u, levels[0]);
|
||||
}));
|
||||
EXPECT_CALL(
|
||||
stream_2,
|
||||
SendAudioDataForMock(::testing::AllOf(
|
||||
::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(16000)),
|
||||
::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u)))))
|
||||
.WillOnce(
|
||||
// Verify that there is output signal.
|
||||
::testing::Invoke([](AudioFrame* audio_frame) {
|
||||
auto levels = ComputeChannelLevels(audio_frame);
|
||||
EXPECT_LT(0u, levels[0]);
|
||||
}));
|
||||
MockAudioProcessing* ap =
|
||||
static_cast<MockAudioProcessing*>(audio_state->audio_processing());
|
||||
EXPECT_CALL(*ap, set_stream_delay_ms(5));
|
||||
EXPECT_CALL(*ap, set_stream_key_pressed(true));
|
||||
EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_)));
|
||||
EXPECT_CALL(
|
||||
stream_1,
|
||||
SendAudioDataForMock(::testing::AllOf(
|
||||
::testing::Field(&AudioFrame::sample_rate_hz_,
|
||||
::testing::Eq(16000)),
|
||||
::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u)))))
|
||||
.WillOnce(
|
||||
// Verify that there is output signal.
|
||||
::testing::Invoke([](AudioFrame* audio_frame) {
|
||||
auto levels = ComputeChannelLevels(audio_frame);
|
||||
EXPECT_LT(0u, levels[0]);
|
||||
}));
|
||||
EXPECT_CALL(
|
||||
stream_2,
|
||||
SendAudioDataForMock(::testing::AllOf(
|
||||
::testing::Field(&AudioFrame::sample_rate_hz_,
|
||||
::testing::Eq(16000)),
|
||||
::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u)))))
|
||||
.WillOnce(
|
||||
// Verify that there is output signal.
|
||||
::testing::Invoke([](AudioFrame* audio_frame) {
|
||||
auto levels = ComputeChannelLevels(audio_frame);
|
||||
EXPECT_LT(0u, levels[0]);
|
||||
}));
|
||||
MockAudioProcessing* ap =
|
||||
static_cast<MockAudioProcessing*>(audio_state->audio_processing());
|
||||
if (ap) {
|
||||
EXPECT_CALL(*ap, set_stream_delay_ms(5));
|
||||
EXPECT_CALL(*ap, set_stream_key_pressed(true));
|
||||
EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_)));
|
||||
}
|
||||
|
||||
constexpr int kSampleRate = 16000;
|
||||
constexpr size_t kNumChannels = 1;
|
||||
auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
|
||||
uint32_t new_mic_level = 667;
|
||||
audio_state->audio_transport()->RecordedDataIsAvailable(
|
||||
&audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
|
||||
kSampleRate, 5, 0, 0, true, new_mic_level);
|
||||
EXPECT_EQ(667u, new_mic_level);
|
||||
constexpr int kSampleRate = 16000;
|
||||
constexpr size_t kNumChannels = 1;
|
||||
auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
|
||||
uint32_t new_mic_level = 667;
|
||||
audio_state->audio_transport()->RecordedDataIsAvailable(
|
||||
&audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
|
||||
kSampleRate, 5, 0, 0, true, new_mic_level);
|
||||
EXPECT_EQ(667u, new_mic_level);
|
||||
|
||||
audio_state->RemoveSendingStream(&stream_1);
|
||||
audio_state->RemoveSendingStream(&stream_2);
|
||||
audio_state->RemoveSendingStream(&stream_1);
|
||||
audio_state->RemoveSendingStream(&stream_2);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioStateTest, EnableChannelSwap) {
|
||||
constexpr int kSampleRate = 16000;
|
||||
constexpr size_t kNumChannels = 2;
|
||||
|
||||
ConfigHelper helper;
|
||||
rtc::scoped_refptr<internal::AudioState> audio_state(
|
||||
new rtc::RefCountedObject<internal::AudioState>(helper.config()));
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(use_null_audio_processing);
|
||||
rtc::scoped_refptr<internal::AudioState> audio_state(
|
||||
new rtc::RefCountedObject<internal::AudioState>(helper.config()));
|
||||
|
||||
audio_state->SetStereoChannelSwapping(true);
|
||||
audio_state->SetStereoChannelSwapping(true);
|
||||
|
||||
MockAudioSendStream stream;
|
||||
audio_state->AddSendingStream(&stream, kSampleRate, kNumChannels);
|
||||
MockAudioSendStream stream;
|
||||
audio_state->AddSendingStream(&stream, kSampleRate, kNumChannels);
|
||||
|
||||
EXPECT_CALL(stream, SendAudioDataForMock(_))
|
||||
.WillOnce(
|
||||
// Verify that channels are swapped.
|
||||
::testing::Invoke([](AudioFrame* audio_frame) {
|
||||
auto levels = ComputeChannelLevels(audio_frame);
|
||||
EXPECT_EQ(0u, levels[0]);
|
||||
EXPECT_LT(0u, levels[1]);
|
||||
}));
|
||||
EXPECT_CALL(stream, SendAudioDataForMock(_))
|
||||
.WillOnce(
|
||||
// Verify that channels are swapped.
|
||||
::testing::Invoke([](AudioFrame* audio_frame) {
|
||||
auto levels = ComputeChannelLevels(audio_frame);
|
||||
EXPECT_EQ(0u, levels[0]);
|
||||
EXPECT_LT(0u, levels[1]);
|
||||
}));
|
||||
|
||||
auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
|
||||
uint32_t new_mic_level = 667;
|
||||
audio_state->audio_transport()->RecordedDataIsAvailable(
|
||||
&audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
|
||||
kSampleRate, 0, 0, 0, false, new_mic_level);
|
||||
EXPECT_EQ(667u, new_mic_level);
|
||||
auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
|
||||
uint32_t new_mic_level = 667;
|
||||
audio_state->audio_transport()->RecordedDataIsAvailable(
|
||||
&audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
|
||||
kSampleRate, 0, 0, 0, false, new_mic_level);
|
||||
EXPECT_EQ(667u, new_mic_level);
|
||||
|
||||
audio_state->RemoveSendingStream(&stream);
|
||||
audio_state->RemoveSendingStream(&stream);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(AudioStateTest,
|
||||
QueryingTransportForAudioShouldResultInGetAudioCallOnMixerSource) {
|
||||
ConfigHelper helper;
|
||||
auto audio_state = AudioState::Create(helper.config());
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
ConfigHelper helper(use_null_audio_processing);
|
||||
auto audio_state = AudioState::Create(helper.config());
|
||||
|
||||
FakeAudioSource fake_source;
|
||||
helper.mixer()->AddSource(&fake_source);
|
||||
FakeAudioSource fake_source;
|
||||
helper.mixer()->AddSource(&fake_source);
|
||||
|
||||
EXPECT_CALL(fake_source, GetAudioFrameWithInfo(_, _))
|
||||
.WillOnce(
|
||||
::testing::Invoke([](int sample_rate_hz, AudioFrame* audio_frame) {
|
||||
audio_frame->sample_rate_hz_ = sample_rate_hz;
|
||||
audio_frame->samples_per_channel_ = sample_rate_hz / 100;
|
||||
audio_frame->num_channels_ = kNumberOfChannels;
|
||||
return AudioMixer::Source::AudioFrameInfo::kNormal;
|
||||
}));
|
||||
EXPECT_CALL(fake_source, GetAudioFrameWithInfo(_, _))
|
||||
.WillOnce(
|
||||
::testing::Invoke([](int sample_rate_hz, AudioFrame* audio_frame) {
|
||||
audio_frame->sample_rate_hz_ = sample_rate_hz;
|
||||
audio_frame->samples_per_channel_ = sample_rate_hz / 100;
|
||||
audio_frame->num_channels_ = kNumberOfChannels;
|
||||
return AudioMixer::Source::AudioFrameInfo::kNormal;
|
||||
}));
|
||||
|
||||
int16_t audio_buffer[kSampleRate / 100 * kNumberOfChannels];
|
||||
size_t n_samples_out;
|
||||
int64_t elapsed_time_ms;
|
||||
int64_t ntp_time_ms;
|
||||
audio_state->audio_transport()->NeedMorePlayData(
|
||||
kSampleRate / 100, kNumberOfChannels * 2, kNumberOfChannels, kSampleRate,
|
||||
audio_buffer, n_samples_out, &elapsed_time_ms, &ntp_time_ms);
|
||||
int16_t audio_buffer[kSampleRate / 100 * kNumberOfChannels];
|
||||
size_t n_samples_out;
|
||||
int64_t elapsed_time_ms;
|
||||
int64_t ntp_time_ms;
|
||||
audio_state->audio_transport()->NeedMorePlayData(
|
||||
kSampleRate / 100, kNumberOfChannels * 2, kNumberOfChannels,
|
||||
kSampleRate, audio_buffer, n_samples_out, &elapsed_time_ms,
|
||||
&ntp_time_ms);
|
||||
}
|
||||
}
|
||||
} // namespace test
|
||||
} // namespace webrtc
|
||||
|
||||
@ -49,13 +49,15 @@ void ProcessCaptureFrame(uint32_t delay_ms,
|
||||
bool swap_stereo_channels,
|
||||
AudioProcessing* audio_processing,
|
||||
AudioFrame* audio_frame) {
|
||||
RTC_DCHECK(audio_processing);
|
||||
RTC_DCHECK(audio_frame);
|
||||
audio_processing->set_stream_delay_ms(delay_ms);
|
||||
audio_processing->set_stream_key_pressed(key_pressed);
|
||||
int error = ProcessAudioFrame(audio_processing, audio_frame);
|
||||
if (audio_processing) {
|
||||
audio_processing->set_stream_delay_ms(delay_ms);
|
||||
audio_processing->set_stream_key_pressed(key_pressed);
|
||||
int error = ProcessAudioFrame(audio_processing, audio_frame);
|
||||
|
||||
RTC_DCHECK_EQ(0, error) << "ProcessStream() error: " << error;
|
||||
}
|
||||
|
||||
RTC_DCHECK_EQ(0, error) << "ProcessStream() error: " << error;
|
||||
if (swap_stereo_channels) {
|
||||
AudioFrameOperations::SwapStereoChannels(audio_frame);
|
||||
}
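After this hunk, capture-side processing is skipped entirely when no APM is present, while channel swapping still runs. A condensed sketch of how ProcessCaptureFrame reads after the change (assuming the unconditional RTC_DCHECK on audio_processing is dropped, since the pointer may now legitimately be null):

    void ProcessCaptureFrame(uint32_t delay_ms,
                             bool key_pressed,
                             bool swap_stereo_channels,
                             AudioProcessing* audio_processing,  // May be null.
                             AudioFrame* audio_frame) {
      RTC_DCHECK(audio_frame);
      if (audio_processing) {
        audio_processing->set_stream_delay_ms(delay_ms);
        audio_processing->set_stream_key_pressed(key_pressed);
        int error = ProcessAudioFrame(audio_processing, audio_frame);
        RTC_DCHECK_EQ(0, error) << "ProcessStream() error: " << error;
      }
      if (swap_stereo_channels) {
        AudioFrameOperations::SwapStereoChannels(audio_frame);
      }
    }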
|
||||
@ -85,7 +87,6 @@ AudioTransportImpl::AudioTransportImpl(AudioMixer* mixer,
|
||||
AudioProcessing* audio_processing)
|
||||
: audio_processing_(audio_processing), mixer_(mixer) {
|
||||
RTC_DCHECK(mixer);
|
||||
RTC_DCHECK(audio_processing);
|
||||
}
|
||||
|
||||
AudioTransportImpl::~AudioTransportImpl() {}
|
||||
@ -137,7 +138,8 @@ int32_t AudioTransportImpl::RecordedDataIsAvailable(
|
||||
// if we're using this feature or not.
|
||||
// TODO(solenberg): GetConfig() takes a lock. Work around that.
|
||||
bool typing_detected = false;
|
||||
if (audio_processing_->GetConfig().voice_detection.enabled) {
|
||||
if (audio_processing_ &&
|
||||
audio_processing_->GetConfig().voice_detection.enabled) {
|
||||
if (audio_frame->vad_activity_ != AudioFrame::kVadUnknown) {
|
||||
bool vad_active = audio_frame->vad_activity_ == AudioFrame::kVadActive;
|
||||
typing_detected = typing_detection_.Process(key_pressed, vad_active);
|
||||
@ -192,8 +194,11 @@ int32_t AudioTransportImpl::NeedMorePlayData(const size_t nSamples,
|
||||
*elapsed_time_ms = mixed_frame_.elapsed_time_ms_;
|
||||
*ntp_time_ms = mixed_frame_.ntp_time_ms_;
|
||||
|
||||
const auto error = ProcessReverseAudioFrame(audio_processing_, &mixed_frame_);
|
||||
RTC_DCHECK_EQ(error, AudioProcessing::kNoError);
|
||||
if (audio_processing_) {
|
||||
const auto error =
|
||||
ProcessReverseAudioFrame(audio_processing_, &mixed_frame_);
|
||||
RTC_DCHECK_EQ(error, AudioProcessing::kNoError);
|
||||
}
|
||||
|
||||
nSamplesOut = Resample(mixed_frame_, samplesPerSec, &render_resampler_,
|
||||
static_cast<int16_t*>(audioSamples));
|
||||
|
||||
@ -35,13 +35,15 @@
|
||||
namespace {
|
||||
|
||||
struct CallHelper {
|
||||
CallHelper() {
|
||||
explicit CallHelper(bool use_null_audio_processing) {
|
||||
task_queue_factory_ = webrtc::CreateDefaultTaskQueueFactory();
|
||||
webrtc::AudioState::Config audio_state_config;
|
||||
audio_state_config.audio_mixer =
|
||||
new rtc::RefCountedObject<webrtc::test::MockAudioMixer>();
|
||||
audio_state_config.audio_processing =
|
||||
new rtc::RefCountedObject<webrtc::test::MockAudioProcessing>();
|
||||
use_null_audio_processing
|
||||
? nullptr
|
||||
: new rtc::RefCountedObject<webrtc::test::MockAudioProcessing>();
|
||||
audio_state_config.audio_device_module =
|
||||
new rtc::RefCountedObject<webrtc::test::MockAudioDeviceModule>();
|
||||
webrtc::Call::Config config(&event_log_);
|
||||
@ -64,236 +66,261 @@ struct CallHelper {
|
||||
namespace webrtc {
|
||||
|
||||
TEST(CallTest, ConstructDestruct) {
|
||||
CallHelper call;
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
CallHelper call(use_null_audio_processing);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(CallTest, CreateDestroy_AudioSendStream) {
|
||||
CallHelper call;
|
||||
MockTransport send_transport;
|
||||
AudioSendStream::Config config(&send_transport);
|
||||
config.rtp.ssrc = 42;
|
||||
AudioSendStream* stream = call->CreateAudioSendStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
call->DestroyAudioSendStream(stream);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
CallHelper call(use_null_audio_processing);
|
||||
MockTransport send_transport;
|
||||
AudioSendStream::Config config(&send_transport);
|
||||
config.rtp.ssrc = 42;
|
||||
AudioSendStream* stream = call->CreateAudioSendStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
call->DestroyAudioSendStream(stream);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(CallTest, CreateDestroy_AudioReceiveStream) {
|
||||
CallHelper call;
|
||||
AudioReceiveStream::Config config;
|
||||
MockTransport rtcp_send_transport;
|
||||
config.rtp.remote_ssrc = 42;
|
||||
config.rtcp_send_transport = &rtcp_send_transport;
|
||||
config.decoder_factory =
|
||||
new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
|
||||
AudioReceiveStream* stream = call->CreateAudioReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
call->DestroyAudioReceiveStream(stream);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
CallHelper call(use_null_audio_processing);
|
||||
AudioReceiveStream::Config config;
|
||||
MockTransport rtcp_send_transport;
|
||||
config.rtp.remote_ssrc = 42;
|
||||
config.rtcp_send_transport = &rtcp_send_transport;
|
||||
config.decoder_factory =
|
||||
new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
|
||||
AudioReceiveStream* stream = call->CreateAudioReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
call->DestroyAudioReceiveStream(stream);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(CallTest, CreateDestroy_AudioSendStreams) {
|
||||
CallHelper call;
|
||||
MockTransport send_transport;
|
||||
AudioSendStream::Config config(&send_transport);
|
||||
std::list<AudioSendStream*> streams;
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
|
||||
config.rtp.ssrc = ssrc;
|
||||
AudioSendStream* stream = call->CreateAudioSendStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
if (ssrc & 1) {
|
||||
streams.push_back(stream);
|
||||
} else {
|
||||
streams.push_front(stream);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
CallHelper call(use_null_audio_processing);
|
||||
MockTransport send_transport;
|
||||
AudioSendStream::Config config(&send_transport);
|
||||
std::list<AudioSendStream*> streams;
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
|
||||
config.rtp.ssrc = ssrc;
|
||||
AudioSendStream* stream = call->CreateAudioSendStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
if (ssrc & 1) {
|
||||
streams.push_back(stream);
|
||||
} else {
|
||||
streams.push_front(stream);
|
||||
}
|
||||
}
|
||||
for (auto s : streams) {
|
||||
call->DestroyAudioSendStream(s);
|
||||
}
|
||||
streams.clear();
|
||||
}
|
||||
for (auto s : streams) {
|
||||
call->DestroyAudioSendStream(s);
|
||||
}
|
||||
streams.clear();
|
||||
}
|
||||
}
|
||||
|
||||
TEST(CallTest, CreateDestroy_AudioReceiveStreams) {
|
||||
CallHelper call;
|
||||
AudioReceiveStream::Config config;
|
||||
MockTransport rtcp_send_transport;
|
||||
config.rtcp_send_transport = &rtcp_send_transport;
|
||||
config.decoder_factory =
|
||||
new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
|
||||
std::list<AudioReceiveStream*> streams;
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
|
||||
config.rtp.remote_ssrc = ssrc;
|
||||
AudioReceiveStream* stream = call->CreateAudioReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
if (ssrc & 1) {
|
||||
streams.push_back(stream);
|
||||
} else {
|
||||
streams.push_front(stream);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
CallHelper call(use_null_audio_processing);
|
||||
AudioReceiveStream::Config config;
|
||||
MockTransport rtcp_send_transport;
|
||||
config.rtcp_send_transport = &rtcp_send_transport;
|
||||
config.decoder_factory =
|
||||
new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
|
||||
std::list<AudioReceiveStream*> streams;
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
|
||||
config.rtp.remote_ssrc = ssrc;
|
||||
AudioReceiveStream* stream = call->CreateAudioReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
if (ssrc & 1) {
|
||||
streams.push_back(stream);
|
||||
} else {
|
||||
streams.push_front(stream);
|
||||
}
|
||||
}
|
||||
for (auto s : streams) {
|
||||
call->DestroyAudioReceiveStream(s);
|
||||
}
|
||||
streams.clear();
|
||||
}
|
||||
for (auto s : streams) {
|
||||
call->DestroyAudioReceiveStream(s);
|
||||
}
|
||||
streams.clear();
|
||||
}
|
||||
}
|
||||
|
||||
TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_RecvFirst) {
|
||||
CallHelper call;
|
||||
AudioReceiveStream::Config recv_config;
|
||||
MockTransport rtcp_send_transport;
|
||||
recv_config.rtp.remote_ssrc = 42;
|
||||
recv_config.rtp.local_ssrc = 777;
|
||||
recv_config.rtcp_send_transport = &rtcp_send_transport;
|
||||
recv_config.decoder_factory =
|
||||
new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
|
||||
AudioReceiveStream* recv_stream = call->CreateAudioReceiveStream(recv_config);
|
||||
EXPECT_NE(recv_stream, nullptr);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
CallHelper call(use_null_audio_processing);
|
||||
AudioReceiveStream::Config recv_config;
|
||||
MockTransport rtcp_send_transport;
|
||||
recv_config.rtp.remote_ssrc = 42;
|
||||
recv_config.rtp.local_ssrc = 777;
|
||||
recv_config.rtcp_send_transport = &rtcp_send_transport;
|
||||
recv_config.decoder_factory =
|
||||
new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
|
||||
AudioReceiveStream* recv_stream =
|
||||
call->CreateAudioReceiveStream(recv_config);
|
||||
EXPECT_NE(recv_stream, nullptr);
|
||||
|
||||
MockTransport send_transport;
|
||||
AudioSendStream::Config send_config(&send_transport);
|
||||
send_config.rtp.ssrc = 777;
|
||||
AudioSendStream* send_stream = call->CreateAudioSendStream(send_config);
|
||||
EXPECT_NE(send_stream, nullptr);
|
||||
MockTransport send_transport;
|
||||
AudioSendStream::Config send_config(&send_transport);
|
||||
send_config.rtp.ssrc = 777;
|
||||
AudioSendStream* send_stream = call->CreateAudioSendStream(send_config);
|
||||
EXPECT_NE(send_stream, nullptr);
|
||||
|
||||
internal::AudioReceiveStream* internal_recv_stream =
|
||||
static_cast<internal::AudioReceiveStream*>(recv_stream);
|
||||
EXPECT_EQ(send_stream,
|
||||
internal_recv_stream->GetAssociatedSendStreamForTesting());
|
||||
internal::AudioReceiveStream* internal_recv_stream =
|
||||
static_cast<internal::AudioReceiveStream*>(recv_stream);
|
||||
EXPECT_EQ(send_stream,
|
||||
internal_recv_stream->GetAssociatedSendStreamForTesting());
|
||||
|
||||
call->DestroyAudioSendStream(send_stream);
|
||||
EXPECT_EQ(nullptr, internal_recv_stream->GetAssociatedSendStreamForTesting());
|
||||
call->DestroyAudioSendStream(send_stream);
|
||||
EXPECT_EQ(nullptr,
|
||||
internal_recv_stream->GetAssociatedSendStreamForTesting());
|
||||
|
||||
call->DestroyAudioReceiveStream(recv_stream);
|
||||
call->DestroyAudioReceiveStream(recv_stream);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_SendFirst) {
|
||||
CallHelper call;
|
||||
MockTransport send_transport;
|
||||
AudioSendStream::Config send_config(&send_transport);
|
||||
send_config.rtp.ssrc = 777;
|
||||
AudioSendStream* send_stream = call->CreateAudioSendStream(send_config);
|
||||
EXPECT_NE(send_stream, nullptr);
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
CallHelper call(use_null_audio_processing);
|
||||
MockTransport send_transport;
|
||||
AudioSendStream::Config send_config(&send_transport);
|
||||
send_config.rtp.ssrc = 777;
|
||||
AudioSendStream* send_stream = call->CreateAudioSendStream(send_config);
|
||||
EXPECT_NE(send_stream, nullptr);
|
||||
|
||||
AudioReceiveStream::Config recv_config;
|
||||
MockTransport rtcp_send_transport;
|
||||
recv_config.rtp.remote_ssrc = 42;
|
||||
recv_config.rtp.local_ssrc = 777;
|
||||
recv_config.rtcp_send_transport = &rtcp_send_transport;
|
||||
recv_config.decoder_factory =
|
||||
new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
|
||||
AudioReceiveStream* recv_stream = call->CreateAudioReceiveStream(recv_config);
|
||||
EXPECT_NE(recv_stream, nullptr);
|
||||
AudioReceiveStream::Config recv_config;
|
||||
MockTransport rtcp_send_transport;
|
||||
recv_config.rtp.remote_ssrc = 42;
|
||||
recv_config.rtp.local_ssrc = 777;
|
||||
recv_config.rtcp_send_transport = &rtcp_send_transport;
|
||||
recv_config.decoder_factory =
|
||||
new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
|
||||
AudioReceiveStream* recv_stream =
|
||||
call->CreateAudioReceiveStream(recv_config);
|
||||
EXPECT_NE(recv_stream, nullptr);
|
||||
|
||||
internal::AudioReceiveStream* internal_recv_stream =
|
||||
static_cast<internal::AudioReceiveStream*>(recv_stream);
|
||||
EXPECT_EQ(send_stream,
|
||||
internal_recv_stream->GetAssociatedSendStreamForTesting());
|
||||
internal::AudioReceiveStream* internal_recv_stream =
|
||||
static_cast<internal::AudioReceiveStream*>(recv_stream);
|
||||
EXPECT_EQ(send_stream,
|
||||
internal_recv_stream->GetAssociatedSendStreamForTesting());
|
||||
|
||||
call->DestroyAudioReceiveStream(recv_stream);
|
||||
call->DestroyAudioReceiveStream(recv_stream);
|
||||
|
||||
call->DestroyAudioSendStream(send_stream);
|
||||
call->DestroyAudioSendStream(send_stream);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(CallTest, CreateDestroy_FlexfecReceiveStream) {
|
||||
CallHelper call;
|
||||
MockTransport rtcp_send_transport;
|
||||
FlexfecReceiveStream::Config config(&rtcp_send_transport);
|
||||
config.payload_type = 118;
|
||||
config.remote_ssrc = 38837212;
|
||||
config.protected_media_ssrcs = {27273};
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
CallHelper call(use_null_audio_processing);
|
||||
MockTransport rtcp_send_transport;
|
||||
FlexfecReceiveStream::Config config(&rtcp_send_transport);
|
||||
config.payload_type = 118;
|
||||
config.remote_ssrc = 38837212;
|
||||
config.protected_media_ssrcs = {27273};
|
||||
|
||||
FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
call->DestroyFlexfecReceiveStream(stream);
|
||||
FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
call->DestroyFlexfecReceiveStream(stream);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(CallTest, CreateDestroy_FlexfecReceiveStreams) {
|
||||
CallHelper call;
|
||||
MockTransport rtcp_send_transport;
|
||||
FlexfecReceiveStream::Config config(&rtcp_send_transport);
|
||||
config.payload_type = 118;
|
||||
std::list<FlexfecReceiveStream*> streams;
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
CallHelper call(use_null_audio_processing);
|
||||
MockTransport rtcp_send_transport;
|
||||
FlexfecReceiveStream::Config config(&rtcp_send_transport);
|
||||
config.payload_type = 118;
|
||||
std::list<FlexfecReceiveStream*> streams;
|
||||
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
|
||||
config.remote_ssrc = ssrc;
|
||||
config.protected_media_ssrcs = {ssrc + 1};
|
||||
FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
if (ssrc & 1) {
|
||||
streams.push_back(stream);
|
||||
} else {
|
||||
streams.push_front(stream);
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
|
||||
config.remote_ssrc = ssrc;
|
||||
config.protected_media_ssrcs = {ssrc + 1};
|
||||
FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
if (ssrc & 1) {
|
||||
streams.push_back(stream);
|
||||
} else {
|
||||
streams.push_front(stream);
|
||||
}
|
||||
}
|
||||
for (auto s : streams) {
|
||||
call->DestroyFlexfecReceiveStream(s);
|
||||
}
|
||||
streams.clear();
|
||||
}
|
||||
for (auto s : streams) {
|
||||
call->DestroyFlexfecReceiveStream(s);
|
||||
}
|
||||
streams.clear();
|
||||
}
|
||||
}
|
||||
|
||||
TEST(CallTest, MultipleFlexfecReceiveStreamsProtectingSingleVideoStream) {
|
||||
CallHelper call;
|
||||
MockTransport rtcp_send_transport;
|
||||
FlexfecReceiveStream::Config config(&rtcp_send_transport);
|
||||
config.payload_type = 118;
|
||||
config.protected_media_ssrcs = {1324234};
|
||||
FlexfecReceiveStream* stream;
|
||||
std::list<FlexfecReceiveStream*> streams;
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
CallHelper call(use_null_audio_processing);
|
||||
MockTransport rtcp_send_transport;
|
||||
FlexfecReceiveStream::Config config(&rtcp_send_transport);
|
||||
config.payload_type = 118;
|
||||
config.protected_media_ssrcs = {1324234};
|
||||
FlexfecReceiveStream* stream;
|
||||
std::list<FlexfecReceiveStream*> streams;
|
||||
|
||||
config.remote_ssrc = 838383;
|
||||
stream = call->CreateFlexfecReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
streams.push_back(stream);
|
||||
config.remote_ssrc = 838383;
|
||||
stream = call->CreateFlexfecReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
streams.push_back(stream);
|
||||
|
||||
config.remote_ssrc = 424993;
|
||||
stream = call->CreateFlexfecReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
streams.push_back(stream);
|
||||
config.remote_ssrc = 424993;
|
||||
stream = call->CreateFlexfecReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
streams.push_back(stream);
|
||||
|
||||
config.remote_ssrc = 99383;
|
||||
stream = call->CreateFlexfecReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
streams.push_back(stream);
|
||||
config.remote_ssrc = 99383;
|
||||
stream = call->CreateFlexfecReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
streams.push_back(stream);
|
||||
|
||||
config.remote_ssrc = 5548;
|
||||
stream = call->CreateFlexfecReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
streams.push_back(stream);
|
||||
config.remote_ssrc = 5548;
|
||||
stream = call->CreateFlexfecReceiveStream(config);
|
||||
EXPECT_NE(stream, nullptr);
|
||||
streams.push_back(stream);
|
||||
|
||||
for (auto s : streams) {
|
||||
call->DestroyFlexfecReceiveStream(s);
|
||||
for (auto s : streams) {
|
||||
call->DestroyFlexfecReceiveStream(s);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TEST(CallTest, RecreatingAudioStreamWithSameSsrcReusesRtpState) {
|
||||
constexpr uint32_t kSSRC = 12345;
|
||||
CallHelper call;
|
||||
for (bool use_null_audio_processing : {false, true}) {
|
||||
CallHelper call(use_null_audio_processing);
|
||||
|
||||
auto create_stream_and_get_rtp_state = [&](uint32_t ssrc) {
|
||||
MockTransport send_transport;
|
||||
AudioSendStream::Config config(&send_transport);
|
||||
config.rtp.ssrc = ssrc;
|
||||
AudioSendStream* stream = call->CreateAudioSendStream(config);
|
||||
const RtpState rtp_state =
|
||||
static_cast<internal::AudioSendStream*>(stream)->GetRtpState();
|
||||
call->DestroyAudioSendStream(stream);
|
||||
return rtp_state;
|
||||
};
|
||||
auto create_stream_and_get_rtp_state = [&](uint32_t ssrc) {
|
||||
MockTransport send_transport;
|
||||
AudioSendStream::Config config(&send_transport);
|
||||
config.rtp.ssrc = ssrc;
|
||||
AudioSendStream* stream = call->CreateAudioSendStream(config);
|
||||
const RtpState rtp_state =
|
||||
static_cast<internal::AudioSendStream*>(stream)->GetRtpState();
|
||||
call->DestroyAudioSendStream(stream);
|
||||
return rtp_state;
|
||||
};
|
||||
|
||||
const RtpState rtp_state1 = create_stream_and_get_rtp_state(kSSRC);
|
||||
const RtpState rtp_state2 = create_stream_and_get_rtp_state(kSSRC);
|
||||
const RtpState rtp_state1 = create_stream_and_get_rtp_state(kSSRC);
|
||||
const RtpState rtp_state2 = create_stream_and_get_rtp_state(kSSRC);
|
||||
|
||||
EXPECT_EQ(rtp_state1.sequence_number, rtp_state2.sequence_number);
|
||||
EXPECT_EQ(rtp_state1.start_timestamp, rtp_state2.start_timestamp);
|
||||
EXPECT_EQ(rtp_state1.timestamp, rtp_state2.timestamp);
|
||||
EXPECT_EQ(rtp_state1.capture_time_ms, rtp_state2.capture_time_ms);
|
||||
EXPECT_EQ(rtp_state1.last_timestamp_time_ms,
|
||||
rtp_state2.last_timestamp_time_ms);
|
||||
EXPECT_EQ(rtp_state1.media_has_been_sent, rtp_state2.media_has_been_sent);
|
||||
EXPECT_EQ(rtp_state1.sequence_number, rtp_state2.sequence_number);
|
||||
EXPECT_EQ(rtp_state1.start_timestamp, rtp_state2.start_timestamp);
|
||||
EXPECT_EQ(rtp_state1.timestamp, rtp_state2.timestamp);
|
||||
EXPECT_EQ(rtp_state1.capture_time_ms, rtp_state2.capture_time_ms);
|
||||
EXPECT_EQ(rtp_state1.last_timestamp_time_ms,
|
||||
rtp_state2.last_timestamp_time_ms);
|
||||
EXPECT_EQ(rtp_state1.media_has_been_sent, rtp_state2.media_has_been_sent);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
@ -206,7 +206,6 @@ WebRtcVoiceEngine::WebRtcVoiceEngine(
|
||||
RTC_LOG(LS_INFO) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
|
||||
RTC_DCHECK(decoder_factory);
|
||||
RTC_DCHECK(encoder_factory);
|
||||
RTC_DCHECK(audio_processing);
|
||||
// The rest of our initialization will happen in Init.
|
||||
}
|
||||
|
||||
@ -458,6 +457,14 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
|
||||
*options.audio_jitter_buffer_enable_rtx_handling;
|
||||
}
|
||||
|
||||
webrtc::AudioProcessing* ap = apm();
|
||||
if (!ap) {
|
||||
RTC_LOG(LS_INFO)
|
||||
<< "No audio processing module present. No software-provided effects "
|
||||
"(AEC, NS, AGC, ...) are activated";
|
||||
return true;
|
||||
}
|
||||
|
||||
webrtc::Config config;
|
||||
|
||||
if (options.experimental_ns) {
|
||||
@ -469,7 +476,7 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
|
||||
new webrtc::ExperimentalNs(*experimental_ns_));
|
||||
}
|
||||
|
||||
webrtc::AudioProcessing::Config apm_config = apm()->GetConfig();
|
||||
webrtc::AudioProcessing::Config apm_config = ap->GetConfig();
|
||||
|
||||
if (options.echo_cancellation) {
|
||||
apm_config.echo_canceller.enabled = *options.echo_cancellation;
|
||||
@ -524,8 +531,8 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
|
||||
apm_config.voice_detection.enabled = *options.typing_detection;
|
||||
}
|
||||
|
||||
apm()->SetExtraOptions(config);
|
||||
apm()->ApplyConfig(apm_config);
|
||||
ap->SetExtraOptions(config);
|
||||
ap->ApplyConfig(apm_config);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -571,18 +578,34 @@ void WebRtcVoiceEngine::UnregisterChannel(WebRtcVoiceMediaChannel* channel) {
|
||||
bool WebRtcVoiceEngine::StartAecDump(webrtc::FileWrapper file,
|
||||
int64_t max_size_bytes) {
|
||||
RTC_DCHECK(worker_thread_checker_.IsCurrent());
|
||||
|
||||
webrtc::AudioProcessing* ap = apm();
|
||||
if (!ap) {
|
||||
RTC_LOG(LS_WARNING)
|
||||
<< "Attempting to start aecdump when no audio processing module is "
|
||||
"present, hence no aecdump is started.";
|
||||
return false;
|
||||
}
|
||||
|
||||
auto aec_dump = webrtc::AecDumpFactory::Create(
|
||||
std::move(file), max_size_bytes, low_priority_worker_queue_.get());
|
||||
if (!aec_dump) {
|
||||
return false;
|
||||
}
|
||||
apm()->AttachAecDump(std::move(aec_dump));
|
||||
|
||||
ap->AttachAecDump(std::move(aec_dump));
|
||||
return true;
|
||||
}
|
||||
|
||||
void WebRtcVoiceEngine::StopAecDump() {
|
||||
RTC_DCHECK(worker_thread_checker_.IsCurrent());
|
||||
apm()->DetachAecDump();
|
||||
webrtc::AudioProcessing* ap = apm();
|
||||
if (ap) {
|
||||
ap->DetachAecDump();
|
||||
} else {
|
||||
RTC_LOG(LS_WARNING) << "Attempting to stop aecdump when no audio "
|
||||
"processing module is present";
|
||||
}
|
||||
}
|
||||
|
||||
webrtc::AudioDeviceModule* WebRtcVoiceEngine::adm() {
|
||||
@ -593,7 +616,6 @@ webrtc::AudioDeviceModule* WebRtcVoiceEngine::adm() {
|
||||
|
||||
webrtc::AudioProcessing* WebRtcVoiceEngine::apm() const {
|
||||
RTC_DCHECK(worker_thread_checker_.IsCurrent());
|
||||
RTC_DCHECK(apm_);
|
||||
return apm_.get();
|
||||
}
|
||||
|
||||
@ -2141,7 +2163,10 @@ bool WebRtcVoiceMediaChannel::MuteStream(uint32_t ssrc, bool muted) {
|
||||
for (const auto& kv : send_streams_) {
|
||||
all_muted = all_muted && kv.second->muted();
|
||||
}
|
||||
engine()->apm()->set_output_will_be_muted(all_muted);
|
||||
webrtc::AudioProcessing* ap = engine()->apm();
|
||||
if (ap) {
|
||||
ap->set_output_will_be_muted(all_muted);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@ -116,6 +116,7 @@ rtc_library("audio_processing") {
|
||||
visibility = [ "*" ]
|
||||
configs += [ ":apm_debug_dump" ]
|
||||
sources = [
|
||||
"audio_processing_builder_impl.cc",
|
||||
"audio_processing_impl.cc",
|
||||
"audio_processing_impl.h",
|
||||
"common.h",
|
||||
@ -169,6 +170,7 @@ rtc_library("audio_processing") {
|
||||
"../../rtc_base:deprecation",
|
||||
"../../rtc_base:gtest_prod",
|
||||
"../../rtc_base:ignore_wundef",
|
||||
"../../rtc_base:refcount",
|
||||
"../../rtc_base:safe_minmax",
|
||||
"../../rtc_base:sanitizer",
|
||||
"../../rtc_base/system:rtc_export",
|
||||
@ -556,41 +558,6 @@ if (rtc_include_tests) {
|
||||
} # audioproc_f_impl
|
||||
}
|
||||
|
||||
rtc_library("audioproc_test_utils") {
|
||||
visibility = [ "*" ]
|
||||
testonly = true
|
||||
sources = [
|
||||
"test/audio_buffer_tools.cc",
|
||||
"test/audio_buffer_tools.h",
|
||||
"test/bitexactness_tools.cc",
|
||||
"test/bitexactness_tools.h",
|
||||
"test/performance_timer.cc",
|
||||
"test/performance_timer.h",
|
||||
"test/simulator_buffers.cc",
|
||||
"test/simulator_buffers.h",
|
||||
"test/test_utils.cc",
|
||||
"test/test_utils.h",
|
||||
]
|
||||
|
||||
deps = [
|
||||
":api",
|
||||
":audio_buffer",
|
||||
":audio_processing",
|
||||
"../../api:array_view",
|
||||
"../../api/audio:audio_frame_api",
|
||||
"../../common_audio",
|
||||
"../../rtc_base:checks",
|
||||
"../../rtc_base:rtc_base_approved",
|
||||
"../../rtc_base/system:arch",
|
||||
"../../system_wrappers",
|
||||
"../../test:fileutils",
|
||||
"../../test:test_support",
|
||||
"../audio_coding:neteq_input_audio_tools",
|
||||
"//testing/gtest",
|
||||
"//third_party/abseil-cpp/absl/types:optional",
|
||||
]
|
||||
}
|
||||
|
||||
if (rtc_enable_protobuf) {
|
||||
proto_library("audioproc_unittest_proto") {
|
||||
sources = [ "test/unittest.proto" ]
|
||||
@ -629,3 +596,42 @@ if (rtc_include_tests) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rtc_library("audioproc_test_utils") {
|
||||
visibility = [ "*" ]
|
||||
testonly = true
|
||||
sources = [
|
||||
"test/audio_buffer_tools.cc",
|
||||
"test/audio_buffer_tools.h",
|
||||
"test/audio_processing_builder_for_testing.cc",
|
||||
"test/audio_processing_builder_for_testing.h",
|
||||
"test/bitexactness_tools.cc",
|
||||
"test/bitexactness_tools.h",
|
||||
"test/performance_timer.cc",
|
||||
"test/performance_timer.h",
|
||||
"test/simulator_buffers.cc",
|
||||
"test/simulator_buffers.h",
|
||||
"test/test_utils.cc",
|
||||
"test/test_utils.h",
|
||||
]
|
||||
|
||||
configs += [ ":apm_debug_dump" ]
|
||||
|
||||
deps = [
|
||||
":api",
|
||||
":audio_buffer",
|
||||
":audio_processing",
|
||||
"../../api:array_view",
|
||||
"../../api/audio:audio_frame_api",
|
||||
"../../common_audio",
|
||||
"../../rtc_base:checks",
|
||||
"../../rtc_base:rtc_base_approved",
|
||||
"../../rtc_base/system:arch",
|
||||
"../../system_wrappers",
|
||||
"../../test:fileutils",
|
||||
"../../test:test_support",
|
||||
"../audio_coding:neteq_input_audio_tools",
|
||||
"//testing/gtest",
|
||||
"//third_party/abseil-cpp/absl/types:optional",
|
||||
]
|
||||
}
|
||||
|
||||
@ -20,31 +20,35 @@ rtc_source_set("aec_dump") {
|
||||
]
|
||||
}
|
||||
|
||||
rtc_library("mock_aec_dump") {
|
||||
testonly = true
|
||||
sources = [
|
||||
"mock_aec_dump.cc",
|
||||
"mock_aec_dump.h",
|
||||
]
|
||||
if (rtc_include_tests) {
|
||||
rtc_library("mock_aec_dump") {
|
||||
testonly = true
|
||||
sources = [
|
||||
"mock_aec_dump.cc",
|
||||
"mock_aec_dump.h",
|
||||
]
|
||||
|
||||
deps = [
|
||||
"../",
|
||||
"../../../test:test_support",
|
||||
]
|
||||
}
|
||||
deps = [
|
||||
"..:audioproc_test_utils",
|
||||
"../",
|
||||
"../../../test:test_support",
|
||||
]
|
||||
}
|
||||
|
||||
rtc_library("mock_aec_dump_unittests") {
|
||||
testonly = true
|
||||
configs += [ "..:apm_debug_dump" ]
|
||||
sources = [ "aec_dump_integration_test.cc" ]
|
||||
rtc_library("mock_aec_dump_unittests") {
|
||||
testonly = true
|
||||
configs += [ "..:apm_debug_dump" ]
|
||||
sources = [ "aec_dump_integration_test.cc" ]
|
||||
|
||||
deps = [
|
||||
":mock_aec_dump",
|
||||
"..:api",
|
||||
"../",
|
||||
"../../../rtc_base:rtc_base_approved",
|
||||
"//testing/gtest",
|
||||
]
|
||||
deps = [
|
||||
":mock_aec_dump",
|
||||
"..:api",
|
||||
"..:audioproc_test_utils",
|
||||
"../",
|
||||
"../../../rtc_base:rtc_base_approved",
|
||||
"//testing/gtest",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
if (rtc_enable_protobuf) {
|
||||
@ -75,20 +79,22 @@ if (rtc_enable_protobuf) {
|
||||
deps += [ "../:audioproc_debug_proto" ]
|
||||
}
|
||||
|
||||
rtc_library("aec_dump_unittests") {
|
||||
testonly = true
|
||||
defines = []
|
||||
deps = [
|
||||
":aec_dump",
|
||||
":aec_dump_impl",
|
||||
"..:audioproc_debug_proto",
|
||||
"../",
|
||||
"../../../rtc_base:task_queue_for_test",
|
||||
"../../../test:fileutils",
|
||||
"../../../test:test_support",
|
||||
"//testing/gtest",
|
||||
]
|
||||
sources = [ "aec_dump_unittest.cc" ]
|
||||
if (rtc_include_tests) {
|
||||
rtc_library("aec_dump_unittests") {
|
||||
testonly = true
|
||||
defines = []
|
||||
deps = [
|
||||
":aec_dump",
|
||||
":aec_dump_impl",
|
||||
"..:audioproc_debug_proto",
|
||||
"../",
|
||||
"../../../rtc_base:task_queue_for_test",
|
||||
"../../../test:fileutils",
|
||||
"../../../test:test_support",
|
||||
"//testing/gtest",
|
||||
]
|
||||
sources = [ "aec_dump_unittest.cc" ]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -15,6 +15,7 @@
|
||||
#include "modules/audio_processing/aec_dump/mock_aec_dump.h"
|
||||
#include "modules/audio_processing/audio_processing_impl.h"
|
||||
#include "modules/audio_processing/include/audio_processing.h"
|
||||
#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
|
||||
|
||||
using ::testing::_;
|
||||
using ::testing::AtLeast;
|
||||
@ -25,7 +26,7 @@ namespace {
|
||||
std::unique_ptr<webrtc::AudioProcessing> CreateAudioProcessing() {
|
||||
webrtc::Config config;
|
||||
std::unique_ptr<webrtc::AudioProcessing> apm(
|
||||
webrtc::AudioProcessingBuilder().Create(config));
|
||||
webrtc::AudioProcessingBuilderForTesting().Create(config));
|
||||
RTC_DCHECK(apm);
|
||||
return apm;
|
||||
}
|
||||
|
||||
51	modules/audio_processing/audio_processing_builder_impl.cc	Normal file
@ -0,0 +1,51 @@
/*
 *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_processing/include/audio_processing.h"

#include <memory>

#include "modules/audio_processing/audio_processing_impl.h"
#include "rtc_base/ref_counted_object.h"

namespace webrtc {

AudioProcessingBuilder::AudioProcessingBuilder() = default;
AudioProcessingBuilder::~AudioProcessingBuilder() = default;

AudioProcessing* AudioProcessingBuilder::Create() {
  webrtc::Config config;
  return Create(config);
}

AudioProcessing* AudioProcessingBuilder::Create(const webrtc::Config& config) {
#ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE

  // Implementation returning a null pointer, for use when the APM is excluded
  // from the build.
  return nullptr;

#else

  // Standard implementation.
  AudioProcessingImpl* apm = new rtc::RefCountedObject<AudioProcessingImpl>(
      config, std::move(capture_post_processing_),
      std::move(render_pre_processing_), std::move(echo_control_factory_),
      std::move(echo_detector_), std::move(capture_analyzer_));
  if (apm->Initialize() != AudioProcessing::kNoError) {
    delete apm;
    apm = nullptr;
  }
  return apm;

#endif
}

}  // namespace webrtc
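With this new translation unit, Create() compiles down to returning a null pointer whenever WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE is defined (the define added by the new GN flag), so every non-test call site has to tolerate a missing APM. A minimal caller-side sketch; BuildAudioState is a hypothetical helper, not part of this CL:

    rtc::scoped_refptr<webrtc::AudioState> BuildAudioState(
        rtc::scoped_refptr<webrtc::AudioMixer> mixer,
        rtc::scoped_refptr<webrtc::AudioDeviceModule> adm) {
      webrtc::AudioState::Config config;
      config.audio_mixer = mixer;
      config.audio_device_module = adm;
      // Null when built with rtc_exclude_audio_processing_module = true; the
      // audio pipeline then runs without software AEC/NS/AGC.
      config.audio_processing = webrtc::AudioProcessingBuilder().Create();
      return webrtc::AudioState::Create(config);
    }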
@ -229,56 +229,6 @@ bool AudioProcessingImpl::SubmoduleStates::HighPassFilteringRequired() const {
|
||||
noise_suppressor_enabled_;
|
||||
}
|
||||
|
||||
AudioProcessingBuilder::AudioProcessingBuilder() = default;
|
||||
AudioProcessingBuilder::~AudioProcessingBuilder() = default;
|
||||
|
||||
AudioProcessingBuilder& AudioProcessingBuilder::SetCapturePostProcessing(
|
||||
std::unique_ptr<CustomProcessing> capture_post_processing) {
|
||||
capture_post_processing_ = std::move(capture_post_processing);
|
||||
return *this;
|
||||
}
|
||||
|
||||
AudioProcessingBuilder& AudioProcessingBuilder::SetRenderPreProcessing(
|
||||
std::unique_ptr<CustomProcessing> render_pre_processing) {
|
||||
render_pre_processing_ = std::move(render_pre_processing);
|
||||
return *this;
|
||||
}
|
||||
|
||||
AudioProcessingBuilder& AudioProcessingBuilder::SetCaptureAnalyzer(
|
||||
std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) {
|
||||
capture_analyzer_ = std::move(capture_analyzer);
|
||||
return *this;
|
||||
}
|
||||
|
||||
AudioProcessingBuilder& AudioProcessingBuilder::SetEchoControlFactory(
|
||||
std::unique_ptr<EchoControlFactory> echo_control_factory) {
|
||||
echo_control_factory_ = std::move(echo_control_factory);
|
||||
return *this;
|
||||
}
|
||||
|
||||
AudioProcessingBuilder& AudioProcessingBuilder::SetEchoDetector(
|
||||
rtc::scoped_refptr<EchoDetector> echo_detector) {
|
||||
echo_detector_ = std::move(echo_detector);
|
||||
return *this;
|
||||
}
|
||||
|
||||
AudioProcessing* AudioProcessingBuilder::Create() {
|
||||
webrtc::Config config;
|
||||
return Create(config);
|
||||
}
|
||||
|
||||
AudioProcessing* AudioProcessingBuilder::Create(const webrtc::Config& config) {
|
||||
AudioProcessingImpl* apm = new rtc::RefCountedObject<AudioProcessingImpl>(
|
||||
config, std::move(capture_post_processing_),
|
||||
std::move(render_pre_processing_), std::move(echo_control_factory_),
|
||||
std::move(echo_detector_), std::move(capture_analyzer_));
|
||||
if (apm->Initialize() != AudioProcessing::kNoError) {
|
||||
delete apm;
|
||||
apm = nullptr;
|
||||
}
|
||||
return apm;
|
||||
}
|
||||
|
||||
AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config)
|
||||
: AudioProcessingImpl(config,
|
||||
/*capture_post_processor=*/nullptr,
|
||||
|
||||
@ -14,6 +14,7 @@
|
||||
|
||||
#include "api/array_view.h"
|
||||
#include "modules/audio_processing/audio_processing_impl.h"
|
||||
#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
|
||||
#include "modules/audio_processing/test/test_utils.h"
|
||||
#include "rtc_base/critical_section.h"
|
||||
#include "rtc_base/event.h"
|
||||
@ -496,7 +497,7 @@ AudioProcessingImplLockTest::AudioProcessingImplLockTest()
|
||||
this,
|
||||
"stats",
|
||||
rtc::kNormalPriority),
|
||||
apm_(AudioProcessingBuilder().Create()),
|
||||
apm_(AudioProcessingBuilderForTesting().Create()),
|
||||
render_thread_state_(kMaxFrameSize,
|
||||
&rand_gen_,
|
||||
&render_call_event_,
|
||||
|
||||
@ -15,6 +15,7 @@
|
||||
|
||||
#include "api/scoped_refptr.h"
|
||||
#include "modules/audio_processing/include/audio_processing.h"
|
||||
#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
|
||||
#include "modules/audio_processing/test/echo_control_mock.h"
|
||||
#include "modules/audio_processing/test/test_utils.h"
|
||||
#include "rtc_base/checks.h"
|
||||
@ -167,7 +168,8 @@ TEST(AudioProcessingImplTest, AudioParameterChangeTriggersInit) {
|
||||
}
|
||||
|
||||
TEST(AudioProcessingImplTest, UpdateCapturePreGainRuntimeSetting) {
|
||||
std::unique_ptr<AudioProcessing> apm(AudioProcessingBuilder().Create());
|
||||
std::unique_ptr<AudioProcessing> apm(
|
||||
AudioProcessingBuilderForTesting().Create());
|
||||
webrtc::AudioProcessing::Config apm_config;
|
||||
apm_config.pre_amplifier.enabled = true;
|
||||
apm_config.pre_amplifier.fixed_gain_factor = 1.f;
|
||||
@ -205,7 +207,7 @@ TEST(AudioProcessingImplTest,
|
||||
const auto* echo_control_factory_ptr = echo_control_factory.get();
|
||||
|
||||
std::unique_ptr<AudioProcessing> apm(
|
||||
AudioProcessingBuilder()
|
||||
AudioProcessingBuilderForTesting()
|
||||
.SetEchoControlFactory(std::move(echo_control_factory))
|
||||
.Create());
|
||||
// Disable AGC.
|
||||
@ -248,7 +250,7 @@ TEST(AudioProcessingImplTest,
|
||||
const auto* echo_control_factory_ptr = echo_control_factory.get();
|
||||
|
||||
std::unique_ptr<AudioProcessing> apm(
|
||||
AudioProcessingBuilder()
|
||||
AudioProcessingBuilderForTesting()
|
||||
.SetEchoControlFactory(std::move(echo_control_factory))
|
||||
.Create());
|
||||
webrtc::AudioProcessing::Config apm_config;
|
||||
@ -294,7 +296,7 @@ TEST(AudioProcessingImplTest, EchoControllerObservesPlayoutVolumeChange) {
|
||||
const auto* echo_control_factory_ptr = echo_control_factory.get();
|
||||
|
||||
std::unique_ptr<AudioProcessing> apm(
|
||||
AudioProcessingBuilder()
|
||||
AudioProcessingBuilderForTesting()
|
||||
.SetEchoControlFactory(std::move(echo_control_factory))
|
||||
.Create());
|
||||
// Disable AGC.
|
||||
@ -353,7 +355,7 @@ TEST(AudioProcessingImplTest, RenderPreProcessorBeforeEchoDetector) {
|
||||
new TestRenderPreProcessor());
|
||||
// Create APM injecting the test echo detector and render pre-processor.
|
||||
std::unique_ptr<AudioProcessing> apm(
|
||||
AudioProcessingBuilder()
|
||||
AudioProcessingBuilderForTesting()
|
||||
.SetEchoDetector(test_echo_detector)
|
||||
.SetRenderPreProcessing(std::move(test_render_pre_processor))
|
||||
.Create());
|
||||
|
||||
@ -15,6 +15,7 @@

#include "api/array_view.h"
#include "modules/audio_processing/audio_processing_impl.h"
#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/test_utils.h"
#include "rtc_base/atomic_ops.h"
#include "rtc_base/event.h"
@ -486,28 +487,28 @@ class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
int num_capture_channels = 1;
switch (simulation_config_.simulation_settings) {
case SettingsType::kDefaultApmMobile: {
apm_.reset(AudioProcessingBuilder().Create());
apm_.reset(AudioProcessingBuilderForTesting().Create());
ASSERT_TRUE(!!apm_);
set_default_mobile_apm_runtime_settings(apm_.get());
break;
}
case SettingsType::kDefaultApmDesktop: {
Config config;
apm_.reset(AudioProcessingBuilder().Create(config));
apm_.reset(AudioProcessingBuilderForTesting().Create(config));
ASSERT_TRUE(!!apm_);
set_default_desktop_apm_runtime_settings(apm_.get());
apm_->SetExtraOptions(config);
break;
}
case SettingsType::kAllSubmodulesTurnedOff: {
apm_.reset(AudioProcessingBuilder().Create());
apm_.reset(AudioProcessingBuilderForTesting().Create());
ASSERT_TRUE(!!apm_);
turn_off_default_apm_runtime_settings(apm_.get());
break;
}
case SettingsType::kDefaultApmDesktopWithoutDelayAgnostic: {
Config config;
apm_.reset(AudioProcessingBuilder().Create(config));
apm_.reset(AudioProcessingBuilderForTesting().Create(config));
ASSERT_TRUE(!!apm_);
set_default_desktop_apm_runtime_settings(apm_.get());
apm_->SetExtraOptions(config);
@ -515,7 +516,7 @@ class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
}
case SettingsType::kDefaultApmDesktopWithoutExtendedFilter: {
Config config;
apm_.reset(AudioProcessingBuilder().Create(config));
apm_.reset(AudioProcessingBuilderForTesting().Create(config));
ASSERT_TRUE(!!apm_);
set_default_desktop_apm_runtime_settings(apm_.get());
apm_->SetExtraOptions(config);

@ -28,6 +28,7 @@
#include "modules/audio_processing/audio_processing_impl.h"
#include "modules/audio_processing/common.h"
#include "modules/audio_processing/include/mock_audio_processing.h"
#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/protobuf_utils.h"
#include "modules/audio_processing/test/test_utils.h"
#include "rtc_base/arraysize.h"
@ -426,7 +427,7 @@ ApmTest::ApmTest()
far_file_(NULL),
near_file_(NULL),
out_file_(NULL) {
apm_.reset(AudioProcessingBuilder().Create());
apm_.reset(AudioProcessingBuilderForTesting().Create());
AudioProcessing::Config apm_config = apm_->GetConfig();
apm_config.gain_controller1.analog_gain_controller.enabled = false;
apm_config.pipeline.maximum_internal_processing_rate = 48000;
@ -1176,7 +1177,7 @@ TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabledFloat) {
auto src_channels = &src[0];
auto dest_channels = &dest[0];

apm_.reset(AudioProcessingBuilder().Create());
apm_.reset(AudioProcessingBuilderForTesting().Create());
EXPECT_NOERR(apm_->ProcessStream(&src_channels, StreamConfig(sample_rate, 1),
StreamConfig(sample_rate, 1),
&dest_channels));
@ -1637,7 +1638,7 @@ TEST_F(ApmTest, Process) {
if (test->num_input_channels() != test->num_output_channels())
continue;

apm_.reset(AudioProcessingBuilder().Create());
apm_.reset(AudioProcessingBuilderForTesting().Create());
AudioProcessing::Config apm_config = apm_->GetConfig();
apm_config.gain_controller1.analog_gain_controller.enabled = false;
apm_->ApplyConfig(apm_config);
@ -1806,7 +1807,8 @@ TEST_F(ApmTest, NoErrorsWithKeyboardChannel) {
{AudioProcessing::kStereoAndKeyboard, AudioProcessing::kStereo},
};

std::unique_ptr<AudioProcessing> ap(AudioProcessingBuilder().Create());
std::unique_ptr<AudioProcessing> ap(
AudioProcessingBuilderForTesting().Create());
// Enable one component just to ensure some processing takes place.
AudioProcessing::Config config;
config.noise_suppression.enabled = true;
@ -1932,7 +1934,8 @@ class AudioProcessingTest
size_t num_reverse_input_channels,
size_t num_reverse_output_channels,
const std::string& output_file_prefix) {
std::unique_ptr<AudioProcessing> ap(AudioProcessingBuilder().Create());
std::unique_ptr<AudioProcessing> ap(
AudioProcessingBuilderForTesting().Create());
AudioProcessing::Config apm_config = ap->GetConfig();
apm_config.gain_controller1.analog_gain_controller.enabled = false;
ap->ApplyConfig(apm_config);
@ -2316,7 +2319,8 @@ void RunApmRateAndChannelTest(
rtc::ArrayView<const int> sample_rates_hz,
rtc::ArrayView<const int> render_channel_counts,
rtc::ArrayView<const int> capture_channel_counts) {
std::unique_ptr<AudioProcessing> apm(AudioProcessingBuilder().Create());
std::unique_ptr<AudioProcessing> apm(
AudioProcessingBuilderForTesting().Create());
webrtc::AudioProcessing::Config apm_config;
apm_config.echo_canceller.enabled = true;
apm->ApplyConfig(apm_config);
@ -2455,7 +2459,7 @@ TEST(ApmConfiguration, EnablePostProcessing) {
auto mock_post_processor =
std::unique_ptr<CustomProcessing>(mock_post_processor_ptr);
rtc::scoped_refptr<AudioProcessing> apm =
AudioProcessingBuilder()
AudioProcessingBuilderForTesting()
.SetCapturePostProcessing(std::move(mock_post_processor))
.Create();

@ -2477,7 +2481,7 @@ TEST(ApmConfiguration, EnablePreProcessing) {
auto mock_pre_processor =
std::unique_ptr<CustomProcessing>(mock_pre_processor_ptr);
rtc::scoped_refptr<AudioProcessing> apm =
AudioProcessingBuilder()
AudioProcessingBuilderForTesting()
.SetRenderPreProcessing(std::move(mock_pre_processor))
.Create();

@ -2499,7 +2503,7 @@ TEST(ApmConfiguration, EnableCaptureAnalyzer) {
auto mock_capture_analyzer =
std::unique_ptr<CustomAudioAnalyzer>(mock_capture_analyzer_ptr);
rtc::scoped_refptr<AudioProcessing> apm =
AudioProcessingBuilder()
AudioProcessingBuilderForTesting()
.SetCaptureAnalyzer(std::move(mock_capture_analyzer))
.Create();

@ -2520,7 +2524,7 @@ TEST(ApmConfiguration, PreProcessingReceivesRuntimeSettings) {
auto mock_pre_processor =
std::unique_ptr<CustomProcessing>(mock_pre_processor_ptr);
rtc::scoped_refptr<AudioProcessing> apm =
AudioProcessingBuilder()
AudioProcessingBuilderForTesting()
.SetRenderPreProcessing(std::move(mock_pre_processor))
.Create();
apm->SetRuntimeSetting(
@ -2565,7 +2569,7 @@ TEST(ApmConfiguration, EchoControlInjection) {
new MyEchoControlFactory());

rtc::scoped_refptr<AudioProcessing> apm =
AudioProcessingBuilder()
AudioProcessingBuilderForTesting()
.SetEchoControlFactory(std::move(echo_control_factory))
.Create(webrtc_config);

@ -2589,7 +2593,7 @@ TEST(ApmConfiguration, EchoControlInjection) {
std::unique_ptr<AudioProcessing> CreateApm(bool mobile_aec) {
Config old_config;
std::unique_ptr<AudioProcessing> apm(
AudioProcessingBuilder().Create(old_config));
AudioProcessingBuilderForTesting().Create(old_config));
if (!apm) {
return apm;
}
@ -2740,7 +2744,8 @@ TEST(ApmStatistics, ReportOutputRmsDbfs) {
ptr[i] = 10000 * ((i % 3) - 1);
}

std::unique_ptr<AudioProcessing> apm(AudioProcessingBuilder().Create());
std::unique_ptr<AudioProcessing> apm(
AudioProcessingBuilderForTesting().Create());
apm->Initialize(processing_config);

// If not enabled, no metric should be reported.
@ -2793,7 +2798,8 @@ TEST(ApmStatistics, ReportHasVoice) {
ptr[i] = 10000 * ((i % 3) - 1);
}

std::unique_ptr<AudioProcessing> apm(AudioProcessingBuilder().Create());
std::unique_ptr<AudioProcessing> apm(
AudioProcessingBuilderForTesting().Create());
apm->Initialize(processing_config);

// If not enabled, no metric should be reported.

@ -685,19 +685,34 @@ class RTC_EXPORT AudioProcessingBuilder {
~AudioProcessingBuilder();
// The AudioProcessingBuilder takes ownership of the echo_control_factory.
AudioProcessingBuilder& SetEchoControlFactory(
std::unique_ptr<EchoControlFactory> echo_control_factory);
std::unique_ptr<EchoControlFactory> echo_control_factory) {
echo_control_factory_ = std::move(echo_control_factory);
return *this;
}
// The AudioProcessingBuilder takes ownership of the capture_post_processing.
AudioProcessingBuilder& SetCapturePostProcessing(
std::unique_ptr<CustomProcessing> capture_post_processing);
std::unique_ptr<CustomProcessing> capture_post_processing) {
capture_post_processing_ = std::move(capture_post_processing);
return *this;
}
// The AudioProcessingBuilder takes ownership of the render_pre_processing.
AudioProcessingBuilder& SetRenderPreProcessing(
std::unique_ptr<CustomProcessing> render_pre_processing);
std::unique_ptr<CustomProcessing> render_pre_processing) {
render_pre_processing_ = std::move(render_pre_processing);
return *this;
}
// The AudioProcessingBuilder takes ownership of the echo_detector.
AudioProcessingBuilder& SetEchoDetector(
rtc::scoped_refptr<EchoDetector> echo_detector);
rtc::scoped_refptr<EchoDetector> echo_detector) {
echo_detector_ = std::move(echo_detector);
return *this;
}
// The AudioProcessingBuilder takes ownership of the capture_analyzer.
AudioProcessingBuilder& SetCaptureAnalyzer(
std::unique_ptr<CustomAudioAnalyzer> capture_analyzer);
std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) {
capture_analyzer_ = std::move(capture_analyzer);
return *this;
}
// This creates an APM instance using the previously set components. Calling
// the Create function resets the AudioProcessingBuilder to its initial state.
AudioProcessing* Create();

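In the hunk above, the AudioProcessingBuilder setters are turned into inline header definitions: each one stores the injected component and returns *this, while Create() remains only declared here. A minimal sketch of the resulting fluent usage, not part of this CL; AEC3 is chosen here only as an illustrative echo control factory.

#include <memory>

#include "api/audio/echo_canceller3_factory.h"
#include "modules/audio_processing/include/audio_processing.h"

namespace webrtc {

// Builds an APM with an injected AEC3 factory. Each setter moves the
// component into the builder and returns *this, so the whole chain can be
// written as a single expression.
std::unique_ptr<AudioProcessing> CreateApmWithAec3() {
  return std::unique_ptr<AudioProcessing>(
      AudioProcessingBuilder()
          .SetEchoControlFactory(std::make_unique<EchoCanceller3Factory>())
          .Create());
}

}  // namespace webrtc
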
@ -0,0 +1,68 @@
/*
 * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"

#include <memory>
#include <utility>

#include "modules/audio_processing/audio_processing_impl.h"
#include "rtc_base/ref_counted_object.h"

namespace webrtc {

AudioProcessingBuilderForTesting::AudioProcessingBuilderForTesting() = default;
AudioProcessingBuilderForTesting::~AudioProcessingBuilderForTesting() = default;

#ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE

AudioProcessing* AudioProcessingBuilderForTesting::Create() {
webrtc::Config config;
return Create(config);
}

AudioProcessing* AudioProcessingBuilderForTesting::Create(
const webrtc::Config& config) {
AudioProcessingImpl* apm = new rtc::RefCountedObject<AudioProcessingImpl>(
config, std::move(capture_post_processing_),
std::move(render_pre_processing_), std::move(echo_control_factory_),
std::move(echo_detector_), std::move(capture_analyzer_));
int error = apm->Initialize();
RTC_CHECK_EQ(error, AudioProcessing::kNoError);
return apm;
}

#else

AudioProcessing* AudioProcessingBuilderForTesting::Create() {
AudioProcessingBuilder builder;
TransferOwnershipsToBuilder(&builder);
return builder.Create();
}

AudioProcessing* AudioProcessingBuilderForTesting::Create(
const webrtc::Config& config) {
AudioProcessingBuilder builder;
TransferOwnershipsToBuilder(&builder);
return builder.Create(config);
}

#endif

void AudioProcessingBuilderForTesting::TransferOwnershipsToBuilder(
AudioProcessingBuilder* builder) {
builder->SetCapturePostProcessing(std::move(capture_post_processing_));
builder->SetRenderPreProcessing(std::move(render_pre_processing_));
builder->SetCaptureAnalyzer(std::move(capture_analyzer_));
builder->SetEchoControlFactory(std::move(echo_control_factory_));
builder->SetEchoDetector(std::move(echo_detector_));
}

} // namespace webrtc
@ -0,0 +1,81 @@
/*
 * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_BUILDER_FOR_TESTING_H_
#define MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_BUILDER_FOR_TESTING_H_

#include <list>
#include <memory>
#include <utility>
#include <vector>

#include "modules/audio_processing/include/audio_processing.h"

namespace webrtc {

// Facilitates building of AudioProcessingImp for the tests.
class AudioProcessingBuilderForTesting {
public:
AudioProcessingBuilderForTesting();
~AudioProcessingBuilderForTesting();
// The AudioProcessingBuilderForTesting takes ownership of the
// echo_control_factory.
AudioProcessingBuilderForTesting& SetEchoControlFactory(
std::unique_ptr<EchoControlFactory> echo_control_factory) {
echo_control_factory_ = std::move(echo_control_factory);
return *this;
}
// The AudioProcessingBuilderForTesting takes ownership of the
// capture_post_processing.
AudioProcessingBuilderForTesting& SetCapturePostProcessing(
std::unique_ptr<CustomProcessing> capture_post_processing) {
capture_post_processing_ = std::move(capture_post_processing);
return *this;
}
// The AudioProcessingBuilderForTesting takes ownership of the
// render_pre_processing.
AudioProcessingBuilderForTesting& SetRenderPreProcessing(
std::unique_ptr<CustomProcessing> render_pre_processing) {
render_pre_processing_ = std::move(render_pre_processing);
return *this;
}
// The AudioProcessingBuilderForTesting takes ownership of the echo_detector.
AudioProcessingBuilderForTesting& SetEchoDetector(
rtc::scoped_refptr<EchoDetector> echo_detector) {
echo_detector_ = std::move(echo_detector);
return *this;
}
// The AudioProcessingBuilderForTesting takes ownership of the
// capture_analyzer.
AudioProcessingBuilderForTesting& SetCaptureAnalyzer(
std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) {
capture_analyzer_ = std::move(capture_analyzer);
return *this;
}
// This creates an APM instance using the previously set components. Calling
// the Create function resets the AudioProcessingBuilderForTesting to its
// initial state.
AudioProcessing* Create();
AudioProcessing* Create(const webrtc::Config& config);

private:
// Transfers the ownership to a non-testing builder.
void TransferOwnershipsToBuilder(AudioProcessingBuilder* builder);

std::unique_ptr<EchoControlFactory> echo_control_factory_;
std::unique_ptr<CustomProcessing> capture_post_processing_;
std::unique_ptr<CustomProcessing> render_pre_processing_;
rtc::scoped_refptr<EchoDetector> echo_detector_;
std::unique_ptr<CustomAudioAnalyzer> capture_analyzer_;
};

} // namespace webrtc

#endif // MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_BUILDER_FOR_TESTING_H_
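For orientation, a minimal sketch of how a unit test can obtain an APM through the builder declared above. The test name and gtest scaffolding are assumptions and not part of this CL; GetConfig()/ApplyConfig() are used the same way as in the unit-test hunks earlier in the change.

#include <memory>

#include "modules/audio_processing/include/audio_processing.h"
#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "test/gtest.h"

namespace webrtc {

// Hypothetical test: the testing builder is expected to hand back a usable
// APM instance whether or not the production builder is part of the build.
TEST(AudioProcessingBuilderForTestingExample, CreatesUsableApm) {
  std::unique_ptr<AudioProcessing> apm(
      AudioProcessingBuilderForTesting().Create());
  ASSERT_TRUE(apm);

  AudioProcessing::Config config = apm->GetConfig();
  config.echo_canceller.enabled = true;
  apm->ApplyConfig(config);
}

}  // namespace webrtc
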
@ -10,6 +10,7 @@

#include "modules/audio_processing/test/debug_dump_replayer.h"

#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/protobuf_utils.h"
#include "modules/audio_processing/test/runtime_setting_util.h"
#include "rtc_base/checks.h"
@ -185,7 +186,7 @@ void DebugDumpReplayer::MaybeRecreateApm(const audioproc::Config& msg) {
// We only create APM once, since changes on these fields should not
// happen in current implementation.
if (!apm_.get()) {
apm_.reset(AudioProcessingBuilder().Create(config));
apm_.reset(AudioProcessingBuilderForTesting().Create(config));
}
}

@ -17,6 +17,7 @@
#include "api/audio/echo_canceller3_factory.h"
#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/debug_dump_replayer.h"
#include "modules/audio_processing/test/test_utils.h"
#include "rtc_base/task_queue_for_test.h"
@ -141,7 +142,7 @@ DebugDumpGenerator::DebugDumpGenerator(const std::string& input_file_name,
enable_pre_amplifier_(enable_pre_amplifier),
worker_queue_("debug_dump_generator_worker_queue"),
dump_file_name_(dump_file_name) {
AudioProcessingBuilder apm_builder;
AudioProcessingBuilderForTesting apm_builder;
apm_.reset(apm_builder.Create(config));
}

@ -587,6 +587,7 @@ if (rtc_include_tests) {
"../media:rtc_media_engine_defaults",
"../modules/audio_device:audio_device_api",
"../modules/audio_processing:audio_processing_statistics",
"../modules/audio_processing:audioproc_test_utils",
"../modules/rtp_rtcp:rtp_rtcp_format",
"../p2p:fake_ice_transport",
"../p2p:fake_port_allocator",

@ -36,6 +36,7 @@
#include "media/engine/fake_webrtc_video_engine.h"
#include "media/engine/webrtc_media_engine.h"
#include "media/engine/webrtc_media_engine_defaults.h"
#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "p2p/base/fake_ice_transport.h"
#include "p2p/base/mock_async_resolver.h"
#include "p2p/base/p2p_constants.h"
@ -648,6 +649,12 @@ class PeerConnectionWrapper : public webrtc::PeerConnectionObserver,
media_deps.video_decoder_factory.reset();
}

if (!media_deps.audio_processing) {
// If the standard Creation method for APM returns a null pointer, instead
// use the builder for testing to create an APM object.
media_deps.audio_processing = AudioProcessingBuilderForTesting().Create();
}

pc_factory_dependencies.media_engine =
cricket::CreateMediaEngine(std::move(media_deps));
pc_factory_dependencies.call_factory = webrtc::CreateCallFactory();

@ -453,6 +453,7 @@ webrtc_fuzzer_test("audio_processing_fuzzer") {
"../../modules/audio_processing",
"../../modules/audio_processing:api",
"../../modules/audio_processing:audio_buffer",
"../../modules/audio_processing:audioproc_test_utils",
"../../modules/audio_processing/aec3",
"../../modules/audio_processing/aec_dump",
"../../modules/audio_processing/aec_dump:aec_dump_impl",

@ -16,6 +16,7 @@
#include "api/task_queue/default_task_queue_factory.h"
#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
#include "modules/audio_processing/include/audio_processing.h"
#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/numerics/safe_minmax.h"
#include "rtc_base/task_queue.h"
@ -108,7 +109,7 @@ std::unique_ptr<AudioProcessing> CreateApm(test::FuzzDataHelper* fuzz_data,
config.Set<ExperimentalNs>(new ExperimentalNs(exp_ns));

std::unique_ptr<AudioProcessing> apm(
AudioProcessingBuilder()
AudioProcessingBuilderForTesting()
.SetEchoControlFactory(std::move(echo_control_factory))
.Create(config));

@ -33,7 +33,11 @@ class TestPeer final : public PeerConnectionWrapper {
return std::move(video_generators_[i]);
}

void DetachAecDump() { audio_processing_->DetachAecDump(); }
void DetachAecDump() {
if (audio_processing_) {
audio_processing_->DetachAecDump();
}
}

// Adds provided |candidates| to the owned peer connection.
bool AddIceCandidates(

@ -290,7 +290,7 @@ std::unique_ptr<TestPeer> TestPeerFactory::CreateTestPeer(
// Create peer connection factory.
rtc::scoped_refptr<AudioProcessing> audio_processing =
webrtc::AudioProcessingBuilder().Create();
if (params->aec_dump_path) {
if (params->aec_dump_path && audio_processing) {
audio_processing->AttachAecDump(
AecDumpFactory::Create(*params->aec_dump_path, -1, task_queue));
}

@ -96,6 +96,9 @@ declare_args() {
# should be generated.
apm_debug_dump = false

# Selects whether the audio processing module should be excluded.
rtc_exclude_audio_processing_module = false

# Set this to true to enable BWE test logging.
rtc_enable_bwe_test_logging = false
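The new rtc_exclude_audio_processing_module argument presumably drives the WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE define that the testing builder above compiles against. A minimal sketch, not part of this CL and with a hypothetical helper name, of how APM-dependent code can stay buildable and null-safe when the module is excluded:

#include "api/scoped_refptr.h"
#include "modules/audio_processing/include/audio_processing.h"

namespace webrtc {

// Hypothetical helper: when the audio processing module is excluded from the
// build, no APM is created and callers must tolerate a null pointer.
rtc::scoped_refptr<AudioProcessing> MaybeCreateAudioProcessing() {
#ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE
  return nullptr;
#else
  return AudioProcessingBuilder().Create();
#endif
}

}  // namespace webrtc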