APM: Add build flag to allow building WebRTC without APM

This CL adds a build flag to allow building the non-test parts
of WebRTC without the audio processing module.
The CL also ensures that the WebRTC code correctly handles
the case when no APM is available.

Bug: webrtc:5298
Change-Id: I5c8b5d1f7115e5cce2af4c2b5ff701fa1c54e49e
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/171509
Commit-Queue: Per Åhgren <peah@webrtc.org>
Reviewed-by: Sam Zackrisson <saza@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#31133}
Author: Per Åhgren
Date: 2020-04-26 23:56:17 +02:00 (committed by Commit Bot)
Parent: 86bd33a1e7
Commit: cc73ed3e70
31 changed files with 1887 additions and 1403 deletions


@@ -281,6 +281,10 @@ config("common_config") {
     defines += [ "WEBRTC_EXCLUDE_TRANSIENT_SUPPRESSOR" ]
   }
 
+  if (rtc_exclude_audio_processing_module) {
+    defines += [ "WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE" ]
+  }
+
   cflags = []
 
   if (build_with_chromium) {
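
As a usage illustration (not part of the diff): code that must build both with and without the module can key off the new define. The CreateApmOrNull() helper below is hypothetical and the include paths are assumptions; it only sketches the pattern of handing callers a null APM when the module is compiled out.

#include "api/scoped_refptr.h"
#include "modules/audio_processing/include/audio_processing.h"

// Hypothetical helper, for illustration only: return a null APM when the
// audio processing module is excluded from the build, so callers exercise
// the "no APM available" paths this CL adds.
rtc::scoped_refptr<webrtc::AudioProcessing> CreateApmOrNull() {
#ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE
  return nullptr;
#else
  return rtc::scoped_refptr<webrtc::AudioProcessing>(
      webrtc::AudioProcessingBuilder().Create());
#endif
}

The GN argument rtc_exclude_audio_processing_module shown above controls whether the define is set for the whole build.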


@@ -75,15 +75,21 @@ const NetworkStatistics kNetworkStats = {
 const AudioDecodingCallStats kAudioDecodeStats = MakeAudioDecodeStatsForTest();
 
 struct ConfigHelper {
-  ConfigHelper() : ConfigHelper(new rtc::RefCountedObject<MockAudioMixer>()) {}
+  explicit ConfigHelper(bool use_null_audio_processing)
+      : ConfigHelper(new rtc::RefCountedObject<MockAudioMixer>(),
+                     use_null_audio_processing) {}
 
-  explicit ConfigHelper(rtc::scoped_refptr<MockAudioMixer> audio_mixer)
+  ConfigHelper(rtc::scoped_refptr<MockAudioMixer> audio_mixer,
+               bool use_null_audio_processing)
       : audio_mixer_(audio_mixer) {
     using ::testing::Invoke;
     AudioState::Config config;
     config.audio_mixer = audio_mixer_;
-    config.audio_processing = new rtc::RefCountedObject<MockAudioProcessing>();
+    config.audio_processing =
+        use_null_audio_processing
+            ? nullptr
+            : new rtc::RefCountedObject<MockAudioProcessing>();
     config.audio_device_module =
         new rtc::RefCountedObject<testing::NiceMock<MockAudioDeviceModule>>();
     audio_state_ = AudioState::Create(config);
@@ -230,182 +236,200 @@ TEST(AudioReceiveStreamTest, ConfigToString) {
 }
 
 TEST(AudioReceiveStreamTest, ConstructDestruct) {
-  ConfigHelper helper;
-  auto recv_stream = helper.CreateAudioReceiveStream();
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(use_null_audio_processing);
+    auto recv_stream = helper.CreateAudioReceiveStream();
+  }
 }
 
 TEST(AudioReceiveStreamTest, ReceiveRtpPacket) {
-  ConfigHelper helper;
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(use_null_audio_processing);
     helper.config().rtp.transport_cc = true;
     auto recv_stream = helper.CreateAudioReceiveStream();
     const int kTransportSequenceNumberValue = 1234;
     std::vector<uint8_t> rtp_packet = CreateRtpHeaderWithOneByteExtension(
         kTransportSequenceNumberId, kTransportSequenceNumberValue, 2);
     constexpr int64_t packet_time_us = 5678000;
 
     RtpPacketReceived parsed_packet;
     ASSERT_TRUE(parsed_packet.Parse(&rtp_packet[0], rtp_packet.size()));
     parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000);
 
     EXPECT_CALL(*helper.channel_receive(),
                 OnRtpPacket(::testing::Ref(parsed_packet)));
     recv_stream->OnRtpPacket(parsed_packet);
+  }
 }
 
 TEST(AudioReceiveStreamTest, ReceiveRtcpPacket) {
-  ConfigHelper helper;
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(use_null_audio_processing);
     helper.config().rtp.transport_cc = true;
     auto recv_stream = helper.CreateAudioReceiveStream();
 
     std::vector<uint8_t> rtcp_packet = CreateRtcpSenderReport();
     EXPECT_CALL(*helper.channel_receive(),
                 ReceivedRTCPPacket(&rtcp_packet[0], rtcp_packet.size()))
         .WillOnce(Return());
     recv_stream->DeliverRtcp(&rtcp_packet[0], rtcp_packet.size());
+  }
 }
 
 TEST(AudioReceiveStreamTest, GetStats) {
-  ConfigHelper helper;
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(use_null_audio_processing);
     auto recv_stream = helper.CreateAudioReceiveStream();
     helper.SetupMockForGetStats();
     AudioReceiveStream::Stats stats = recv_stream->GetStats();
     EXPECT_EQ(kRemoteSsrc, stats.remote_ssrc);
     EXPECT_EQ(kCallStats.payload_bytes_rcvd, stats.payload_bytes_rcvd);
     EXPECT_EQ(kCallStats.header_and_padding_bytes_rcvd,
               stats.header_and_padding_bytes_rcvd);
     EXPECT_EQ(static_cast<uint32_t>(kCallStats.packetsReceived),
               stats.packets_rcvd);
     EXPECT_EQ(kCallStats.cumulativeLost, stats.packets_lost);
     EXPECT_EQ(kReceiveCodec.second.name, stats.codec_name);
     EXPECT_EQ(
         kCallStats.jitterSamples / (kReceiveCodec.second.clockrate_hz / 1000),
         stats.jitter_ms);
     EXPECT_EQ(kNetworkStats.currentBufferSize, stats.jitter_buffer_ms);
     EXPECT_EQ(kNetworkStats.preferredBufferSize,
               stats.jitter_buffer_preferred_ms);
     EXPECT_EQ(static_cast<uint32_t>(kJitterBufferDelay + kPlayoutBufferDelay),
               stats.delay_estimate_ms);
     EXPECT_EQ(static_cast<int32_t>(kSpeechOutputLevel), stats.audio_level);
     EXPECT_EQ(kTotalOutputEnergy, stats.total_output_energy);
     EXPECT_EQ(kNetworkStats.totalSamplesReceived, stats.total_samples_received);
     EXPECT_EQ(kTotalOutputDuration, stats.total_output_duration);
     EXPECT_EQ(kNetworkStats.concealedSamples, stats.concealed_samples);
     EXPECT_EQ(kNetworkStats.concealmentEvents, stats.concealment_events);
     EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferDelayMs) /
                   static_cast<double>(rtc::kNumMillisecsPerSec),
               stats.jitter_buffer_delay_seconds);
     EXPECT_EQ(kNetworkStats.jitterBufferEmittedCount,
               stats.jitter_buffer_emitted_count);
     EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferTargetDelayMs) /
                   static_cast<double>(rtc::kNumMillisecsPerSec),
               stats.jitter_buffer_target_delay_seconds);
     EXPECT_EQ(Q14ToFloat(kNetworkStats.currentExpandRate), stats.expand_rate);
     EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSpeechExpandRate),
               stats.speech_expand_rate);
     EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDecodedRate),
               stats.secondary_decoded_rate);
     EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDiscardedRate),
               stats.secondary_discarded_rate);
     EXPECT_EQ(Q14ToFloat(kNetworkStats.currentAccelerateRate),
               stats.accelerate_rate);
     EXPECT_EQ(Q14ToFloat(kNetworkStats.currentPreemptiveRate),
               stats.preemptive_expand_rate);
     EXPECT_EQ(kAudioDecodeStats.calls_to_silence_generator,
               stats.decoding_calls_to_silence_generator);
     EXPECT_EQ(kAudioDecodeStats.calls_to_neteq, stats.decoding_calls_to_neteq);
     EXPECT_EQ(kAudioDecodeStats.decoded_normal, stats.decoding_normal);
     EXPECT_EQ(kAudioDecodeStats.decoded_neteq_plc, stats.decoding_plc);
     EXPECT_EQ(kAudioDecodeStats.decoded_codec_plc, stats.decoding_codec_plc);
     EXPECT_EQ(kAudioDecodeStats.decoded_cng, stats.decoding_cng);
     EXPECT_EQ(kAudioDecodeStats.decoded_plc_cng, stats.decoding_plc_cng);
     EXPECT_EQ(kAudioDecodeStats.decoded_muted_output,
               stats.decoding_muted_output);
     EXPECT_EQ(kCallStats.capture_start_ntp_time_ms_,
               stats.capture_start_ntp_time_ms);
     EXPECT_EQ(kPlayoutNtpTimestampMs, stats.estimated_playout_ntp_timestamp_ms);
+  }
 }
 
 TEST(AudioReceiveStreamTest, SetGain) {
-  ConfigHelper helper;
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(use_null_audio_processing);
     auto recv_stream = helper.CreateAudioReceiveStream();
     EXPECT_CALL(*helper.channel_receive(),
                 SetChannelOutputVolumeScaling(FloatEq(0.765f)));
     recv_stream->SetGain(0.765f);
+  }
 }
 
 TEST(AudioReceiveStreamTest, StreamsShouldBeAddedToMixerOnceOnStart) {
-  ConfigHelper helper1;
-  ConfigHelper helper2(helper1.audio_mixer());
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper1(use_null_audio_processing);
+    ConfigHelper helper2(helper1.audio_mixer(), use_null_audio_processing);
     auto recv_stream1 = helper1.CreateAudioReceiveStream();
     auto recv_stream2 = helper2.CreateAudioReceiveStream();
 
     EXPECT_CALL(*helper1.channel_receive(), StartPlayout()).Times(1);
     EXPECT_CALL(*helper2.channel_receive(), StartPlayout()).Times(1);
     EXPECT_CALL(*helper1.channel_receive(), StopPlayout()).Times(1);
     EXPECT_CALL(*helper2.channel_receive(), StopPlayout()).Times(1);
     EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream1.get()))
         .WillOnce(Return(true));
     EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream2.get()))
         .WillOnce(Return(true));
     EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream1.get()))
         .Times(1);
     EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream2.get()))
         .Times(1);
 
     recv_stream1->Start();
     recv_stream2->Start();
 
     // One more should not result in any more mixer sources added.
     recv_stream1->Start();
 
     // Stop stream before it is being destructed.
     recv_stream2->Stop();
+  }
 }
 
 TEST(AudioReceiveStreamTest, ReconfigureWithSameConfig) {
-  ConfigHelper helper;
-  auto recv_stream = helper.CreateAudioReceiveStream();
-  recv_stream->Reconfigure(helper.config());
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(use_null_audio_processing);
+    auto recv_stream = helper.CreateAudioReceiveStream();
+    recv_stream->Reconfigure(helper.config());
+  }
 }
 
 TEST(AudioReceiveStreamTest, ReconfigureWithUpdatedConfig) {
-  ConfigHelper helper;
-  auto recv_stream = helper.CreateAudioReceiveStream();
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(use_null_audio_processing);
+    auto recv_stream = helper.CreateAudioReceiveStream();
 
     auto new_config = helper.config();
     new_config.rtp.nack.rtp_history_ms = 300 + 20;
     new_config.rtp.extensions.clear();
     new_config.rtp.extensions.push_back(
         RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId + 1));
     new_config.rtp.extensions.push_back(
         RtpExtension(RtpExtension::kTransportSequenceNumberUri,
                      kTransportSequenceNumberId + 1));
     new_config.decoder_map.emplace(1, SdpAudioFormat("foo", 8000, 1));
 
     MockChannelReceive& channel_receive = *helper.channel_receive();
     EXPECT_CALL(channel_receive, SetNACKStatus(true, 15 + 1)).Times(1);
     EXPECT_CALL(channel_receive, SetReceiveCodecs(new_config.decoder_map));
 
     recv_stream->Reconfigure(new_config);
+  }
 }
 
 TEST(AudioReceiveStreamTest, ReconfigureWithFrameDecryptor) {
-  ConfigHelper helper;
-  auto recv_stream = helper.CreateAudioReceiveStream();
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(use_null_audio_processing);
+    auto recv_stream = helper.CreateAudioReceiveStream();
 
     auto new_config_0 = helper.config();
     rtc::scoped_refptr<FrameDecryptorInterface> mock_frame_decryptor_0(
         new rtc::RefCountedObject<MockFrameDecryptor>());
     new_config_0.frame_decryptor = mock_frame_decryptor_0;
 
     recv_stream->Reconfigure(new_config_0);
 
     auto new_config_1 = helper.config();
     rtc::scoped_refptr<FrameDecryptorInterface> mock_frame_decryptor_1(
         new rtc::RefCountedObject<MockFrameDecryptor>());
     new_config_1.frame_decryptor = mock_frame_decryptor_1;
     new_config_1.crypto_options.sframe.require_frame_encryption = true;
     recv_stream->Reconfigure(new_config_1);
+  }
 }
 
 }  // namespace test


@@ -490,9 +490,11 @@ webrtc::AudioSendStream::Stats AudioSendStream::GetStats(
   stats.typing_noise_detected = audio_state()->typing_noise_detected();
   stats.ana_statistics = channel_send_->GetANAStatistics();
-  RTC_DCHECK(audio_state_->audio_processing());
-  stats.apm_statistics =
-      audio_state_->audio_processing()->GetStatistics(has_remote_tracks);
+
+  AudioProcessing* ap = audio_state_->audio_processing();
+  if (ap) {
+    stats.apm_statistics = ap->GetStatistics(has_remote_tracks);
+  }
 
   stats.report_block_datas = std::move(call_stats.report_block_datas);


@@ -141,11 +141,16 @@ rtc::scoped_refptr<MockAudioEncoderFactory> SetupEncoderFactoryMock() {
 }
 
 struct ConfigHelper {
-  ConfigHelper(bool audio_bwe_enabled, bool expect_set_encoder_call)
+  ConfigHelper(bool audio_bwe_enabled,
+               bool expect_set_encoder_call,
+               bool use_null_audio_processing)
       : clock_(1000000),
         task_queue_factory_(CreateDefaultTaskQueueFactory()),
         stream_config_(/*send_transport=*/nullptr),
-        audio_processing_(new rtc::RefCountedObject<MockAudioProcessing>()),
+        audio_processing_(
+            use_null_audio_processing
+                ? nullptr
+                : new rtc::RefCountedObject<MockAudioProcessing>()),
         bitrate_allocator_(&limit_observer_),
         worker_queue_(task_queue_factory_->CreateTaskQueue(
             "ConfigHelper_worker_queue",
@@ -273,7 +278,7 @@ struct ConfigHelper {
         .WillOnce(Return(true));
   }
 
-  void SetupMockForGetStats() {
+  void SetupMockForGetStats(bool use_null_audio_processing) {
     using ::testing::DoAll;
     using ::testing::SetArgPointee;
     using ::testing::SetArgReferee;
@@ -305,10 +310,13 @@ struct ConfigHelper {
     audio_processing_stats_.residual_echo_likelihood = kResidualEchoLikelihood;
     audio_processing_stats_.residual_echo_likelihood_recent_max =
         kResidualEchoLikelihoodMax;
-    EXPECT_CALL(*audio_processing_, GetStatistics(true))
-        .WillRepeatedly(Return(audio_processing_stats_));
+    if (!use_null_audio_processing) {
+      ASSERT_TRUE(audio_processing_);
+      EXPECT_CALL(*audio_processing_, GetStatistics(true))
+          .WillRepeatedly(Return(audio_processing_stats_));
+    }
   }
 
   TaskQueueForTest* worker() { return &worker_queue_; }
 
  private:
@@ -381,235 +389,270 @@ TEST(AudioSendStreamTest, ConfigToString) {
 }
 
 TEST(AudioSendStreamTest, ConstructDestruct) {
-  ConfigHelper helper(false, true);
-  auto send_stream = helper.CreateAudioSendStream();
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, true, use_null_audio_processing);
+    auto send_stream = helper.CreateAudioSendStream();
+  }
 }
 
 TEST(AudioSendStreamTest, SendTelephoneEvent) {
-  ConfigHelper helper(false, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
     helper.SetupMockForSendTelephoneEvent();
     EXPECT_TRUE(send_stream->SendTelephoneEvent(
         kTelephoneEventPayloadType, kTelephoneEventPayloadFrequency,
         kTelephoneEventCode, kTelephoneEventDuration));
+  }
 }
 
 TEST(AudioSendStreamTest, SetMuted) {
-  ConfigHelper helper(false, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
     EXPECT_CALL(*helper.channel_send(), SetInputMute(true));
     send_stream->SetMuted(true);
+  }
 }
 
 TEST(AudioSendStreamTest, AudioBweCorrectObjectsOnChannelProxy) {
   ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/");
-  ConfigHelper helper(true, true);
-  auto send_stream = helper.CreateAudioSendStream();
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(true, true, use_null_audio_processing);
+    auto send_stream = helper.CreateAudioSendStream();
+  }
 }
 
 TEST(AudioSendStreamTest, NoAudioBweCorrectObjectsOnChannelProxy) {
-  ConfigHelper helper(false, true);
-  auto send_stream = helper.CreateAudioSendStream();
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, true, use_null_audio_processing);
+    auto send_stream = helper.CreateAudioSendStream();
+  }
 }
 
 TEST(AudioSendStreamTest, GetStats) {
-  ConfigHelper helper(false, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
-  helper.SetupMockForGetStats();
+    helper.SetupMockForGetStats(use_null_audio_processing);
     AudioSendStream::Stats stats = send_stream->GetStats(true);
     EXPECT_EQ(kSsrc, stats.local_ssrc);
     EXPECT_EQ(kCallStats.payload_bytes_sent, stats.payload_bytes_sent);
     EXPECT_EQ(kCallStats.header_and_padding_bytes_sent,
               stats.header_and_padding_bytes_sent);
     EXPECT_EQ(kCallStats.packetsSent, stats.packets_sent);
     EXPECT_EQ(kReportBlock.cumulative_num_packets_lost, stats.packets_lost);
     EXPECT_EQ(Q8ToFloat(kReportBlock.fraction_lost), stats.fraction_lost);
     EXPECT_EQ(kIsacFormat.name, stats.codec_name);
     EXPECT_EQ(static_cast<int32_t>(kReportBlock.interarrival_jitter /
                                    (kIsacFormat.clockrate_hz / 1000)),
               stats.jitter_ms);
     EXPECT_EQ(kCallStats.rttMs, stats.rtt_ms);
     EXPECT_EQ(0, stats.audio_level);
     EXPECT_EQ(0, stats.total_input_energy);
     EXPECT_EQ(0, stats.total_input_duration);
 
+    if (!use_null_audio_processing) {
       EXPECT_EQ(kEchoDelayMedian, stats.apm_statistics.delay_median_ms);
       EXPECT_EQ(kEchoDelayStdDev,
                 stats.apm_statistics.delay_standard_deviation_ms);
       EXPECT_EQ(kEchoReturnLoss, stats.apm_statistics.echo_return_loss);
       EXPECT_EQ(kEchoReturnLossEnhancement,
                 stats.apm_statistics.echo_return_loss_enhancement);
       EXPECT_EQ(kDivergentFilterFraction,
                 stats.apm_statistics.divergent_filter_fraction);
       EXPECT_EQ(kResidualEchoLikelihood,
                 stats.apm_statistics.residual_echo_likelihood);
       EXPECT_EQ(kResidualEchoLikelihoodMax,
                 stats.apm_statistics.residual_echo_likelihood_recent_max);
       EXPECT_FALSE(stats.typing_noise_detected);
+    }
+  }
 }
 
 TEST(AudioSendStreamTest, GetStatsAudioLevel) {
-  ConfigHelper helper(false, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
-  helper.SetupMockForGetStats();
+    helper.SetupMockForGetStats(use_null_audio_processing);
     EXPECT_CALL(*helper.channel_send(), ProcessAndEncodeAudioForMock(_))
         .Times(AnyNumber());
 
     constexpr int kSampleRateHz = 48000;
     constexpr size_t kNumChannels = 1;
     constexpr int16_t kSilentAudioLevel = 0;
     constexpr int16_t kMaxAudioLevel = 32767;  // Audio level is [0,32767].
     constexpr int kAudioFrameDurationMs = 10;
 
     // Process 10 audio frames (100 ms) of silence. After this, on the next
     // (11-th) frame, the audio level will be updated with the maximum audio
     // level of the first 11 frames. See AudioLevel.
     for (size_t i = 0; i < 10; ++i) {
       send_stream->SendAudioData(
           CreateAudioFrame1kHzSineWave(kSilentAudioLevel, kAudioFrameDurationMs,
                                        kSampleRateHz, kNumChannels));
     }
     AudioSendStream::Stats stats = send_stream->GetStats();
     EXPECT_EQ(kSilentAudioLevel, stats.audio_level);
     EXPECT_NEAR(0.0f, stats.total_input_energy, kTolerance);
     EXPECT_NEAR(0.1f, stats.total_input_duration,
                 kTolerance);  // 100 ms = 0.1 s
 
     // Process 10 audio frames (100 ms) of maximum audio level.
     // Note that AudioLevel updates the audio level every 11th frame, processing
     // 10 frames above was needed to see a non-zero audio level here.
     for (size_t i = 0; i < 10; ++i) {
       send_stream->SendAudioData(CreateAudioFrame1kHzSineWave(
           kMaxAudioLevel, kAudioFrameDurationMs, kSampleRateHz, kNumChannels));
     }
     stats = send_stream->GetStats();
     EXPECT_EQ(kMaxAudioLevel, stats.audio_level);
     // Energy increases by energy*duration, where energy is audio level in
     // [0,1].
     EXPECT_NEAR(0.1f, stats.total_input_energy, kTolerance);  // 0.1 s of max
     EXPECT_NEAR(0.2f, stats.total_input_duration,
                 kTolerance);  // 200 ms = 0.2 s
+  }
 }
 
 TEST(AudioSendStreamTest, SendCodecAppliesAudioNetworkAdaptor) {
-  ConfigHelper helper(false, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, true, use_null_audio_processing);
     helper.config().send_codec_spec =
         AudioSendStream::Config::SendCodecSpec(0, kOpusFormat);
     const std::string kAnaConfigString = "abcde";
     const std::string kAnaReconfigString = "12345";
 
     helper.config().rtp.extensions.push_back(RtpExtension(
         RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberId));
     helper.config().audio_network_adaptor_config = kAnaConfigString;
 
     EXPECT_CALL(helper.mock_encoder_factory(), MakeAudioEncoderMock(_, _, _, _))
         .WillOnce(Invoke([&kAnaConfigString, &kAnaReconfigString](
                              int payload_type, const SdpAudioFormat& format,
                              absl::optional<AudioCodecPairId> codec_pair_id,
                              std::unique_ptr<AudioEncoder>* return_value) {
           auto mock_encoder = SetupAudioEncoderMock(payload_type, format);
           EXPECT_CALL(*mock_encoder,
                       EnableAudioNetworkAdaptor(StrEq(kAnaConfigString), _))
               .WillOnce(Return(true));
           EXPECT_CALL(*mock_encoder,
                       EnableAudioNetworkAdaptor(StrEq(kAnaReconfigString), _))
               .WillOnce(Return(true));
           *return_value = std::move(mock_encoder);
         }));
 
     auto send_stream = helper.CreateAudioSendStream();
 
     auto stream_config = helper.config();
     stream_config.audio_network_adaptor_config = kAnaReconfigString;
     send_stream->Reconfigure(stream_config);
+  }
 }
 
 // VAD is applied when codec is mono and the CNG frequency matches the codec
 // clock rate.
 TEST(AudioSendStreamTest, SendCodecCanApplyVad) {
-  ConfigHelper helper(false, false);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, false, use_null_audio_processing);
     helper.config().send_codec_spec =
         AudioSendStream::Config::SendCodecSpec(9, kG722Format);
     helper.config().send_codec_spec->cng_payload_type = 105;
     using ::testing::Invoke;
     std::unique_ptr<AudioEncoder> stolen_encoder;
     EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
         .WillOnce(
             Invoke([&stolen_encoder](int payload_type,
                                      std::unique_ptr<AudioEncoder>* encoder) {
               stolen_encoder = std::move(*encoder);
               return true;
             }));
     EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
 
     auto send_stream = helper.CreateAudioSendStream();
 
     // We cannot truly determine if the encoder created is an AudioEncoderCng.
     // It is the only reasonable implementation that will return something from
     // ReclaimContainedEncoders, though.
     ASSERT_TRUE(stolen_encoder);
     EXPECT_FALSE(stolen_encoder->ReclaimContainedEncoders().empty());
+  }
 }
 
 TEST(AudioSendStreamTest, DoesNotPassHigherBitrateThanMaxBitrate) {
-  ConfigHelper helper(false, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
     EXPECT_CALL(
         *helper.channel_send(),
         OnBitrateAllocation(
             Field(&BitrateAllocationUpdate::target_bitrate,
                   Eq(DataRate::BitsPerSec(helper.config().max_bitrate_bps)))));
     BitrateAllocationUpdate update;
     update.target_bitrate =
         DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
     update.packet_loss_ratio = 0;
     update.round_trip_time = TimeDelta::Millis(50);
     update.bwe_period = TimeDelta::Millis(6000);
     helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
                               RTC_FROM_HERE);
+  }
 }
 
 TEST(AudioSendStreamTest, SSBweTargetInRangeRespected) {
   ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/");
-  ConfigHelper helper(true, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(true, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
     EXPECT_CALL(
         *helper.channel_send(),
         OnBitrateAllocation(Field(
             &BitrateAllocationUpdate::target_bitrate,
             Eq(DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000)))));
     BitrateAllocationUpdate update;
     update.target_bitrate =
         DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000);
     helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
                               RTC_FROM_HERE);
+  }
 }
 
 TEST(AudioSendStreamTest, SSBweFieldTrialMinRespected) {
   ScopedFieldTrials field_trials(
       "WebRTC-Audio-SendSideBwe/Enabled/"
       "WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
-  ConfigHelper helper(true, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(true, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
     EXPECT_CALL(
         *helper.channel_send(),
         OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
                                   Eq(DataRate::KilobitsPerSec(6)))));
     BitrateAllocationUpdate update;
     update.target_bitrate = DataRate::KilobitsPerSec(1);
     helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
                               RTC_FROM_HERE);
+  }
 }
 
 TEST(AudioSendStreamTest, SSBweFieldTrialMaxRespected) {
   ScopedFieldTrials field_trials(
       "WebRTC-Audio-SendSideBwe/Enabled/"
       "WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
-  ConfigHelper helper(true, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(true, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
     EXPECT_CALL(
         *helper.channel_send(),
         OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
                                   Eq(DataRate::KilobitsPerSec(64)))));
     BitrateAllocationUpdate update;
     update.target_bitrate = DataRate::KilobitsPerSec(128);
     helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
                               RTC_FROM_HERE);
+  }
 }
 
 TEST(AudioSendStreamTest, SSBweWithOverhead) {
@@ -617,19 +660,22 @@ TEST(AudioSendStreamTest, SSBweWithOverhead) {
       "WebRTC-Audio-SendSideBwe/Enabled/"
       "WebRTC-SendSideBwe-WithOverhead/Enabled/"
      "WebRTC-Audio-LegacyOverhead/Disabled/");
-  ConfigHelper helper(true, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(true, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
     EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
     send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
     const DataRate bitrate =
         DataRate::BitsPerSec(helper.config().max_bitrate_bps) +
         kMaxOverheadRate;
     EXPECT_CALL(*helper.channel_send(),
                 OnBitrateAllocation(Field(
                     &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
     BitrateAllocationUpdate update;
     update.target_bitrate = bitrate;
     helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
                               RTC_FROM_HERE);
+  }
 }
 
 TEST(AudioSendStreamTest, SSBweWithOverheadMinRespected) {
@@ -638,18 +684,20 @@ TEST(AudioSendStreamTest, SSBweWithOverheadMinRespected) {
       "WebRTC-SendSideBwe-WithOverhead/Enabled/"
      "WebRTC-Audio-LegacyOverhead/Disabled/"
      "WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
-  ConfigHelper helper(true, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(true, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
     EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
     send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
     const DataRate bitrate = DataRate::KilobitsPerSec(6) + kMinOverheadRate;
     EXPECT_CALL(*helper.channel_send(),
                 OnBitrateAllocation(Field(
                     &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
     BitrateAllocationUpdate update;
     update.target_bitrate = DataRate::KilobitsPerSec(1);
     helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
                               RTC_FROM_HERE);
+  }
 }
 
 TEST(AudioSendStreamTest, SSBweWithOverheadMaxRespected) {
@@ -658,152 +706,172 @@ TEST(AudioSendStreamTest, SSBweWithOverheadMaxRespected) {
       "WebRTC-SendSideBwe-WithOverhead/Enabled/"
      "WebRTC-Audio-LegacyOverhead/Disabled/"
      "WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
-  ConfigHelper helper(true, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(true, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
     EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
     send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
     const DataRate bitrate = DataRate::KilobitsPerSec(64) + kMaxOverheadRate;
     EXPECT_CALL(*helper.channel_send(),
                 OnBitrateAllocation(Field(
                     &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
     BitrateAllocationUpdate update;
     update.target_bitrate = DataRate::KilobitsPerSec(128);
     helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
                               RTC_FROM_HERE);
+  }
 }
 
 TEST(AudioSendStreamTest, ProbingIntervalOnBitrateUpdated) {
-  ConfigHelper helper(false, true);
-  auto send_stream = helper.CreateAudioSendStream();
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, true, use_null_audio_processing);
+    auto send_stream = helper.CreateAudioSendStream();
 
     EXPECT_CALL(*helper.channel_send(),
                 OnBitrateAllocation(Field(&BitrateAllocationUpdate::bwe_period,
                                           Eq(TimeDelta::Millis(5000)))));
     BitrateAllocationUpdate update;
     update.target_bitrate =
         DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
     update.packet_loss_ratio = 0;
     update.round_trip_time = TimeDelta::Millis(50);
     update.bwe_period = TimeDelta::Millis(5000);
     helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
                               RTC_FROM_HERE);
+  }
 }
 
 // Test that AudioSendStream doesn't recreate the encoder unnecessarily.
 TEST(AudioSendStreamTest, DontRecreateEncoder) {
-  ConfigHelper helper(false, false);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, false, use_null_audio_processing);
     // WillOnce is (currently) the default used by ConfigHelper if asked to set
     // an expectation for SetEncoder. Since this behavior is essential for this
     // test to be correct, it's instead set-up manually here. Otherwise a simple
     // change to ConfigHelper (say to WillRepeatedly) would silently make this
     // test useless.
     EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
         .WillOnce(Return());
     EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
 
     helper.config().send_codec_spec =
         AudioSendStream::Config::SendCodecSpec(9, kG722Format);
     helper.config().send_codec_spec->cng_payload_type = 105;
     auto send_stream = helper.CreateAudioSendStream();
     send_stream->Reconfigure(helper.config());
+  }
 }
 
 TEST(AudioSendStreamTest, ReconfigureTransportCcResetsFirst) {
   ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/");
-  ConfigHelper helper(false, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
     auto new_config = helper.config();
     ConfigHelper::AddBweToConfig(&new_config);
 
     EXPECT_CALL(*helper.rtp_rtcp(),
                 RegisterRtpHeaderExtension(TransportSequenceNumber::kUri,
                                            kTransportSequenceNumberId))
         .Times(1);
     {
       ::testing::InSequence seq;
       EXPECT_CALL(*helper.channel_send(), ResetSenderCongestionControlObjects())
           .Times(1);
       EXPECT_CALL(*helper.channel_send(),
                   RegisterSenderCongestionControlObjects(helper.transport(),
                                                          Ne(nullptr)))
           .Times(1);
     }
 
     send_stream->Reconfigure(new_config);
+  }
 }
 
 TEST(AudioSendStreamTest, OnTransportOverheadChanged) {
-  ConfigHelper helper(false, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
     auto new_config = helper.config();
 
     // CallEncoder will be called on overhead change.
     EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(1);
 
     const size_t transport_overhead_per_packet_bytes = 333;
     send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);
 
     EXPECT_EQ(transport_overhead_per_packet_bytes,
               send_stream->TestOnlyGetPerPacketOverheadBytes());
+  }
 }
 
 TEST(AudioSendStreamTest, OnAudioOverheadChanged) {
-  ConfigHelper helper(false, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
     auto new_config = helper.config();
 
     // CallEncoder will be called on overhead change.
     EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(1);
 
     const size_t audio_overhead_per_packet_bytes = 555;
     send_stream->OnOverheadChanged(audio_overhead_per_packet_bytes);
 
     EXPECT_EQ(audio_overhead_per_packet_bytes,
               send_stream->TestOnlyGetPerPacketOverheadBytes());
+  }
 }
 
 TEST(AudioSendStreamTest, OnAudioAndTransportOverheadChanged) {
-  ConfigHelper helper(false, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
     auto new_config = helper.config();
 
     // CallEncoder will be called when each of overhead changes.
     EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(2);
 
     const size_t transport_overhead_per_packet_bytes = 333;
     send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);
 
     const size_t audio_overhead_per_packet_bytes = 555;
     send_stream->OnOverheadChanged(audio_overhead_per_packet_bytes);
 
     EXPECT_EQ(
         transport_overhead_per_packet_bytes + audio_overhead_per_packet_bytes,
         send_stream->TestOnlyGetPerPacketOverheadBytes());
+  }
 }
 
 // Validates that reconfiguring the AudioSendStream with a Frame encryptor
 // correctly reconfigures on the object without crashing.
 TEST(AudioSendStreamTest, ReconfigureWithFrameEncryptor) {
-  ConfigHelper helper(false, true);
+  for (bool use_null_audio_processing : {false, true}) {
+    ConfigHelper helper(false, true, use_null_audio_processing);
     auto send_stream = helper.CreateAudioSendStream();
     auto new_config = helper.config();
 
     rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_0(
         new rtc::RefCountedObject<MockFrameEncryptor>());
     new_config.frame_encryptor = mock_frame_encryptor_0;
     EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr)))
         .Times(1);
     send_stream->Reconfigure(new_config);
 
     // Not updating the frame encryptor shouldn't force it to reconfigure.
     EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(_)).Times(0);
     send_stream->Reconfigure(new_config);
 
     // Updating frame encryptor to a new object should force a call to the
     // proxy.
     rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_1(
         new rtc::RefCountedObject<MockFrameEncryptor>());
     new_config.frame_encryptor = mock_frame_encryptor_1;
     new_config.crypto_options.sframe.require_frame_encryption = true;
     EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr)))
        .Times(1);
     send_stream->Reconfigure(new_config);
+  }
 }
 
 }  // namespace test
 }  // namespace webrtc


@@ -41,7 +41,6 @@ AudioState::~AudioState() {
 }
 
 AudioProcessing* AudioState::audio_processing() {
-  RTC_DCHECK(config_.audio_processing);
   return config_.audio_processing.get();
 }
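
Because audio_processing() may now legitimately return null (the RTC_DCHECK above is removed), call sites have to guard the pointer before using it, as the AudioSendStream::GetStats() change earlier in this CL does. A minimal caller-side sketch; the function name and usage are illustrative and not part of this change:

#include "modules/audio_processing/include/audio_processing.h"

// Hedged sketch of the caller-side pattern: tolerate a null APM, e.g. when
// WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE is defined or no APM was configured.
void LogApmStatsIfAvailable(webrtc::AudioProcessing* apm) {
  if (!apm) {
    return;  // No APM available; nothing to report.
  }
  auto stats = apm->GetStatistics(/*has_remote_tracks=*/true);
  // ... consume |stats| ...
}

Callers would typically obtain the pointer from AudioState::audio_processing(), which after this change can be null.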


@@ -31,10 +31,14 @@ constexpr int kSampleRate = 16000;
 constexpr int kNumberOfChannels = 1;
 
 struct ConfigHelper {
-  ConfigHelper() : audio_mixer(AudioMixerImpl::Create()) {
+  explicit ConfigHelper(bool use_null_audio_processing)
+      : audio_mixer(AudioMixerImpl::Create()) {
     audio_state_config.audio_mixer = audio_mixer;
     audio_state_config.audio_processing =
-        new rtc::RefCountedObject<testing::NiceMock<MockAudioProcessing>>();
+        use_null_audio_processing
+            ? nullptr
+            : new rtc::RefCountedObject<
+                  testing::NiceMock<MockAudioProcessing>>();
     audio_state_config.audio_device_module =
         new rtc::RefCountedObject<MockAudioDeviceModule>();
   }
@ -88,162 +92,183 @@ std::vector<uint32_t> ComputeChannelLevels(AudioFrame* audio_frame) {
} // namespace } // namespace
TEST(AudioStateTest, Create) { TEST(AudioStateTest, Create) {
ConfigHelper helper; for (bool use_null_audio_processing : {false, true}) {
auto audio_state = AudioState::Create(helper.config()); ConfigHelper helper(use_null_audio_processing);
EXPECT_TRUE(audio_state.get()); auto audio_state = AudioState::Create(helper.config());
EXPECT_TRUE(audio_state.get());
}
} }
TEST(AudioStateTest, ConstructDestruct) { TEST(AudioStateTest, ConstructDestruct) {
ConfigHelper helper; for (bool use_null_audio_processing : {false, true}) {
rtc::scoped_refptr<internal::AudioState> audio_state( ConfigHelper helper(use_null_audio_processing);
new rtc::RefCountedObject<internal::AudioState>(helper.config())); rtc::scoped_refptr<internal::AudioState> audio_state(
new rtc::RefCountedObject<internal::AudioState>(helper.config()));
}
} }
TEST(AudioStateTest, RecordedAudioArrivesAtSingleStream) { TEST(AudioStateTest, RecordedAudioArrivesAtSingleStream) {
ConfigHelper helper; for (bool use_null_audio_processing : {false, true}) {
rtc::scoped_refptr<internal::AudioState> audio_state( ConfigHelper helper(use_null_audio_processing);
new rtc::RefCountedObject<internal::AudioState>(helper.config())); rtc::scoped_refptr<internal::AudioState> audio_state(
new rtc::RefCountedObject<internal::AudioState>(helper.config()));
MockAudioSendStream stream; MockAudioSendStream stream;
audio_state->AddSendingStream(&stream, 8000, 2); audio_state->AddSendingStream(&stream, 8000, 2);
EXPECT_CALL( EXPECT_CALL(
stream, stream,
SendAudioDataForMock(::testing::AllOf( SendAudioDataForMock(::testing::AllOf(
::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(8000)), ::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(8000)),
::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(2u))))) ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(2u)))))
.WillOnce( .WillOnce(
// Verify that channels are not swapped by default. // Verify that channels are not swapped by default.
::testing::Invoke([](AudioFrame* audio_frame) { ::testing::Invoke([](AudioFrame* audio_frame) {
auto levels = ComputeChannelLevels(audio_frame); auto levels = ComputeChannelLevels(audio_frame);
EXPECT_LT(0u, levels[0]); EXPECT_LT(0u, levels[0]);
EXPECT_EQ(0u, levels[1]); EXPECT_EQ(0u, levels[1]);
})); }));
MockAudioProcessing* ap = MockAudioProcessing* ap = use_null_audio_processing
static_cast<MockAudioProcessing*>(audio_state->audio_processing()); ? nullptr
EXPECT_CALL(*ap, set_stream_delay_ms(0)); : static_cast<MockAudioProcessing*>(
EXPECT_CALL(*ap, set_stream_key_pressed(false)); audio_state->audio_processing());
EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_))); if (ap) {
EXPECT_CALL(*ap, set_stream_delay_ms(0));
EXPECT_CALL(*ap, set_stream_key_pressed(false));
EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_)));
}
constexpr int kSampleRate = 16000; constexpr int kSampleRate = 16000;
constexpr size_t kNumChannels = 2; constexpr size_t kNumChannels = 2;
auto audio_data = Create10msTestData(kSampleRate, kNumChannels); auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
uint32_t new_mic_level = 667; uint32_t new_mic_level = 667;
audio_state->audio_transport()->RecordedDataIsAvailable( audio_state->audio_transport()->RecordedDataIsAvailable(
&audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels, &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
kSampleRate, 0, 0, 0, false, new_mic_level); kSampleRate, 0, 0, 0, false, new_mic_level);
EXPECT_EQ(667u, new_mic_level); EXPECT_EQ(667u, new_mic_level);
audio_state->RemoveSendingStream(&stream); audio_state->RemoveSendingStream(&stream);
}
} }
TEST(AudioStateTest, RecordedAudioArrivesAtMultipleStreams) { TEST(AudioStateTest, RecordedAudioArrivesAtMultipleStreams) {
ConfigHelper helper; for (bool use_null_audio_processing : {false, true}) {
rtc::scoped_refptr<internal::AudioState> audio_state( ConfigHelper helper(use_null_audio_processing);
new rtc::RefCountedObject<internal::AudioState>(helper.config())); rtc::scoped_refptr<internal::AudioState> audio_state(
new rtc::RefCountedObject<internal::AudioState>(helper.config()));
MockAudioSendStream stream_1; MockAudioSendStream stream_1;
MockAudioSendStream stream_2; MockAudioSendStream stream_2;
audio_state->AddSendingStream(&stream_1, 8001, 2); audio_state->AddSendingStream(&stream_1, 8001, 2);
audio_state->AddSendingStream(&stream_2, 32000, 1); audio_state->AddSendingStream(&stream_2, 32000, 1);
EXPECT_CALL( EXPECT_CALL(
stream_1, stream_1,
SendAudioDataForMock(::testing::AllOf( SendAudioDataForMock(::testing::AllOf(
::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(16000)), ::testing::Field(&AudioFrame::sample_rate_hz_,
::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u))))) ::testing::Eq(16000)),
.WillOnce( ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u)))))
// Verify that there is output signal. .WillOnce(
::testing::Invoke([](AudioFrame* audio_frame) { // Verify that there is output signal.
auto levels = ComputeChannelLevels(audio_frame); ::testing::Invoke([](AudioFrame* audio_frame) {
EXPECT_LT(0u, levels[0]); auto levels = ComputeChannelLevels(audio_frame);
})); EXPECT_LT(0u, levels[0]);
EXPECT_CALL( }));
stream_2, EXPECT_CALL(
SendAudioDataForMock(::testing::AllOf( stream_2,
::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(16000)), SendAudioDataForMock(::testing::AllOf(
::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u))))) ::testing::Field(&AudioFrame::sample_rate_hz_,
.WillOnce( ::testing::Eq(16000)),
// Verify that there is output signal. ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u)))))
::testing::Invoke([](AudioFrame* audio_frame) { .WillOnce(
auto levels = ComputeChannelLevels(audio_frame); // Verify that there is output signal.
EXPECT_LT(0u, levels[0]); ::testing::Invoke([](AudioFrame* audio_frame) {
})); auto levels = ComputeChannelLevels(audio_frame);
MockAudioProcessing* ap = EXPECT_LT(0u, levels[0]);
static_cast<MockAudioProcessing*>(audio_state->audio_processing()); }));
EXPECT_CALL(*ap, set_stream_delay_ms(5)); MockAudioProcessing* ap =
EXPECT_CALL(*ap, set_stream_key_pressed(true)); static_cast<MockAudioProcessing*>(audio_state->audio_processing());
EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_))); if (ap) {
EXPECT_CALL(*ap, set_stream_delay_ms(5));
EXPECT_CALL(*ap, set_stream_key_pressed(true));
EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_)));
}
constexpr int kSampleRate = 16000; constexpr int kSampleRate = 16000;
constexpr size_t kNumChannels = 1; constexpr size_t kNumChannels = 1;
auto audio_data = Create10msTestData(kSampleRate, kNumChannels); auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
uint32_t new_mic_level = 667; uint32_t new_mic_level = 667;
audio_state->audio_transport()->RecordedDataIsAvailable( audio_state->audio_transport()->RecordedDataIsAvailable(
&audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels, &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
kSampleRate, 5, 0, 0, true, new_mic_level); kSampleRate, 5, 0, 0, true, new_mic_level);
EXPECT_EQ(667u, new_mic_level); EXPECT_EQ(667u, new_mic_level);
audio_state->RemoveSendingStream(&stream_1); audio_state->RemoveSendingStream(&stream_1);
audio_state->RemoveSendingStream(&stream_2); audio_state->RemoveSendingStream(&stream_2);
}
} }
TEST(AudioStateTest, EnableChannelSwap) { TEST(AudioStateTest, EnableChannelSwap) {
constexpr int kSampleRate = 16000; constexpr int kSampleRate = 16000;
constexpr size_t kNumChannels = 2; constexpr size_t kNumChannels = 2;
ConfigHelper helper; for (bool use_null_audio_processing : {false, true}) {
rtc::scoped_refptr<internal::AudioState> audio_state( ConfigHelper helper(use_null_audio_processing);
new rtc::RefCountedObject<internal::AudioState>(helper.config())); rtc::scoped_refptr<internal::AudioState> audio_state(
new rtc::RefCountedObject<internal::AudioState>(helper.config()));
audio_state->SetStereoChannelSwapping(true); audio_state->SetStereoChannelSwapping(true);
MockAudioSendStream stream; MockAudioSendStream stream;
audio_state->AddSendingStream(&stream, kSampleRate, kNumChannels); audio_state->AddSendingStream(&stream, kSampleRate, kNumChannels);
EXPECT_CALL(stream, SendAudioDataForMock(_)) EXPECT_CALL(stream, SendAudioDataForMock(_))
.WillOnce( .WillOnce(
// Verify that channels are swapped. // Verify that channels are swapped.
::testing::Invoke([](AudioFrame* audio_frame) { ::testing::Invoke([](AudioFrame* audio_frame) {
auto levels = ComputeChannelLevels(audio_frame); auto levels = ComputeChannelLevels(audio_frame);
EXPECT_EQ(0u, levels[0]); EXPECT_EQ(0u, levels[0]);
EXPECT_LT(0u, levels[1]); EXPECT_LT(0u, levels[1]);
})); }));
auto audio_data = Create10msTestData(kSampleRate, kNumChannels); auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
uint32_t new_mic_level = 667; uint32_t new_mic_level = 667;
audio_state->audio_transport()->RecordedDataIsAvailable( audio_state->audio_transport()->RecordedDataIsAvailable(
&audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels, &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
kSampleRate, 0, 0, 0, false, new_mic_level); kSampleRate, 0, 0, 0, false, new_mic_level);
EXPECT_EQ(667u, new_mic_level); EXPECT_EQ(667u, new_mic_level);
audio_state->RemoveSendingStream(&stream); audio_state->RemoveSendingStream(&stream);
}
} }
TEST(AudioStateTest, TEST(AudioStateTest,
QueryingTransportForAudioShouldResultInGetAudioCallOnMixerSource) { QueryingTransportForAudioShouldResultInGetAudioCallOnMixerSource) {
ConfigHelper helper; for (bool use_null_audio_processing : {false, true}) {
auto audio_state = AudioState::Create(helper.config()); ConfigHelper helper(use_null_audio_processing);
auto audio_state = AudioState::Create(helper.config());
FakeAudioSource fake_source; FakeAudioSource fake_source;
helper.mixer()->AddSource(&fake_source); helper.mixer()->AddSource(&fake_source);
EXPECT_CALL(fake_source, GetAudioFrameWithInfo(_, _)) EXPECT_CALL(fake_source, GetAudioFrameWithInfo(_, _))
.WillOnce( .WillOnce(
::testing::Invoke([](int sample_rate_hz, AudioFrame* audio_frame) { ::testing::Invoke([](int sample_rate_hz, AudioFrame* audio_frame) {
audio_frame->sample_rate_hz_ = sample_rate_hz; audio_frame->sample_rate_hz_ = sample_rate_hz;
audio_frame->samples_per_channel_ = sample_rate_hz / 100; audio_frame->samples_per_channel_ = sample_rate_hz / 100;
audio_frame->num_channels_ = kNumberOfChannels; audio_frame->num_channels_ = kNumberOfChannels;
return AudioMixer::Source::AudioFrameInfo::kNormal; return AudioMixer::Source::AudioFrameInfo::kNormal;
})); }));
int16_t audio_buffer[kSampleRate / 100 * kNumberOfChannels]; int16_t audio_buffer[kSampleRate / 100 * kNumberOfChannels];
size_t n_samples_out; size_t n_samples_out;
int64_t elapsed_time_ms; int64_t elapsed_time_ms;
int64_t ntp_time_ms; int64_t ntp_time_ms;
audio_state->audio_transport()->NeedMorePlayData( audio_state->audio_transport()->NeedMorePlayData(
kSampleRate / 100, kNumberOfChannels * 2, kNumberOfChannels, kSampleRate, kSampleRate / 100, kNumberOfChannels * 2, kNumberOfChannels,
audio_buffer, n_samples_out, &elapsed_time_ms, &ntp_time_ms); kSampleRate, audio_buffer, n_samples_out, &elapsed_time_ms,
&ntp_time_ms);
}
} }
} // namespace test } // namespace test
} // namespace webrtc } // namespace webrtc
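
A minimal sketch (not part of this CL) of the production-side case the use_null_audio_processing == true test arms above exercise: an AudioState may now be configured without an APM. The helper name CreateAudioStateWithoutApm and the header paths are illustrative assumptions, and the caller is assumed to supply a real AudioDeviceModule.

// Sketch only: AudioState configured with a null APM; capture/render paths
// then skip the APM steps.
#include "call/audio_state.h"                            // assumed path
#include "modules/audio_device/include/audio_device.h"   // assumed path
#include "modules/audio_mixer/audio_mixer_impl.h"        // assumed path

rtc::scoped_refptr<webrtc::AudioState> CreateAudioStateWithoutApm(
    rtc::scoped_refptr<webrtc::AudioDeviceModule> adm) {
  webrtc::AudioState::Config config;
  config.audio_mixer = webrtc::AudioMixerImpl::Create();
  config.audio_processing = nullptr;  // Permitted when no APM is available.
  config.audio_device_module = adm;
  return webrtc::AudioState::Create(config);
}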


@ -49,13 +49,15 @@ void ProcessCaptureFrame(uint32_t delay_ms,
bool swap_stereo_channels, bool swap_stereo_channels,
AudioProcessing* audio_processing, AudioProcessing* audio_processing,
AudioFrame* audio_frame) { AudioFrame* audio_frame) {
RTC_DCHECK(audio_processing);
RTC_DCHECK(audio_frame); RTC_DCHECK(audio_frame);
audio_processing->set_stream_delay_ms(delay_ms); if (audio_processing) {
audio_processing->set_stream_key_pressed(key_pressed); audio_processing->set_stream_delay_ms(delay_ms);
int error = ProcessAudioFrame(audio_processing, audio_frame); audio_processing->set_stream_key_pressed(key_pressed);
int error = ProcessAudioFrame(audio_processing, audio_frame);
RTC_DCHECK_EQ(0, error) << "ProcessStream() error: " << error;
}
RTC_DCHECK_EQ(0, error) << "ProcessStream() error: " << error;
if (swap_stereo_channels) { if (swap_stereo_channels) {
AudioFrameOperations::SwapStereoChannels(audio_frame); AudioFrameOperations::SwapStereoChannels(audio_frame);
} }
@ -85,7 +87,6 @@ AudioTransportImpl::AudioTransportImpl(AudioMixer* mixer,
AudioProcessing* audio_processing) AudioProcessing* audio_processing)
: audio_processing_(audio_processing), mixer_(mixer) { : audio_processing_(audio_processing), mixer_(mixer) {
RTC_DCHECK(mixer); RTC_DCHECK(mixer);
RTC_DCHECK(audio_processing);
} }
AudioTransportImpl::~AudioTransportImpl() {} AudioTransportImpl::~AudioTransportImpl() {}
@ -137,7 +138,8 @@ int32_t AudioTransportImpl::RecordedDataIsAvailable(
// if we're using this feature or not. // if we're using this feature or not.
// TODO(solenberg): GetConfig() takes a lock. Work around that. // TODO(solenberg): GetConfig() takes a lock. Work around that.
bool typing_detected = false; bool typing_detected = false;
if (audio_processing_->GetConfig().voice_detection.enabled) { if (audio_processing_ &&
audio_processing_->GetConfig().voice_detection.enabled) {
if (audio_frame->vad_activity_ != AudioFrame::kVadUnknown) { if (audio_frame->vad_activity_ != AudioFrame::kVadUnknown) {
bool vad_active = audio_frame->vad_activity_ == AudioFrame::kVadActive; bool vad_active = audio_frame->vad_activity_ == AudioFrame::kVadActive;
typing_detected = typing_detection_.Process(key_pressed, vad_active); typing_detected = typing_detection_.Process(key_pressed, vad_active);
@ -192,8 +194,11 @@ int32_t AudioTransportImpl::NeedMorePlayData(const size_t nSamples,
*elapsed_time_ms = mixed_frame_.elapsed_time_ms_; *elapsed_time_ms = mixed_frame_.elapsed_time_ms_;
*ntp_time_ms = mixed_frame_.ntp_time_ms_; *ntp_time_ms = mixed_frame_.ntp_time_ms_;
const auto error = ProcessReverseAudioFrame(audio_processing_, &mixed_frame_); if (audio_processing_) {
RTC_DCHECK_EQ(error, AudioProcessing::kNoError); const auto error =
ProcessReverseAudioFrame(audio_processing_, &mixed_frame_);
RTC_DCHECK_EQ(error, AudioProcessing::kNoError);
}
nSamplesOut = Resample(mixed_frame_, samplesPerSec, &render_resampler_, nSamplesOut = Resample(mixed_frame_, samplesPerSec, &render_resampler_,
static_cast<int16_t*>(audioSamples)); static_cast<int16_t*>(audioSamples));


@ -35,13 +35,15 @@
namespace { namespace {
struct CallHelper { struct CallHelper {
CallHelper() { explicit CallHelper(bool use_null_audio_processing) {
task_queue_factory_ = webrtc::CreateDefaultTaskQueueFactory(); task_queue_factory_ = webrtc::CreateDefaultTaskQueueFactory();
webrtc::AudioState::Config audio_state_config; webrtc::AudioState::Config audio_state_config;
audio_state_config.audio_mixer = audio_state_config.audio_mixer =
new rtc::RefCountedObject<webrtc::test::MockAudioMixer>(); new rtc::RefCountedObject<webrtc::test::MockAudioMixer>();
audio_state_config.audio_processing = audio_state_config.audio_processing =
new rtc::RefCountedObject<webrtc::test::MockAudioProcessing>(); use_null_audio_processing
? nullptr
: new rtc::RefCountedObject<webrtc::test::MockAudioProcessing>();
audio_state_config.audio_device_module = audio_state_config.audio_device_module =
new rtc::RefCountedObject<webrtc::test::MockAudioDeviceModule>(); new rtc::RefCountedObject<webrtc::test::MockAudioDeviceModule>();
webrtc::Call::Config config(&event_log_); webrtc::Call::Config config(&event_log_);
@ -64,236 +66,261 @@ struct CallHelper {
namespace webrtc { namespace webrtc {
TEST(CallTest, ConstructDestruct) { TEST(CallTest, ConstructDestruct) {
CallHelper call; for (bool use_null_audio_processing : {false, true}) {
CallHelper call(use_null_audio_processing);
}
} }
TEST(CallTest, CreateDestroy_AudioSendStream) { TEST(CallTest, CreateDestroy_AudioSendStream) {
CallHelper call; for (bool use_null_audio_processing : {false, true}) {
MockTransport send_transport; CallHelper call(use_null_audio_processing);
AudioSendStream::Config config(&send_transport); MockTransport send_transport;
config.rtp.ssrc = 42; AudioSendStream::Config config(&send_transport);
AudioSendStream* stream = call->CreateAudioSendStream(config); config.rtp.ssrc = 42;
EXPECT_NE(stream, nullptr); AudioSendStream* stream = call->CreateAudioSendStream(config);
call->DestroyAudioSendStream(stream); EXPECT_NE(stream, nullptr);
call->DestroyAudioSendStream(stream);
}
} }
TEST(CallTest, CreateDestroy_AudioReceiveStream) { TEST(CallTest, CreateDestroy_AudioReceiveStream) {
CallHelper call; for (bool use_null_audio_processing : {false, true}) {
AudioReceiveStream::Config config; CallHelper call(use_null_audio_processing);
MockTransport rtcp_send_transport; AudioReceiveStream::Config config;
config.rtp.remote_ssrc = 42; MockTransport rtcp_send_transport;
config.rtcp_send_transport = &rtcp_send_transport; config.rtp.remote_ssrc = 42;
config.decoder_factory = config.rtcp_send_transport = &rtcp_send_transport;
new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>(); config.decoder_factory =
AudioReceiveStream* stream = call->CreateAudioReceiveStream(config); new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
EXPECT_NE(stream, nullptr); AudioReceiveStream* stream = call->CreateAudioReceiveStream(config);
call->DestroyAudioReceiveStream(stream); EXPECT_NE(stream, nullptr);
call->DestroyAudioReceiveStream(stream);
}
} }
TEST(CallTest, CreateDestroy_AudioSendStreams) { TEST(CallTest, CreateDestroy_AudioSendStreams) {
CallHelper call; for (bool use_null_audio_processing : {false, true}) {
MockTransport send_transport; CallHelper call(use_null_audio_processing);
AudioSendStream::Config config(&send_transport); MockTransport send_transport;
std::list<AudioSendStream*> streams; AudioSendStream::Config config(&send_transport);
for (int i = 0; i < 2; ++i) { std::list<AudioSendStream*> streams;
for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) { for (int i = 0; i < 2; ++i) {
config.rtp.ssrc = ssrc; for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
AudioSendStream* stream = call->CreateAudioSendStream(config); config.rtp.ssrc = ssrc;
EXPECT_NE(stream, nullptr); AudioSendStream* stream = call->CreateAudioSendStream(config);
if (ssrc & 1) { EXPECT_NE(stream, nullptr);
streams.push_back(stream); if (ssrc & 1) {
} else { streams.push_back(stream);
streams.push_front(stream); } else {
streams.push_front(stream);
}
} }
for (auto s : streams) {
call->DestroyAudioSendStream(s);
}
streams.clear();
} }
for (auto s : streams) {
call->DestroyAudioSendStream(s);
}
streams.clear();
} }
} }
TEST(CallTest, CreateDestroy_AudioReceiveStreams) { TEST(CallTest, CreateDestroy_AudioReceiveStreams) {
CallHelper call; for (bool use_null_audio_processing : {false, true}) {
AudioReceiveStream::Config config; CallHelper call(use_null_audio_processing);
MockTransport rtcp_send_transport; AudioReceiveStream::Config config;
config.rtcp_send_transport = &rtcp_send_transport; MockTransport rtcp_send_transport;
config.decoder_factory = config.rtcp_send_transport = &rtcp_send_transport;
new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>(); config.decoder_factory =
std::list<AudioReceiveStream*> streams; new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
for (int i = 0; i < 2; ++i) { std::list<AudioReceiveStream*> streams;
for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) { for (int i = 0; i < 2; ++i) {
config.rtp.remote_ssrc = ssrc; for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
AudioReceiveStream* stream = call->CreateAudioReceiveStream(config); config.rtp.remote_ssrc = ssrc;
EXPECT_NE(stream, nullptr); AudioReceiveStream* stream = call->CreateAudioReceiveStream(config);
if (ssrc & 1) { EXPECT_NE(stream, nullptr);
streams.push_back(stream); if (ssrc & 1) {
} else { streams.push_back(stream);
streams.push_front(stream); } else {
streams.push_front(stream);
}
} }
for (auto s : streams) {
call->DestroyAudioReceiveStream(s);
}
streams.clear();
} }
for (auto s : streams) {
call->DestroyAudioReceiveStream(s);
}
streams.clear();
} }
} }
TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_RecvFirst) { TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_RecvFirst) {
CallHelper call; for (bool use_null_audio_processing : {false, true}) {
AudioReceiveStream::Config recv_config; CallHelper call(use_null_audio_processing);
MockTransport rtcp_send_transport; AudioReceiveStream::Config recv_config;
recv_config.rtp.remote_ssrc = 42; MockTransport rtcp_send_transport;
recv_config.rtp.local_ssrc = 777; recv_config.rtp.remote_ssrc = 42;
recv_config.rtcp_send_transport = &rtcp_send_transport; recv_config.rtp.local_ssrc = 777;
recv_config.decoder_factory = recv_config.rtcp_send_transport = &rtcp_send_transport;
new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>(); recv_config.decoder_factory =
AudioReceiveStream* recv_stream = call->CreateAudioReceiveStream(recv_config); new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
EXPECT_NE(recv_stream, nullptr); AudioReceiveStream* recv_stream =
call->CreateAudioReceiveStream(recv_config);
EXPECT_NE(recv_stream, nullptr);
MockTransport send_transport; MockTransport send_transport;
AudioSendStream::Config send_config(&send_transport); AudioSendStream::Config send_config(&send_transport);
send_config.rtp.ssrc = 777; send_config.rtp.ssrc = 777;
AudioSendStream* send_stream = call->CreateAudioSendStream(send_config); AudioSendStream* send_stream = call->CreateAudioSendStream(send_config);
EXPECT_NE(send_stream, nullptr); EXPECT_NE(send_stream, nullptr);
internal::AudioReceiveStream* internal_recv_stream = internal::AudioReceiveStream* internal_recv_stream =
static_cast<internal::AudioReceiveStream*>(recv_stream); static_cast<internal::AudioReceiveStream*>(recv_stream);
EXPECT_EQ(send_stream, EXPECT_EQ(send_stream,
internal_recv_stream->GetAssociatedSendStreamForTesting()); internal_recv_stream->GetAssociatedSendStreamForTesting());
call->DestroyAudioSendStream(send_stream); call->DestroyAudioSendStream(send_stream);
EXPECT_EQ(nullptr, internal_recv_stream->GetAssociatedSendStreamForTesting()); EXPECT_EQ(nullptr,
internal_recv_stream->GetAssociatedSendStreamForTesting());
call->DestroyAudioReceiveStream(recv_stream); call->DestroyAudioReceiveStream(recv_stream);
}
} }
TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_SendFirst) { TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_SendFirst) {
CallHelper call; for (bool use_null_audio_processing : {false, true}) {
MockTransport send_transport; CallHelper call(use_null_audio_processing);
AudioSendStream::Config send_config(&send_transport); MockTransport send_transport;
send_config.rtp.ssrc = 777; AudioSendStream::Config send_config(&send_transport);
AudioSendStream* send_stream = call->CreateAudioSendStream(send_config); send_config.rtp.ssrc = 777;
EXPECT_NE(send_stream, nullptr); AudioSendStream* send_stream = call->CreateAudioSendStream(send_config);
EXPECT_NE(send_stream, nullptr);
AudioReceiveStream::Config recv_config; AudioReceiveStream::Config recv_config;
MockTransport rtcp_send_transport; MockTransport rtcp_send_transport;
recv_config.rtp.remote_ssrc = 42; recv_config.rtp.remote_ssrc = 42;
recv_config.rtp.local_ssrc = 777; recv_config.rtp.local_ssrc = 777;
recv_config.rtcp_send_transport = &rtcp_send_transport; recv_config.rtcp_send_transport = &rtcp_send_transport;
recv_config.decoder_factory = recv_config.decoder_factory =
new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>(); new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
AudioReceiveStream* recv_stream = call->CreateAudioReceiveStream(recv_config); AudioReceiveStream* recv_stream =
EXPECT_NE(recv_stream, nullptr); call->CreateAudioReceiveStream(recv_config);
EXPECT_NE(recv_stream, nullptr);
internal::AudioReceiveStream* internal_recv_stream = internal::AudioReceiveStream* internal_recv_stream =
static_cast<internal::AudioReceiveStream*>(recv_stream); static_cast<internal::AudioReceiveStream*>(recv_stream);
EXPECT_EQ(send_stream, EXPECT_EQ(send_stream,
internal_recv_stream->GetAssociatedSendStreamForTesting()); internal_recv_stream->GetAssociatedSendStreamForTesting());
call->DestroyAudioReceiveStream(recv_stream); call->DestroyAudioReceiveStream(recv_stream);
call->DestroyAudioSendStream(send_stream); call->DestroyAudioSendStream(send_stream);
}
} }
TEST(CallTest, CreateDestroy_FlexfecReceiveStream) { TEST(CallTest, CreateDestroy_FlexfecReceiveStream) {
CallHelper call; for (bool use_null_audio_processing : {false, true}) {
MockTransport rtcp_send_transport; CallHelper call(use_null_audio_processing);
FlexfecReceiveStream::Config config(&rtcp_send_transport); MockTransport rtcp_send_transport;
config.payload_type = 118; FlexfecReceiveStream::Config config(&rtcp_send_transport);
config.remote_ssrc = 38837212; config.payload_type = 118;
config.protected_media_ssrcs = {27273}; config.remote_ssrc = 38837212;
config.protected_media_ssrcs = {27273};
FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config); FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config);
EXPECT_NE(stream, nullptr); EXPECT_NE(stream, nullptr);
call->DestroyFlexfecReceiveStream(stream); call->DestroyFlexfecReceiveStream(stream);
}
} }
TEST(CallTest, CreateDestroy_FlexfecReceiveStreams) { TEST(CallTest, CreateDestroy_FlexfecReceiveStreams) {
CallHelper call; for (bool use_null_audio_processing : {false, true}) {
MockTransport rtcp_send_transport; CallHelper call(use_null_audio_processing);
FlexfecReceiveStream::Config config(&rtcp_send_transport); MockTransport rtcp_send_transport;
config.payload_type = 118; FlexfecReceiveStream::Config config(&rtcp_send_transport);
std::list<FlexfecReceiveStream*> streams; config.payload_type = 118;
std::list<FlexfecReceiveStream*> streams;
for (int i = 0; i < 2; ++i) { for (int i = 0; i < 2; ++i) {
for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) { for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
config.remote_ssrc = ssrc; config.remote_ssrc = ssrc;
config.protected_media_ssrcs = {ssrc + 1}; config.protected_media_ssrcs = {ssrc + 1};
FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config); FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config);
EXPECT_NE(stream, nullptr); EXPECT_NE(stream, nullptr);
if (ssrc & 1) { if (ssrc & 1) {
streams.push_back(stream); streams.push_back(stream);
} else { } else {
streams.push_front(stream); streams.push_front(stream);
}
} }
for (auto s : streams) {
call->DestroyFlexfecReceiveStream(s);
}
streams.clear();
} }
for (auto s : streams) {
call->DestroyFlexfecReceiveStream(s);
}
streams.clear();
} }
} }
TEST(CallTest, MultipleFlexfecReceiveStreamsProtectingSingleVideoStream) { TEST(CallTest, MultipleFlexfecReceiveStreamsProtectingSingleVideoStream) {
CallHelper call; for (bool use_null_audio_processing : {false, true}) {
MockTransport rtcp_send_transport; CallHelper call(use_null_audio_processing);
FlexfecReceiveStream::Config config(&rtcp_send_transport); MockTransport rtcp_send_transport;
config.payload_type = 118; FlexfecReceiveStream::Config config(&rtcp_send_transport);
config.protected_media_ssrcs = {1324234}; config.payload_type = 118;
FlexfecReceiveStream* stream; config.protected_media_ssrcs = {1324234};
std::list<FlexfecReceiveStream*> streams; FlexfecReceiveStream* stream;
std::list<FlexfecReceiveStream*> streams;
config.remote_ssrc = 838383; config.remote_ssrc = 838383;
stream = call->CreateFlexfecReceiveStream(config); stream = call->CreateFlexfecReceiveStream(config);
EXPECT_NE(stream, nullptr); EXPECT_NE(stream, nullptr);
streams.push_back(stream); streams.push_back(stream);
config.remote_ssrc = 424993; config.remote_ssrc = 424993;
stream = call->CreateFlexfecReceiveStream(config); stream = call->CreateFlexfecReceiveStream(config);
EXPECT_NE(stream, nullptr); EXPECT_NE(stream, nullptr);
streams.push_back(stream); streams.push_back(stream);
config.remote_ssrc = 99383; config.remote_ssrc = 99383;
stream = call->CreateFlexfecReceiveStream(config); stream = call->CreateFlexfecReceiveStream(config);
EXPECT_NE(stream, nullptr); EXPECT_NE(stream, nullptr);
streams.push_back(stream); streams.push_back(stream);
config.remote_ssrc = 5548; config.remote_ssrc = 5548;
stream = call->CreateFlexfecReceiveStream(config); stream = call->CreateFlexfecReceiveStream(config);
EXPECT_NE(stream, nullptr); EXPECT_NE(stream, nullptr);
streams.push_back(stream); streams.push_back(stream);
for (auto s : streams) { for (auto s : streams) {
call->DestroyFlexfecReceiveStream(s); call->DestroyFlexfecReceiveStream(s);
}
} }
} }
TEST(CallTest, RecreatingAudioStreamWithSameSsrcReusesRtpState) { TEST(CallTest, RecreatingAudioStreamWithSameSsrcReusesRtpState) {
constexpr uint32_t kSSRC = 12345; constexpr uint32_t kSSRC = 12345;
CallHelper call; for (bool use_null_audio_processing : {false, true}) {
CallHelper call(use_null_audio_processing);
auto create_stream_and_get_rtp_state = [&](uint32_t ssrc) { auto create_stream_and_get_rtp_state = [&](uint32_t ssrc) {
MockTransport send_transport; MockTransport send_transport;
AudioSendStream::Config config(&send_transport); AudioSendStream::Config config(&send_transport);
config.rtp.ssrc = ssrc; config.rtp.ssrc = ssrc;
AudioSendStream* stream = call->CreateAudioSendStream(config); AudioSendStream* stream = call->CreateAudioSendStream(config);
const RtpState rtp_state = const RtpState rtp_state =
static_cast<internal::AudioSendStream*>(stream)->GetRtpState(); static_cast<internal::AudioSendStream*>(stream)->GetRtpState();
call->DestroyAudioSendStream(stream); call->DestroyAudioSendStream(stream);
return rtp_state; return rtp_state;
}; };
const RtpState rtp_state1 = create_stream_and_get_rtp_state(kSSRC); const RtpState rtp_state1 = create_stream_and_get_rtp_state(kSSRC);
const RtpState rtp_state2 = create_stream_and_get_rtp_state(kSSRC); const RtpState rtp_state2 = create_stream_and_get_rtp_state(kSSRC);
EXPECT_EQ(rtp_state1.sequence_number, rtp_state2.sequence_number); EXPECT_EQ(rtp_state1.sequence_number, rtp_state2.sequence_number);
EXPECT_EQ(rtp_state1.start_timestamp, rtp_state2.start_timestamp); EXPECT_EQ(rtp_state1.start_timestamp, rtp_state2.start_timestamp);
EXPECT_EQ(rtp_state1.timestamp, rtp_state2.timestamp); EXPECT_EQ(rtp_state1.timestamp, rtp_state2.timestamp);
EXPECT_EQ(rtp_state1.capture_time_ms, rtp_state2.capture_time_ms); EXPECT_EQ(rtp_state1.capture_time_ms, rtp_state2.capture_time_ms);
EXPECT_EQ(rtp_state1.last_timestamp_time_ms, EXPECT_EQ(rtp_state1.last_timestamp_time_ms,
rtp_state2.last_timestamp_time_ms); rtp_state2.last_timestamp_time_ms);
EXPECT_EQ(rtp_state1.media_has_been_sent, rtp_state2.media_has_been_sent); EXPECT_EQ(rtp_state1.media_has_been_sent, rtp_state2.media_has_been_sent);
}
} }
} // namespace webrtc } // namespace webrtc


@ -206,7 +206,6 @@ WebRtcVoiceEngine::WebRtcVoiceEngine(
RTC_LOG(LS_INFO) << "WebRtcVoiceEngine::WebRtcVoiceEngine"; RTC_LOG(LS_INFO) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
RTC_DCHECK(decoder_factory); RTC_DCHECK(decoder_factory);
RTC_DCHECK(encoder_factory); RTC_DCHECK(encoder_factory);
RTC_DCHECK(audio_processing);
// The rest of our initialization will happen in Init. // The rest of our initialization will happen in Init.
} }
@ -458,6 +457,14 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
*options.audio_jitter_buffer_enable_rtx_handling; *options.audio_jitter_buffer_enable_rtx_handling;
} }
webrtc::AudioProcessing* ap = apm();
if (!ap) {
RTC_LOG(LS_INFO)
<< "No audio processing module present. No software-provided effects "
"(AEC, NS, AGC, ...) are activated";
return true;
}
webrtc::Config config; webrtc::Config config;
if (options.experimental_ns) { if (options.experimental_ns) {
@ -469,7 +476,7 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
new webrtc::ExperimentalNs(*experimental_ns_)); new webrtc::ExperimentalNs(*experimental_ns_));
} }
webrtc::AudioProcessing::Config apm_config = apm()->GetConfig(); webrtc::AudioProcessing::Config apm_config = ap->GetConfig();
if (options.echo_cancellation) { if (options.echo_cancellation) {
apm_config.echo_canceller.enabled = *options.echo_cancellation; apm_config.echo_canceller.enabled = *options.echo_cancellation;
@ -524,8 +531,8 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
apm_config.voice_detection.enabled = *options.typing_detection; apm_config.voice_detection.enabled = *options.typing_detection;
} }
apm()->SetExtraOptions(config); ap->SetExtraOptions(config);
apm()->ApplyConfig(apm_config); ap->ApplyConfig(apm_config);
return true; return true;
} }
@ -571,18 +578,34 @@ void WebRtcVoiceEngine::UnregisterChannel(WebRtcVoiceMediaChannel* channel) {
bool WebRtcVoiceEngine::StartAecDump(webrtc::FileWrapper file, bool WebRtcVoiceEngine::StartAecDump(webrtc::FileWrapper file,
int64_t max_size_bytes) { int64_t max_size_bytes) {
RTC_DCHECK(worker_thread_checker_.IsCurrent()); RTC_DCHECK(worker_thread_checker_.IsCurrent());
webrtc::AudioProcessing* ap = apm();
if (!ap) {
RTC_LOG(LS_WARNING)
<< "Attempting to start aecdump when no audio processing module is "
"present, hence no aecdump is started.";
return false;
}
auto aec_dump = webrtc::AecDumpFactory::Create( auto aec_dump = webrtc::AecDumpFactory::Create(
std::move(file), max_size_bytes, low_priority_worker_queue_.get()); std::move(file), max_size_bytes, low_priority_worker_queue_.get());
if (!aec_dump) { if (!aec_dump) {
return false; return false;
} }
apm()->AttachAecDump(std::move(aec_dump));
ap->AttachAecDump(std::move(aec_dump));
return true; return true;
} }
void WebRtcVoiceEngine::StopAecDump() { void WebRtcVoiceEngine::StopAecDump() {
RTC_DCHECK(worker_thread_checker_.IsCurrent()); RTC_DCHECK(worker_thread_checker_.IsCurrent());
apm()->DetachAecDump(); webrtc::AudioProcessing* ap = apm();
if (ap) {
ap->DetachAecDump();
} else {
RTC_LOG(LS_WARNING) << "Attempting to stop aecdump when no audio "
"processing module is present";
}
} }
webrtc::AudioDeviceModule* WebRtcVoiceEngine::adm() { webrtc::AudioDeviceModule* WebRtcVoiceEngine::adm() {
@ -593,7 +616,6 @@ webrtc::AudioDeviceModule* WebRtcVoiceEngine::adm() {
webrtc::AudioProcessing* WebRtcVoiceEngine::apm() const { webrtc::AudioProcessing* WebRtcVoiceEngine::apm() const {
RTC_DCHECK(worker_thread_checker_.IsCurrent()); RTC_DCHECK(worker_thread_checker_.IsCurrent());
RTC_DCHECK(apm_);
return apm_.get(); return apm_.get();
} }
@ -2141,7 +2163,10 @@ bool WebRtcVoiceMediaChannel::MuteStream(uint32_t ssrc, bool muted) {
for (const auto& kv : send_streams_) { for (const auto& kv : send_streams_) {
all_muted = all_muted && kv.second->muted(); all_muted = all_muted && kv.second->muted();
} }
engine()->apm()->set_output_will_be_muted(all_muted); webrtc::AudioProcessing* ap = engine()->apm();
if (ap) {
ap->set_output_will_be_muted(all_muted);
}
return true; return true;
} }
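
Usage note, sketched as a hypothetical helper rather than code from this CL: since apm() may now return nullptr, embedders that adjust APM settings have to guard the access themselves, in the same way ApplyOptions(), StartAecDump() and MuteStream() do above. Only GetConfig()/ApplyConfig() calls that already appear in these hunks are used.

// Hypothetical caller-side guard mirroring the null handling in this CL.
#include "modules/audio_processing/include/audio_processing.h"
#include "rtc_base/logging.h"

void MaybeEnableEchoCancellation(webrtc::AudioProcessing* apm) {
  if (!apm) {
    // No APM in this build/configuration: software AEC cannot be enabled.
    RTC_LOG(LS_INFO) << "No audio processing module; skipping AEC setup.";
    return;
  }
  webrtc::AudioProcessing::Config config = apm->GetConfig();
  config.echo_canceller.enabled = true;
  apm->ApplyConfig(config);
}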

File diff suppressed because it is too large.


@ -116,6 +116,7 @@ rtc_library("audio_processing") {
visibility = [ "*" ] visibility = [ "*" ]
configs += [ ":apm_debug_dump" ] configs += [ ":apm_debug_dump" ]
sources = [ sources = [
"audio_processing_builder_impl.cc",
"audio_processing_impl.cc", "audio_processing_impl.cc",
"audio_processing_impl.h", "audio_processing_impl.h",
"common.h", "common.h",
@ -169,6 +170,7 @@ rtc_library("audio_processing") {
"../../rtc_base:deprecation", "../../rtc_base:deprecation",
"../../rtc_base:gtest_prod", "../../rtc_base:gtest_prod",
"../../rtc_base:ignore_wundef", "../../rtc_base:ignore_wundef",
"../../rtc_base:refcount",
"../../rtc_base:safe_minmax", "../../rtc_base:safe_minmax",
"../../rtc_base:sanitizer", "../../rtc_base:sanitizer",
"../../rtc_base/system:rtc_export", "../../rtc_base/system:rtc_export",
@ -556,41 +558,6 @@ if (rtc_include_tests) {
} # audioproc_f_impl } # audioproc_f_impl
} }
rtc_library("audioproc_test_utils") {
visibility = [ "*" ]
testonly = true
sources = [
"test/audio_buffer_tools.cc",
"test/audio_buffer_tools.h",
"test/bitexactness_tools.cc",
"test/bitexactness_tools.h",
"test/performance_timer.cc",
"test/performance_timer.h",
"test/simulator_buffers.cc",
"test/simulator_buffers.h",
"test/test_utils.cc",
"test/test_utils.h",
]
deps = [
":api",
":audio_buffer",
":audio_processing",
"../../api:array_view",
"../../api/audio:audio_frame_api",
"../../common_audio",
"../../rtc_base:checks",
"../../rtc_base:rtc_base_approved",
"../../rtc_base/system:arch",
"../../system_wrappers",
"../../test:fileutils",
"../../test:test_support",
"../audio_coding:neteq_input_audio_tools",
"//testing/gtest",
"//third_party/abseil-cpp/absl/types:optional",
]
}
if (rtc_enable_protobuf) { if (rtc_enable_protobuf) {
proto_library("audioproc_unittest_proto") { proto_library("audioproc_unittest_proto") {
sources = [ "test/unittest.proto" ] sources = [ "test/unittest.proto" ]
@ -629,3 +596,42 @@ if (rtc_include_tests) {
} }
} }
} }
rtc_library("audioproc_test_utils") {
visibility = [ "*" ]
testonly = true
sources = [
"test/audio_buffer_tools.cc",
"test/audio_buffer_tools.h",
"test/audio_processing_builder_for_testing.cc",
"test/audio_processing_builder_for_testing.h",
"test/bitexactness_tools.cc",
"test/bitexactness_tools.h",
"test/performance_timer.cc",
"test/performance_timer.h",
"test/simulator_buffers.cc",
"test/simulator_buffers.h",
"test/test_utils.cc",
"test/test_utils.h",
]
configs += [ ":apm_debug_dump" ]
deps = [
":api",
":audio_buffer",
":audio_processing",
"../../api:array_view",
"../../api/audio:audio_frame_api",
"../../common_audio",
"../../rtc_base:checks",
"../../rtc_base:rtc_base_approved",
"../../rtc_base/system:arch",
"../../system_wrappers",
"../../test:fileutils",
"../../test:test_support",
"../audio_coding:neteq_input_audio_tools",
"//testing/gtest",
"//third_party/abseil-cpp/absl/types:optional",
]
}


@ -20,31 +20,35 @@ rtc_source_set("aec_dump") {
] ]
} }
rtc_library("mock_aec_dump") { if (rtc_include_tests) {
testonly = true rtc_library("mock_aec_dump") {
sources = [ testonly = true
"mock_aec_dump.cc", sources = [
"mock_aec_dump.h", "mock_aec_dump.cc",
] "mock_aec_dump.h",
]
deps = [ deps = [
"../", "..:audioproc_test_utils",
"../../../test:test_support", "../",
] "../../../test:test_support",
} ]
}
rtc_library("mock_aec_dump_unittests") { rtc_library("mock_aec_dump_unittests") {
testonly = true testonly = true
configs += [ "..:apm_debug_dump" ] configs += [ "..:apm_debug_dump" ]
sources = [ "aec_dump_integration_test.cc" ] sources = [ "aec_dump_integration_test.cc" ]
deps = [ deps = [
":mock_aec_dump", ":mock_aec_dump",
"..:api", "..:api",
"../", "..:audioproc_test_utils",
"../../../rtc_base:rtc_base_approved", "../",
"//testing/gtest", "../../../rtc_base:rtc_base_approved",
] "//testing/gtest",
]
}
} }
if (rtc_enable_protobuf) { if (rtc_enable_protobuf) {
@ -75,20 +79,22 @@ if (rtc_enable_protobuf) {
deps += [ "../:audioproc_debug_proto" ] deps += [ "../:audioproc_debug_proto" ]
} }
rtc_library("aec_dump_unittests") { if (rtc_include_tests) {
testonly = true rtc_library("aec_dump_unittests") {
defines = [] testonly = true
deps = [ defines = []
":aec_dump", deps = [
":aec_dump_impl", ":aec_dump",
"..:audioproc_debug_proto", ":aec_dump_impl",
"../", "..:audioproc_debug_proto",
"../../../rtc_base:task_queue_for_test", "../",
"../../../test:fileutils", "../../../rtc_base:task_queue_for_test",
"../../../test:test_support", "../../../test:fileutils",
"//testing/gtest", "../../../test:test_support",
] "//testing/gtest",
sources = [ "aec_dump_unittest.cc" ] ]
sources = [ "aec_dump_unittest.cc" ]
}
} }
} }


@ -15,6 +15,7 @@
#include "modules/audio_processing/aec_dump/mock_aec_dump.h" #include "modules/audio_processing/aec_dump/mock_aec_dump.h"
#include "modules/audio_processing/audio_processing_impl.h" #include "modules/audio_processing/audio_processing_impl.h"
#include "modules/audio_processing/include/audio_processing.h" #include "modules/audio_processing/include/audio_processing.h"
#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
using ::testing::_; using ::testing::_;
using ::testing::AtLeast; using ::testing::AtLeast;
@ -25,7 +26,7 @@ namespace {
std::unique_ptr<webrtc::AudioProcessing> CreateAudioProcessing() { std::unique_ptr<webrtc::AudioProcessing> CreateAudioProcessing() {
webrtc::Config config; webrtc::Config config;
std::unique_ptr<webrtc::AudioProcessing> apm( std::unique_ptr<webrtc::AudioProcessing> apm(
webrtc::AudioProcessingBuilder().Create(config)); webrtc::AudioProcessingBuilderForTesting().Create(config));
RTC_DCHECK(apm); RTC_DCHECK(apm);
return apm; return apm;
} }


@ -0,0 +1,51 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_processing/include/audio_processing.h"
#include <memory>
#include "modules/audio_processing/audio_processing_impl.h"
#include "rtc_base/ref_counted_object.h"
namespace webrtc {
AudioProcessingBuilder::AudioProcessingBuilder() = default;
AudioProcessingBuilder::~AudioProcessingBuilder() = default;
AudioProcessing* AudioProcessingBuilder::Create() {
webrtc::Config config;
return Create(config);
}
AudioProcessing* AudioProcessingBuilder::Create(const webrtc::Config& config) {
#ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE
// Implementation that returns a null pointer, used when the APM is excluded
// from the build.
return nullptr;
#else
// Standard implementation.
AudioProcessingImpl* apm = new rtc::RefCountedObject<AudioProcessingImpl>(
config, std::move(capture_post_processing_),
std::move(render_pre_processing_), std::move(echo_control_factory_),
std::move(echo_detector_), std::move(capture_analyzer_));
if (apm->Initialize() != AudioProcessing::kNoError) {
delete apm;
apm = nullptr;
}
return apm;
#endif
}
} // namespace webrtc
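
One consequence of the above, sketched here as a hypothetical helper rather than code from the CL: with WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE defined, AudioProcessingBuilder::Create() returns nullptr, so creation sites should treat a null result as "no APM available" instead of as a hard error.

// Hypothetical creation helper illustrating the null-return contract.
#include "modules/audio_processing/include/audio_processing.h"
#include "rtc_base/logging.h"

rtc::scoped_refptr<webrtc::AudioProcessing> MaybeCreateApm() {
  rtc::scoped_refptr<webrtc::AudioProcessing> apm(
      webrtc::AudioProcessingBuilder().Create());
  if (!apm) {
    // Either the build excludes the APM or initialization failed; downstream
    // consumers (AudioState, the voice engine, ...) accept a null APM.
    RTC_LOG(LS_INFO) << "Audio processing module not available.";
  }
  return apm;
}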


@ -229,56 +229,6 @@ bool AudioProcessingImpl::SubmoduleStates::HighPassFilteringRequired() const {
noise_suppressor_enabled_; noise_suppressor_enabled_;
} }
AudioProcessingBuilder::AudioProcessingBuilder() = default;
AudioProcessingBuilder::~AudioProcessingBuilder() = default;
AudioProcessingBuilder& AudioProcessingBuilder::SetCapturePostProcessing(
std::unique_ptr<CustomProcessing> capture_post_processing) {
capture_post_processing_ = std::move(capture_post_processing);
return *this;
}
AudioProcessingBuilder& AudioProcessingBuilder::SetRenderPreProcessing(
std::unique_ptr<CustomProcessing> render_pre_processing) {
render_pre_processing_ = std::move(render_pre_processing);
return *this;
}
AudioProcessingBuilder& AudioProcessingBuilder::SetCaptureAnalyzer(
std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) {
capture_analyzer_ = std::move(capture_analyzer);
return *this;
}
AudioProcessingBuilder& AudioProcessingBuilder::SetEchoControlFactory(
std::unique_ptr<EchoControlFactory> echo_control_factory) {
echo_control_factory_ = std::move(echo_control_factory);
return *this;
}
AudioProcessingBuilder& AudioProcessingBuilder::SetEchoDetector(
rtc::scoped_refptr<EchoDetector> echo_detector) {
echo_detector_ = std::move(echo_detector);
return *this;
}
AudioProcessing* AudioProcessingBuilder::Create() {
webrtc::Config config;
return Create(config);
}
AudioProcessing* AudioProcessingBuilder::Create(const webrtc::Config& config) {
AudioProcessingImpl* apm = new rtc::RefCountedObject<AudioProcessingImpl>(
config, std::move(capture_post_processing_),
std::move(render_pre_processing_), std::move(echo_control_factory_),
std::move(echo_detector_), std::move(capture_analyzer_));
if (apm->Initialize() != AudioProcessing::kNoError) {
delete apm;
apm = nullptr;
}
return apm;
}
AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config) AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config)
: AudioProcessingImpl(config, : AudioProcessingImpl(config,
/*capture_post_processor=*/nullptr, /*capture_post_processor=*/nullptr,


@ -14,6 +14,7 @@
#include "api/array_view.h" #include "api/array_view.h"
#include "modules/audio_processing/audio_processing_impl.h" #include "modules/audio_processing/audio_processing_impl.h"
#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/test_utils.h" #include "modules/audio_processing/test/test_utils.h"
#include "rtc_base/critical_section.h" #include "rtc_base/critical_section.h"
#include "rtc_base/event.h" #include "rtc_base/event.h"
@ -496,7 +497,7 @@ AudioProcessingImplLockTest::AudioProcessingImplLockTest()
this, this,
"stats", "stats",
rtc::kNormalPriority), rtc::kNormalPriority),
apm_(AudioProcessingBuilder().Create()), apm_(AudioProcessingBuilderForTesting().Create()),
render_thread_state_(kMaxFrameSize, render_thread_state_(kMaxFrameSize,
&rand_gen_, &rand_gen_,
&render_call_event_, &render_call_event_,


@ -15,6 +15,7 @@
#include "api/scoped_refptr.h" #include "api/scoped_refptr.h"
#include "modules/audio_processing/include/audio_processing.h" #include "modules/audio_processing/include/audio_processing.h"
#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/echo_control_mock.h" #include "modules/audio_processing/test/echo_control_mock.h"
#include "modules/audio_processing/test/test_utils.h" #include "modules/audio_processing/test/test_utils.h"
#include "rtc_base/checks.h" #include "rtc_base/checks.h"
@ -167,7 +168,8 @@ TEST(AudioProcessingImplTest, AudioParameterChangeTriggersInit) {
} }
TEST(AudioProcessingImplTest, UpdateCapturePreGainRuntimeSetting) { TEST(AudioProcessingImplTest, UpdateCapturePreGainRuntimeSetting) {
std::unique_ptr<AudioProcessing> apm(AudioProcessingBuilder().Create()); std::unique_ptr<AudioProcessing> apm(
AudioProcessingBuilderForTesting().Create());
webrtc::AudioProcessing::Config apm_config; webrtc::AudioProcessing::Config apm_config;
apm_config.pre_amplifier.enabled = true; apm_config.pre_amplifier.enabled = true;
apm_config.pre_amplifier.fixed_gain_factor = 1.f; apm_config.pre_amplifier.fixed_gain_factor = 1.f;
@ -205,7 +207,7 @@ TEST(AudioProcessingImplTest,
const auto* echo_control_factory_ptr = echo_control_factory.get(); const auto* echo_control_factory_ptr = echo_control_factory.get();
std::unique_ptr<AudioProcessing> apm( std::unique_ptr<AudioProcessing> apm(
AudioProcessingBuilder() AudioProcessingBuilderForTesting()
.SetEchoControlFactory(std::move(echo_control_factory)) .SetEchoControlFactory(std::move(echo_control_factory))
.Create()); .Create());
// Disable AGC. // Disable AGC.
@ -248,7 +250,7 @@ TEST(AudioProcessingImplTest,
const auto* echo_control_factory_ptr = echo_control_factory.get(); const auto* echo_control_factory_ptr = echo_control_factory.get();
std::unique_ptr<AudioProcessing> apm( std::unique_ptr<AudioProcessing> apm(
AudioProcessingBuilder() AudioProcessingBuilderForTesting()
.SetEchoControlFactory(std::move(echo_control_factory)) .SetEchoControlFactory(std::move(echo_control_factory))
.Create()); .Create());
webrtc::AudioProcessing::Config apm_config; webrtc::AudioProcessing::Config apm_config;
@ -294,7 +296,7 @@ TEST(AudioProcessingImplTest, EchoControllerObservesPlayoutVolumeChange) {
const auto* echo_control_factory_ptr = echo_control_factory.get(); const auto* echo_control_factory_ptr = echo_control_factory.get();
std::unique_ptr<AudioProcessing> apm( std::unique_ptr<AudioProcessing> apm(
AudioProcessingBuilder() AudioProcessingBuilderForTesting()
.SetEchoControlFactory(std::move(echo_control_factory)) .SetEchoControlFactory(std::move(echo_control_factory))
.Create()); .Create());
// Disable AGC. // Disable AGC.
@ -353,7 +355,7 @@ TEST(AudioProcessingImplTest, RenderPreProcessorBeforeEchoDetector) {
new TestRenderPreProcessor()); new TestRenderPreProcessor());
// Create APM injecting the test echo detector and render pre-processor. // Create APM injecting the test echo detector and render pre-processor.
std::unique_ptr<AudioProcessing> apm( std::unique_ptr<AudioProcessing> apm(
AudioProcessingBuilder() AudioProcessingBuilderForTesting()
.SetEchoDetector(test_echo_detector) .SetEchoDetector(test_echo_detector)
.SetRenderPreProcessing(std::move(test_render_pre_processor)) .SetRenderPreProcessing(std::move(test_render_pre_processor))
.Create()); .Create());


@ -15,6 +15,7 @@
#include "api/array_view.h" #include "api/array_view.h"
#include "modules/audio_processing/audio_processing_impl.h" #include "modules/audio_processing/audio_processing_impl.h"
#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/test_utils.h" #include "modules/audio_processing/test/test_utils.h"
#include "rtc_base/atomic_ops.h" #include "rtc_base/atomic_ops.h"
#include "rtc_base/event.h" #include "rtc_base/event.h"
@ -486,28 +487,28 @@ class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
int num_capture_channels = 1; int num_capture_channels = 1;
switch (simulation_config_.simulation_settings) { switch (simulation_config_.simulation_settings) {
case SettingsType::kDefaultApmMobile: { case SettingsType::kDefaultApmMobile: {
apm_.reset(AudioProcessingBuilder().Create()); apm_.reset(AudioProcessingBuilderForTesting().Create());
ASSERT_TRUE(!!apm_); ASSERT_TRUE(!!apm_);
set_default_mobile_apm_runtime_settings(apm_.get()); set_default_mobile_apm_runtime_settings(apm_.get());
break; break;
} }
case SettingsType::kDefaultApmDesktop: { case SettingsType::kDefaultApmDesktop: {
Config config; Config config;
apm_.reset(AudioProcessingBuilder().Create(config)); apm_.reset(AudioProcessingBuilderForTesting().Create(config));
ASSERT_TRUE(!!apm_); ASSERT_TRUE(!!apm_);
set_default_desktop_apm_runtime_settings(apm_.get()); set_default_desktop_apm_runtime_settings(apm_.get());
apm_->SetExtraOptions(config); apm_->SetExtraOptions(config);
break; break;
} }
case SettingsType::kAllSubmodulesTurnedOff: { case SettingsType::kAllSubmodulesTurnedOff: {
apm_.reset(AudioProcessingBuilder().Create()); apm_.reset(AudioProcessingBuilderForTesting().Create());
ASSERT_TRUE(!!apm_); ASSERT_TRUE(!!apm_);
turn_off_default_apm_runtime_settings(apm_.get()); turn_off_default_apm_runtime_settings(apm_.get());
break; break;
} }
case SettingsType::kDefaultApmDesktopWithoutDelayAgnostic: { case SettingsType::kDefaultApmDesktopWithoutDelayAgnostic: {
Config config; Config config;
apm_.reset(AudioProcessingBuilder().Create(config)); apm_.reset(AudioProcessingBuilderForTesting().Create(config));
ASSERT_TRUE(!!apm_); ASSERT_TRUE(!!apm_);
set_default_desktop_apm_runtime_settings(apm_.get()); set_default_desktop_apm_runtime_settings(apm_.get());
apm_->SetExtraOptions(config); apm_->SetExtraOptions(config);
@ -515,7 +516,7 @@ class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
} }
case SettingsType::kDefaultApmDesktopWithoutExtendedFilter: { case SettingsType::kDefaultApmDesktopWithoutExtendedFilter: {
Config config; Config config;
apm_.reset(AudioProcessingBuilder().Create(config)); apm_.reset(AudioProcessingBuilderForTesting().Create(config));
ASSERT_TRUE(!!apm_); ASSERT_TRUE(!!apm_);
set_default_desktop_apm_runtime_settings(apm_.get()); set_default_desktop_apm_runtime_settings(apm_.get());
apm_->SetExtraOptions(config); apm_->SetExtraOptions(config);


@ -28,6 +28,7 @@
#include "modules/audio_processing/audio_processing_impl.h" #include "modules/audio_processing/audio_processing_impl.h"
#include "modules/audio_processing/common.h" #include "modules/audio_processing/common.h"
#include "modules/audio_processing/include/mock_audio_processing.h" #include "modules/audio_processing/include/mock_audio_processing.h"
#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/protobuf_utils.h" #include "modules/audio_processing/test/protobuf_utils.h"
#include "modules/audio_processing/test/test_utils.h" #include "modules/audio_processing/test/test_utils.h"
#include "rtc_base/arraysize.h" #include "rtc_base/arraysize.h"
@ -426,7 +427,7 @@ ApmTest::ApmTest()
far_file_(NULL), far_file_(NULL),
near_file_(NULL), near_file_(NULL),
out_file_(NULL) { out_file_(NULL) {
apm_.reset(AudioProcessingBuilder().Create()); apm_.reset(AudioProcessingBuilderForTesting().Create());
AudioProcessing::Config apm_config = apm_->GetConfig();
apm_config.gain_controller1.analog_gain_controller.enabled = false;
apm_config.pipeline.maximum_internal_processing_rate = 48000;
@@ -1176,7 +1177,7 @@ TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabledFloat) {
auto src_channels = &src[0];
auto dest_channels = &dest[0];
-apm_.reset(AudioProcessingBuilder().Create());
+apm_.reset(AudioProcessingBuilderForTesting().Create());
EXPECT_NOERR(apm_->ProcessStream(&src_channels, StreamConfig(sample_rate, 1),
                                 StreamConfig(sample_rate, 1),
                                 &dest_channels));
@@ -1637,7 +1638,7 @@ TEST_F(ApmTest, Process) {
if (test->num_input_channels() != test->num_output_channels())
  continue;
-apm_.reset(AudioProcessingBuilder().Create());
+apm_.reset(AudioProcessingBuilderForTesting().Create());
AudioProcessing::Config apm_config = apm_->GetConfig();
apm_config.gain_controller1.analog_gain_controller.enabled = false;
apm_->ApplyConfig(apm_config);
@@ -1806,7 +1807,8 @@ TEST_F(ApmTest, NoErrorsWithKeyboardChannel) {
    {AudioProcessing::kStereoAndKeyboard, AudioProcessing::kStereo},
};
-std::unique_ptr<AudioProcessing> ap(AudioProcessingBuilder().Create());
+std::unique_ptr<AudioProcessing> ap(
+    AudioProcessingBuilderForTesting().Create());
// Enable one component just to ensure some processing takes place.
AudioProcessing::Config config;
config.noise_suppression.enabled = true;
@@ -1932,7 +1934,8 @@ class AudioProcessingTest
    size_t num_reverse_input_channels,
    size_t num_reverse_output_channels,
    const std::string& output_file_prefix) {
-std::unique_ptr<AudioProcessing> ap(AudioProcessingBuilder().Create());
+std::unique_ptr<AudioProcessing> ap(
+    AudioProcessingBuilderForTesting().Create());
AudioProcessing::Config apm_config = ap->GetConfig();
apm_config.gain_controller1.analog_gain_controller.enabled = false;
ap->ApplyConfig(apm_config);
@@ -2316,7 +2319,8 @@ void RunApmRateAndChannelTest(
    rtc::ArrayView<const int> sample_rates_hz,
    rtc::ArrayView<const int> render_channel_counts,
    rtc::ArrayView<const int> capture_channel_counts) {
-std::unique_ptr<AudioProcessing> apm(AudioProcessingBuilder().Create());
+std::unique_ptr<AudioProcessing> apm(
+    AudioProcessingBuilderForTesting().Create());
webrtc::AudioProcessing::Config apm_config;
apm_config.echo_canceller.enabled = true;
apm->ApplyConfig(apm_config);
@@ -2455,7 +2459,7 @@ TEST(ApmConfiguration, EnablePostProcessing) {
auto mock_post_processor =
    std::unique_ptr<CustomProcessing>(mock_post_processor_ptr);
rtc::scoped_refptr<AudioProcessing> apm =
-    AudioProcessingBuilder()
+    AudioProcessingBuilderForTesting()
        .SetCapturePostProcessing(std::move(mock_post_processor))
        .Create();
@@ -2477,7 +2481,7 @@ TEST(ApmConfiguration, EnablePreProcessing) {
auto mock_pre_processor =
    std::unique_ptr<CustomProcessing>(mock_pre_processor_ptr);
rtc::scoped_refptr<AudioProcessing> apm =
-    AudioProcessingBuilder()
+    AudioProcessingBuilderForTesting()
        .SetRenderPreProcessing(std::move(mock_pre_processor))
        .Create();
@@ -2499,7 +2503,7 @@ TEST(ApmConfiguration, EnableCaptureAnalyzer) {
auto mock_capture_analyzer =
    std::unique_ptr<CustomAudioAnalyzer>(mock_capture_analyzer_ptr);
rtc::scoped_refptr<AudioProcessing> apm =
-    AudioProcessingBuilder()
+    AudioProcessingBuilderForTesting()
        .SetCaptureAnalyzer(std::move(mock_capture_analyzer))
        .Create();
@@ -2520,7 +2524,7 @@ TEST(ApmConfiguration, PreProcessingReceivesRuntimeSettings) {
auto mock_pre_processor =
    std::unique_ptr<CustomProcessing>(mock_pre_processor_ptr);
rtc::scoped_refptr<AudioProcessing> apm =
-    AudioProcessingBuilder()
+    AudioProcessingBuilderForTesting()
        .SetRenderPreProcessing(std::move(mock_pre_processor))
        .Create();
apm->SetRuntimeSetting(
@@ -2565,7 +2569,7 @@ TEST(ApmConfiguration, EchoControlInjection) {
    new MyEchoControlFactory());
rtc::scoped_refptr<AudioProcessing> apm =
-    AudioProcessingBuilder()
+    AudioProcessingBuilderForTesting()
        .SetEchoControlFactory(std::move(echo_control_factory))
        .Create(webrtc_config);
@@ -2589,7 +2593,7 @@ TEST(ApmConfiguration, EchoControlInjection) {
std::unique_ptr<AudioProcessing> CreateApm(bool mobile_aec) {
Config old_config;
std::unique_ptr<AudioProcessing> apm(
-    AudioProcessingBuilder().Create(old_config));
+    AudioProcessingBuilderForTesting().Create(old_config));
if (!apm) {
  return apm;
}
@@ -2740,7 +2744,8 @@ TEST(ApmStatistics, ReportOutputRmsDbfs) {
  ptr[i] = 10000 * ((i % 3) - 1);
}
-std::unique_ptr<AudioProcessing> apm(AudioProcessingBuilder().Create());
+std::unique_ptr<AudioProcessing> apm(
+    AudioProcessingBuilderForTesting().Create());
apm->Initialize(processing_config);
// If not enabled, no metric should be reported.
@@ -2793,7 +2798,8 @@ TEST(ApmStatistics, ReportHasVoice) {
  ptr[i] = 10000 * ((i % 3) - 1);
}
-std::unique_ptr<AudioProcessing> apm(AudioProcessingBuilder().Create());
+std::unique_ptr<AudioProcessing> apm(
+    AudioProcessingBuilderForTesting().Create());
apm->Initialize(processing_config);
// If not enabled, no metric should be reported.

View File

@@ -685,19 +685,34 @@ class RTC_EXPORT AudioProcessingBuilder {
~AudioProcessingBuilder();
// The AudioProcessingBuilder takes ownership of the echo_control_factory.
AudioProcessingBuilder& SetEchoControlFactory(
-    std::unique_ptr<EchoControlFactory> echo_control_factory);
+    std::unique_ptr<EchoControlFactory> echo_control_factory) {
+  echo_control_factory_ = std::move(echo_control_factory);
+  return *this;
+}
// The AudioProcessingBuilder takes ownership of the capture_post_processing.
AudioProcessingBuilder& SetCapturePostProcessing(
-    std::unique_ptr<CustomProcessing> capture_post_processing);
+    std::unique_ptr<CustomProcessing> capture_post_processing) {
+  capture_post_processing_ = std::move(capture_post_processing);
+  return *this;
+}
// The AudioProcessingBuilder takes ownership of the render_pre_processing.
AudioProcessingBuilder& SetRenderPreProcessing(
-    std::unique_ptr<CustomProcessing> render_pre_processing);
+    std::unique_ptr<CustomProcessing> render_pre_processing) {
+  render_pre_processing_ = std::move(render_pre_processing);
+  return *this;
+}
// The AudioProcessingBuilder takes ownership of the echo_detector.
AudioProcessingBuilder& SetEchoDetector(
-    rtc::scoped_refptr<EchoDetector> echo_detector);
+    rtc::scoped_refptr<EchoDetector> echo_detector) {
+  echo_detector_ = std::move(echo_detector);
+  return *this;
+}
// The AudioProcessingBuilder takes ownership of the capture_analyzer.
AudioProcessingBuilder& SetCaptureAnalyzer(
-    std::unique_ptr<CustomAudioAnalyzer> capture_analyzer);
+    std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) {
+  capture_analyzer_ = std::move(capture_analyzer);
+  return *this;
+}
// This creates an APM instance using the previously set components. Calling
// the Create function resets the AudioProcessingBuilder to its initial state.
AudioProcessing* Create();
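
The setters above are now defined inline and still return *this, so existing fluent call sites keep working unchanged. A minimal sketch of that pattern, assuming the component instance is supplied by the caller; the helper function is hypothetical and not part of this CL:

#include <memory>
#include <utility>

#include "modules/audio_processing/include/audio_processing.h"

namespace webrtc {

// Hypothetical helper, for illustration only: builds an APM with a
// caller-provided capture post-processing component using the fluent setters.
std::unique_ptr<AudioProcessing> BuildApmWithPostProcessing(
    std::unique_ptr<CustomProcessing> capture_post_processing) {
  return std::unique_ptr<AudioProcessing>(
      AudioProcessingBuilder()
          .SetCapturePostProcessing(std::move(capture_post_processing))
          .Create());
}

}  // namespace webrtc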

View File

@@ -0,0 +1,68 @@
/*
 * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"

#include <memory>
#include <utility>

#include "modules/audio_processing/audio_processing_impl.h"
#include "rtc_base/ref_counted_object.h"

namespace webrtc {

AudioProcessingBuilderForTesting::AudioProcessingBuilderForTesting() = default;
AudioProcessingBuilderForTesting::~AudioProcessingBuilderForTesting() = default;

#ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE

AudioProcessing* AudioProcessingBuilderForTesting::Create() {
  webrtc::Config config;
  return Create(config);
}

AudioProcessing* AudioProcessingBuilderForTesting::Create(
    const webrtc::Config& config) {
  AudioProcessingImpl* apm = new rtc::RefCountedObject<AudioProcessingImpl>(
      config, std::move(capture_post_processing_),
      std::move(render_pre_processing_), std::move(echo_control_factory_),
      std::move(echo_detector_), std::move(capture_analyzer_));
  int error = apm->Initialize();
  RTC_CHECK_EQ(error, AudioProcessing::kNoError);
  return apm;
}

#else

AudioProcessing* AudioProcessingBuilderForTesting::Create() {
  AudioProcessingBuilder builder;
  TransferOwnershipsToBuilder(&builder);
  return builder.Create();
}

AudioProcessing* AudioProcessingBuilderForTesting::Create(
    const webrtc::Config& config) {
  AudioProcessingBuilder builder;
  TransferOwnershipsToBuilder(&builder);
  return builder.Create(config);
}

#endif

void AudioProcessingBuilderForTesting::TransferOwnershipsToBuilder(
    AudioProcessingBuilder* builder) {
  builder->SetCapturePostProcessing(std::move(capture_post_processing_));
  builder->SetRenderPreProcessing(std::move(render_pre_processing_));
  builder->SetCaptureAnalyzer(std::move(capture_analyzer_));
  builder->SetEchoControlFactory(std::move(echo_control_factory_));
  builder->SetEchoDetector(std::move(echo_detector_));
}

}  // namespace webrtc

View File

@@ -0,0 +1,81 @@
/*
 * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_BUILDER_FOR_TESTING_H_
#define MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_BUILDER_FOR_TESTING_H_

#include <list>
#include <memory>
#include <utility>
#include <vector>

#include "modules/audio_processing/include/audio_processing.h"

namespace webrtc {

// Facilitates building of AudioProcessingImpl for the tests.
class AudioProcessingBuilderForTesting {
 public:
  AudioProcessingBuilderForTesting();
  ~AudioProcessingBuilderForTesting();
  // The AudioProcessingBuilderForTesting takes ownership of the
  // echo_control_factory.
  AudioProcessingBuilderForTesting& SetEchoControlFactory(
      std::unique_ptr<EchoControlFactory> echo_control_factory) {
    echo_control_factory_ = std::move(echo_control_factory);
    return *this;
  }
  // The AudioProcessingBuilderForTesting takes ownership of the
  // capture_post_processing.
  AudioProcessingBuilderForTesting& SetCapturePostProcessing(
      std::unique_ptr<CustomProcessing> capture_post_processing) {
    capture_post_processing_ = std::move(capture_post_processing);
    return *this;
  }
  // The AudioProcessingBuilderForTesting takes ownership of the
  // render_pre_processing.
  AudioProcessingBuilderForTesting& SetRenderPreProcessing(
      std::unique_ptr<CustomProcessing> render_pre_processing) {
    render_pre_processing_ = std::move(render_pre_processing);
    return *this;
  }
  // The AudioProcessingBuilderForTesting takes ownership of the echo_detector.
  AudioProcessingBuilderForTesting& SetEchoDetector(
      rtc::scoped_refptr<EchoDetector> echo_detector) {
    echo_detector_ = std::move(echo_detector);
    return *this;
  }
  // The AudioProcessingBuilderForTesting takes ownership of the
  // capture_analyzer.
  AudioProcessingBuilderForTesting& SetCaptureAnalyzer(
      std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) {
    capture_analyzer_ = std::move(capture_analyzer);
    return *this;
  }
  // This creates an APM instance using the previously set components. Calling
  // the Create function resets the AudioProcessingBuilderForTesting to its
  // initial state.
  AudioProcessing* Create();
  AudioProcessing* Create(const webrtc::Config& config);

 private:
  // Transfers the ownership to a non-testing builder.
  void TransferOwnershipsToBuilder(AudioProcessingBuilder* builder);

  std::unique_ptr<EchoControlFactory> echo_control_factory_;
  std::unique_ptr<CustomProcessing> capture_post_processing_;
  std::unique_ptr<CustomProcessing> render_pre_processing_;
  rtc::scoped_refptr<EchoDetector> echo_detector_;
  std::unique_ptr<CustomAudioAnalyzer> capture_analyzer_;
};

}  // namespace webrtc

#endif  // MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_BUILDER_FOR_TESTING_H_
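
The test call sites updated throughout this CL use this builder exactly like the production AudioProcessingBuilder. A minimal sketch of the typical test-side usage, assuming the header above is available; the helper name and the configuration value are illustrative only:

#include <memory>

#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"

namespace webrtc {

// Hypothetical helper, for illustration only: obtains an APM via the testing
// builder and applies a minimal configuration, mirroring the unittest hunks.
std::unique_ptr<AudioProcessing> CreateConfiguredApmForTest() {
  std::unique_ptr<AudioProcessing> apm(
      AudioProcessingBuilderForTesting().Create());
  AudioProcessing::Config config = apm->GetConfig();
  config.echo_canceller.enabled = true;  // Illustrative setting.
  apm->ApplyConfig(config);
  return apm;
}

}  // namespace webrtc

Because the testing builder constructs AudioProcessingImpl directly when WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE is defined (see the .cc file above), tests keep a working APM even in builds where the production module is excluded.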

View File

@@ -10,6 +10,7 @@
#include "modules/audio_processing/test/debug_dump_replayer.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/protobuf_utils.h"
#include "modules/audio_processing/test/runtime_setting_util.h"
#include "rtc_base/checks.h"
@@ -185,7 +186,7 @@ void DebugDumpReplayer::MaybeRecreateApm(const audioproc::Config& msg) {
// We only create APM once, since changes on these fields should not
// happen in current implementation.
if (!apm_.get()) {
-  apm_.reset(AudioProcessingBuilder().Create(config));
+  apm_.reset(AudioProcessingBuilderForTesting().Create(config));
}
}

View File

@@ -17,6 +17,7 @@
#include "api/audio/echo_canceller3_factory.h"
#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/debug_dump_replayer.h"
#include "modules/audio_processing/test/test_utils.h"
#include "rtc_base/task_queue_for_test.h"
@@ -141,7 +142,7 @@ DebugDumpGenerator::DebugDumpGenerator(const std::string& input_file_name,
      enable_pre_amplifier_(enable_pre_amplifier),
      worker_queue_("debug_dump_generator_worker_queue"),
      dump_file_name_(dump_file_name) {
-  AudioProcessingBuilder apm_builder;
+  AudioProcessingBuilderForTesting apm_builder;
  apm_.reset(apm_builder.Create(config));
}

View File

@@ -587,6 +587,7 @@ if (rtc_include_tests) {
"../media:rtc_media_engine_defaults",
"../modules/audio_device:audio_device_api",
"../modules/audio_processing:audio_processing_statistics",
+"../modules/audio_processing:audioproc_test_utils",
"../modules/rtp_rtcp:rtp_rtcp_format",
"../p2p:fake_ice_transport",
"../p2p:fake_port_allocator",

View File

@@ -36,6 +36,7 @@
#include "media/engine/fake_webrtc_video_engine.h"
#include "media/engine/webrtc_media_engine.h"
#include "media/engine/webrtc_media_engine_defaults.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "p2p/base/fake_ice_transport.h"
#include "p2p/base/mock_async_resolver.h"
#include "p2p/base/p2p_constants.h"
@@ -648,6 +649,12 @@ class PeerConnectionWrapper : public webrtc::PeerConnectionObserver,
  media_deps.video_decoder_factory.reset();
}
+if (!media_deps.audio_processing) {
+  // If the standard Creation method for APM returns a null pointer, instead
+  // use the builder for testing to create an APM object.
+  media_deps.audio_processing = AudioProcessingBuilderForTesting().Create();
+}
pc_factory_dependencies.media_engine =
    cricket::CreateMediaEngine(std::move(media_deps));
pc_factory_dependencies.call_factory = webrtc::CreateCallFactory();

View File

@@ -453,6 +453,7 @@ webrtc_fuzzer_test("audio_processing_fuzzer") {
"../../modules/audio_processing",
"../../modules/audio_processing:api",
"../../modules/audio_processing:audio_buffer",
+"../../modules/audio_processing:audioproc_test_utils",
"../../modules/audio_processing/aec3",
"../../modules/audio_processing/aec_dump",
"../../modules/audio_processing/aec_dump:aec_dump_impl",

View File

@@ -16,6 +16,7 @@
#include "api/task_queue/default_task_queue_factory.h"
#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/numerics/safe_minmax.h"
#include "rtc_base/task_queue.h"
@@ -108,7 +109,7 @@ std::unique_ptr<AudioProcessing> CreateApm(test::FuzzDataHelper* fuzz_data,
config.Set<ExperimentalNs>(new ExperimentalNs(exp_ns));
std::unique_ptr<AudioProcessing> apm(
-    AudioProcessingBuilder()
+    AudioProcessingBuilderForTesting()
        .SetEchoControlFactory(std::move(echo_control_factory))
        .Create(config));

View File

@@ -33,7 +33,11 @@ class TestPeer final : public PeerConnectionWrapper {
  return std::move(video_generators_[i]);
}
-void DetachAecDump() { audio_processing_->DetachAecDump(); }
+void DetachAecDump() {
+  if (audio_processing_) {
+    audio_processing_->DetachAecDump();
+  }
+}
// Adds provided |candidates| to the owned peer connection.
bool AddIceCandidates(

View File

@@ -290,7 +290,7 @@ std::unique_ptr<TestPeer> TestPeerFactory::CreateTestPeer(
// Create peer connection factory.
rtc::scoped_refptr<AudioProcessing> audio_processing =
    webrtc::AudioProcessingBuilder().Create();
-if (params->aec_dump_path) {
+if (params->aec_dump_path && audio_processing) {
  audio_processing->AttachAecDump(
      AecDumpFactory::Create(*params->aec_dump_path, -1, task_queue));
}

View File

@@ -96,6 +96,9 @@ declare_args() {
# should be generated.
apm_debug_dump = false

+# Selects whether the audio processing module should be excluded.
+rtc_exclude_audio_processing_module = false

# Set this to true to enable BWE test logging.
rtc_enable_bwe_test_logging = false
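
Setting rtc_exclude_audio_processing_module to true defines WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE for the build, and the null checks added at the call sites above suggest that production code may then end up without an APM instance. A minimal sketch of the defensive pattern such call sites follow; the helper function is hypothetical and the echo_canceller setting is purely illustrative:

#include <memory>

#include "modules/audio_processing/include/audio_processing.h"

namespace webrtc {

// Hypothetical helper, for illustration only: configures the APM when one is
// available and degrades gracefully when the module is excluded from the build.
void ConfigureApmIfAvailable() {
  std::unique_ptr<AudioProcessing> apm(AudioProcessingBuilder().Create());
  if (!apm) {
    // No audio processing module in this build; callers must cope without it.
    return;
  }
  AudioProcessing::Config config = apm->GetConfig();
  config.echo_canceller.enabled = true;  // Illustrative setting.
  apm->ApplyConfig(config);
}

}  // namespace webrtc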