Change unittests to use AEC3 instead of AEC2
This CL changes the APM unittests to use AEC3 instead of AEC2.

Bug: webrtc:8671
Change-Id: I80f88dbafb7c31696abd8b7efb5a187a9fb30d1c
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/129420
Reviewed-by: Gustaf Ullberg <gustaf@webrtc.org>
Commit-Queue: Per Åhgren <peah@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27607}
This commit is contained in:
parent 30f36af455
commit 8607f843a7
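
For context, most of the hunks below delete the per-test opt-in to the legacy AEC2 path. The following is a minimal sketch (not part of the CL) of the configuration pattern the updated tests end up with, using the WebRTC AudioProcessing API already present in this test file; the helper name is illustrative only. With echo_canceller.use_legacy_aec left at its default (false), APM selects AEC3, whereas the removed lines forced the old AEC2 implementation by setting use_legacy_aec = true.

#include "modules/audio_processing/include/audio_processing.h"

// Hypothetical helper showing how the updated tests enable echo cancellation.
void EnableEchoCancellerAsInUpdatedTests(webrtc::AudioProcessing* apm) {
  webrtc::AudioProcessing::Config apm_config = apm->GetConfig();
  apm_config.echo_canceller.enabled = true;
  apm_config.echo_canceller.mobile_mode = false;  // full-band AEC, not AECM.
  // Not setting use_legacy_aec means the AEC3 implementation is used.
  apm->ApplyConfig(apm_config);
}
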
@@ -110,10 +110,6 @@ size_t TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
   return 0;
 }
 
-int TruncateToMultipleOf10(int value) {
-  return (value / 10) * 10;
-}
-
 void MixStereoToMono(const float* stereo, float* mono,
                      size_t samples_per_channel) {
   for (size_t i = 0; i < samples_per_channel; ++i)
@@ -188,10 +184,7 @@ void EnableAllAPComponents(AudioProcessing* ap) {
   apm_config.gain_controller1.mode =
       AudioProcessing::Config::GainController1::kAdaptiveDigital;
 #elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
-  // TODO(peah): Update tests to instead use AEC3.
-  apm_config.echo_canceller.use_legacy_aec = true;
   apm_config.echo_canceller.mobile_mode = false;
-  apm_config.echo_canceller.legacy_moderate_suppression_level = true;
 
   apm_config.gain_controller1.enabled = true;
   apm_config.gain_controller1.mode =
@@ -421,15 +414,8 @@ ApmTest::ApmTest()
       ref_filename_(test::ResourcePath("audio_processing/output_data_fixed",
                                        "pb")),
 #elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
-#if defined(WEBRTC_MAC)
-      // A different file for Mac is needed because on this platform the AEC
-      // constant |kFixedDelayMs| value is 20 and not 50 as it is on the rest.
-      ref_filename_(test::ResourcePath("audio_processing/output_data_mac",
-                                       "pb")),
-#else
       ref_filename_(test::ResourcePath("audio_processing/output_data_float",
                                        "pb")),
-#endif
 #endif
       frame_(NULL),
       revframe_(NULL),
@@ -870,83 +856,6 @@ TEST_F(ApmTest, SampleRatesInt) {
   }
 }
 
-TEST_F(ApmTest, DISABLED_EchoCancellationReportsCorrectDelays) {
-  // TODO(bjornv): Fix this test to work with DA-AEC.
-  // Enable AEC only.
-  AudioProcessing::Config apm_config = apm_->GetConfig();
-  apm_config.echo_canceller.enabled = true;
-  // TODO(peah): Update tests to instead use AEC3.
-  apm_config.echo_canceller.use_legacy_aec = true;
-  apm_config.echo_canceller.mobile_mode = false;
-  apm_->ApplyConfig(apm_config);
-  Config config;
-  config.Set<DelayAgnostic>(new DelayAgnostic(false));
-  apm_->SetExtraOptions(config);
-
-  // Internally in the AEC the amount of lookahead the delay estimation can
-  // handle is 15 blocks and the maximum delay is set to 60 blocks.
-  const int kLookaheadBlocks = 15;
-  const int kMaxDelayBlocks = 60;
-  // The AEC has a startup time before it actually starts to process. This
-  // procedure can flush the internal far-end buffer, which of course affects
-  // the delay estimation. Therefore, we set a system_delay high enough to
-  // avoid that. The smallest system_delay you can report without flushing the
-  // buffer is 66 ms in 8 kHz.
-  //
-  // It is known that for 16 kHz (and 32 kHz) sampling frequency there is an
-  // additional stuffing of 8 ms on the fly, but it seems to have no impact on
-  // delay estimation. This should be noted though. In case of test failure,
-  // this could be the cause.
-  const int kSystemDelayMs = 66;
-  // Test a couple of corner cases and verify that the estimated delay is
-  // within a valid region (set to +-1.5 blocks). Note that these cases are
-  // sampling frequency dependent.
-  for (size_t i = 0; i < arraysize(kProcessSampleRates); i++) {
-    Init(kProcessSampleRates[i],
-         kProcessSampleRates[i],
-         kProcessSampleRates[i],
-         2,
-         2,
-         2,
-         false);
-    // Sampling frequency dependent variables.
-    const int num_ms_per_block =
-        std::max(4, static_cast<int>(640 / frame_->samples_per_channel_));
-    const int delay_min_ms = -kLookaheadBlocks * num_ms_per_block;
-    const int delay_max_ms = (kMaxDelayBlocks - 1) * num_ms_per_block;
-
-    // 1) Verify correct delay estimate at lookahead boundary.
-    int delay_ms = TruncateToMultipleOf10(kSystemDelayMs + delay_min_ms);
-    ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
-                                 delay_max_ms);
-    // 2) A delay less than maximum lookahead should give an delay estimate at
-    //    the boundary (= -kLookaheadBlocks * num_ms_per_block).
-    delay_ms -= 20;
-    ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
-                                 delay_max_ms);
-    // 3) Three values around zero delay. Note that we need to compensate for
-    //    the fake system_delay.
-    delay_ms = TruncateToMultipleOf10(kSystemDelayMs - 10);
-    ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
-                                 delay_max_ms);
-    delay_ms = TruncateToMultipleOf10(kSystemDelayMs);
-    ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
-                                 delay_max_ms);
-    delay_ms = TruncateToMultipleOf10(kSystemDelayMs + 10);
-    ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
-                                 delay_max_ms);
-    // 4) Verify correct delay estimate at maximum delay boundary.
-    delay_ms = TruncateToMultipleOf10(kSystemDelayMs + delay_max_ms);
-    ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
-                                 delay_max_ms);
-    // 5) A delay above the maximum delay should give an estimate at the
-    //    boundary (= (kMaxDelayBlocks - 1) * num_ms_per_block).
-    delay_ms += 20;
-    ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
-                                 delay_max_ms);
-  }
-}
-
 TEST_F(ApmTest, GainControl) {
   // Testing gain modes
   EXPECT_EQ(apm_->kNoError,
@@ -1452,8 +1361,6 @@ TEST_F(ApmTest, SplittingFilter) {
   // Check the test is valid. We should have distortion from the filter
   // when AEC is enabled (which won't affect the audio).
   apm_config.echo_canceller.enabled = true;
-  // TODO(peah): Update tests to instead use AEC3.
-  apm_config.echo_canceller.use_legacy_aec = true;
   apm_config.echo_canceller.mobile_mode = false;
   apm_->ApplyConfig(apm_config);
   frame_->samples_per_channel_ = 320;
@@ -1703,114 +1610,6 @@ TEST_F(ApmTest, DebugDumpFromFileHandle) {
 #endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 }
 
-TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
-  audioproc::OutputData ref_data;
-  OpenFileAndReadMessage(ref_filename_, &ref_data);
-
-  Config config;
-  config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
-  std::unique_ptr<AudioProcessing> fapm(
-      AudioProcessingBuilder().Create(config));
-  EnableAllComponents();
-  EnableAllAPComponents(fapm.get());
-  for (int i = 0; i < ref_data.test_size(); i++) {
-    printf("Running test %d of %d...\n", i + 1, ref_data.test_size());
-
-    audioproc::Test* test = ref_data.mutable_test(i);
-    // TODO(ajm): Restore downmixing test cases.
-    if (test->num_input_channels() != test->num_output_channels())
-      continue;
-
-    const size_t num_render_channels =
-        static_cast<size_t>(test->num_reverse_channels());
-    const size_t num_input_channels =
-        static_cast<size_t>(test->num_input_channels());
-    const size_t num_output_channels =
-        static_cast<size_t>(test->num_output_channels());
-    const size_t samples_per_channel = static_cast<size_t>(
-        test->sample_rate() * AudioProcessing::kChunkSizeMs / 1000);
-
-    Init(test->sample_rate(), test->sample_rate(), test->sample_rate(),
-         num_input_channels, num_output_channels, num_render_channels, true);
-    Init(fapm.get());
-
-    ChannelBuffer<int16_t> output_cb(samples_per_channel, num_input_channels);
-    ChannelBuffer<int16_t> output_int16(samples_per_channel,
-                                        num_input_channels);
-
-    int analog_level = 127;
-    size_t num_bad_chunks = 0;
-    while (ReadFrame(far_file_, revframe_, revfloat_cb_.get()) &&
-           ReadFrame(near_file_, frame_, float_cb_.get())) {
-      frame_->vad_activity_ = AudioFrame::kVadUnknown;
-
-      EXPECT_NOERR(apm_->ProcessReverseStream(revframe_));
-      EXPECT_NOERR(fapm->AnalyzeReverseStream(
-          revfloat_cb_->channels(),
-          samples_per_channel,
-          test->sample_rate(),
-          LayoutFromChannels(num_render_channels)));
-
-      EXPECT_NOERR(apm_->set_stream_delay_ms(0));
-      EXPECT_NOERR(fapm->set_stream_delay_ms(0));
-      EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(analog_level));
-      EXPECT_NOERR(fapm->gain_control()->set_stream_analog_level(analog_level));
-
-      EXPECT_NOERR(apm_->ProcessStream(frame_));
-      Deinterleave(frame_->data(), samples_per_channel, num_output_channels,
-                   output_int16.channels());
-
-      EXPECT_NOERR(fapm->ProcessStream(
-          float_cb_->channels(),
-          samples_per_channel,
-          test->sample_rate(),
-          LayoutFromChannels(num_input_channels),
-          test->sample_rate(),
-          LayoutFromChannels(num_output_channels),
-          float_cb_->channels()));
-      for (size_t j = 0; j < num_output_channels; ++j) {
-        FloatToS16(float_cb_->channels()[j],
-                   samples_per_channel,
-                   output_cb.channels()[j]);
-        float variance = 0;
-        float snr = ComputeSNR(output_int16.channels()[j],
-                               output_cb.channels()[j],
-                               samples_per_channel, &variance);
-
-        const float kVarianceThreshold = 20;
-        const float kSNRThreshold = 20;
-
-        // Skip frames with low energy.
-        if (std::sqrt(variance) > kVarianceThreshold && snr < kSNRThreshold) {
-          ++num_bad_chunks;
-        }
-      }
-
-      analog_level = fapm->gain_control()->stream_analog_level();
-      EXPECT_EQ(apm_->gain_control()->stream_analog_level(),
-                fapm->gain_control()->stream_analog_level());
-      EXPECT_NEAR(apm_->noise_suppression()->speech_probability(),
-                  fapm->noise_suppression()->speech_probability(),
-                  0.01);
-
-      // Reset in case of downmixing.
-      frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
-    }
-
-#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
-    const size_t kMaxNumBadChunks = 0;
-#elif defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
-    // There are a few chunks in the fixed-point profile that give low SNR.
-    // Listening confirmed the difference is acceptable.
-    const size_t kMaxNumBadChunks = 60;
-#endif
-    EXPECT_LE(num_bad_chunks, kMaxNumBadChunks);
-
-    rewind(far_file_);
-    rewind(near_file_);
-  }
-}
-
 // TODO(andrew): Add a test to process a few frames with different combinations
 // of enabled components.
 
@@ -1938,18 +1737,11 @@ TEST_F(ApmTest, Process) {
       const float echo_return_loss = stats.echo_return_loss.value_or(-1.0f);
       const float echo_return_loss_enhancement =
           stats.echo_return_loss_enhancement.value_or(-1.0f);
-      const float divergent_filter_fraction =
-          stats.divergent_filter_fraction.value_or(-1.0f);
       const float residual_echo_likelihood =
           stats.residual_echo_likelihood.value_or(-1.0f);
       const float residual_echo_likelihood_recent_max =
          stats.residual_echo_likelihood_recent_max.value_or(-1.0f);
 
-      // Delay metrics.
-      const int32_t delay_median_ms = stats.delay_median_ms.value_or(-1.0);
-      const int32_t delay_standard_deviation_ms =
-          stats.delay_standard_deviation_ms.value_or(-1.0);
-
       if (!write_ref_data) {
         const audioproc::Test::EchoMetrics& reference =
             test->echo_metrics(stats_index);
@@ -1957,34 +1749,20 @@ TEST_F(ApmTest, Process) {
         EXPECT_NEAR(echo_return_loss, reference.echo_return_loss(), kEpsilon);
         EXPECT_NEAR(echo_return_loss_enhancement,
                     reference.echo_return_loss_enhancement(), kEpsilon);
-        EXPECT_NEAR(divergent_filter_fraction,
-                    reference.divergent_filter_fraction(), kEpsilon);
         EXPECT_NEAR(residual_echo_likelihood,
                     reference.residual_echo_likelihood(), kEpsilon);
         EXPECT_NEAR(residual_echo_likelihood_recent_max,
                     reference.residual_echo_likelihood_recent_max(),
                     kEpsilon);
-
-        const audioproc::Test::DelayMetrics& reference_delay =
-            test->delay_metrics(stats_index);
-        EXPECT_EQ(reference_delay.median(), delay_median_ms);
-        EXPECT_EQ(reference_delay.std(), delay_standard_deviation_ms);
-
         ++stats_index;
       } else {
         audioproc::Test::EchoMetrics* message_echo = test->add_echo_metrics();
         message_echo->set_echo_return_loss(echo_return_loss);
         message_echo->set_echo_return_loss_enhancement(
             echo_return_loss_enhancement);
-        message_echo->set_divergent_filter_fraction(
-            divergent_filter_fraction);
         message_echo->set_residual_echo_likelihood(residual_echo_likelihood);
         message_echo->set_residual_echo_likelihood_recent_max(
             residual_echo_likelihood_recent_max);
-        audioproc::Test::DelayMetrics* message_delay =
-            test->add_delay_metrics();
-        message_delay->set_median(delay_median_ms);
-        message_delay->set_std(delay_standard_deviation_ms);
       }
     }
 #endif  // defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE).
@@ -2689,12 +2467,8 @@ TEST(ApmConfiguration, EchoControlInjection) {
   apm->ProcessStream(&audio);
 }
 
-std::unique_ptr<AudioProcessing> CreateApm(bool use_AEC2) {
+std::unique_ptr<AudioProcessing> CreateApm(bool mobile_aec) {
   Config old_config;
-  if (use_AEC2) {
-    old_config.Set<ExtendedFilter>(new ExtendedFilter(true));
-    old_config.Set<DelayAgnostic>(new DelayAgnostic(true));
-  }
   std::unique_ptr<AudioProcessing> apm(
       AudioProcessingBuilder().Create(old_config));
   if (!apm) {
@@ -2709,14 +2483,12 @@ std::unique_ptr<AudioProcessing> CreateApm(bool use_AEC2) {
   }
 
   // Disable all components except for an AEC and the residual echo detector.
-  // TODO(peah): Update this to also work on AEC3.
   AudioProcessing::Config apm_config;
   apm_config.residual_echo_detector.enabled = true;
   apm_config.high_pass_filter.enabled = false;
   apm_config.gain_controller2.enabled = false;
   apm_config.echo_canceller.enabled = true;
-  apm_config.echo_canceller.mobile_mode = !use_AEC2;
-  apm_config.echo_canceller.use_legacy_aec = use_AEC2;
+  apm_config.echo_canceller.mobile_mode = mobile_aec;
   apm->ApplyConfig(apm_config);
   EXPECT_EQ(apm->gain_control()->Enable(false), 0);
   EXPECT_EQ(apm->level_estimator()->Enable(false), 0);
@@ -2731,14 +2503,12 @@ std::unique_ptr<AudioProcessing> CreateApm(bool use_AEC2) {
 #define MAYBE_ApmStatistics ApmStatistics
 #endif
 
-TEST(MAYBE_ApmStatistics, AEC2EnabledTest) {
-  // Set up APM with AEC2 and process some audio.
-  std::unique_ptr<AudioProcessing> apm = CreateApm(true);
+TEST(MAYBE_ApmStatistics, AECEnabledTest) {
+  // Set up APM with AEC3 and process some audio.
+  std::unique_ptr<AudioProcessing> apm = CreateApm(false);
   ASSERT_TRUE(apm);
   AudioProcessing::Config apm_config;
   apm_config.echo_canceller.enabled = true;
-  // TODO(peah): Update tests to instead use AEC3.
-  apm_config.echo_canceller.use_legacy_aec = true;
   apm->ApplyConfig(apm_config);
 
   // Set up an audioframe.
@@ -2772,13 +2542,6 @@ TEST(MAYBE_ApmStatistics, AEC2EnabledTest) {
   EXPECT_NE(*stats.echo_return_loss, -100.0);
   ASSERT_TRUE(stats.echo_return_loss_enhancement);
   EXPECT_NE(*stats.echo_return_loss_enhancement, -100.0);
-  ASSERT_TRUE(stats.divergent_filter_fraction);
-  EXPECT_NE(*stats.divergent_filter_fraction, -1.0);
-  ASSERT_TRUE(stats.delay_standard_deviation_ms);
-  EXPECT_GE(*stats.delay_standard_deviation_ms, 0);
-  // We don't check stats.delay_median_ms since it takes too long to settle to a
-  // value. At least 20 seconds of data need to be processed before it will get
-  // a value, which would make this test take too much time.
 
   // If there are no receive streams, we expect the stats not to be set. The
   // 'false' argument signals to APM that no receive streams are currently
@@ -2790,14 +2553,11 @@ TEST(MAYBE_ApmStatistics, AEC2EnabledTest) {
   EXPECT_FALSE(stats.residual_echo_likelihood_recent_max);
   EXPECT_FALSE(stats.echo_return_loss);
   EXPECT_FALSE(stats.echo_return_loss_enhancement);
-  EXPECT_FALSE(stats.divergent_filter_fraction);
-  EXPECT_FALSE(stats.delay_median_ms);
-  EXPECT_FALSE(stats.delay_standard_deviation_ms);
 }
 
 TEST(MAYBE_ApmStatistics, AECMEnabledTest) {
   // Set up APM with AECM and process some audio.
-  std::unique_ptr<AudioProcessing> apm = CreateApm(false);
+  std::unique_ptr<AudioProcessing> apm = CreateApm(true);
   ASSERT_TRUE(apm);
 
   // Set up an audioframe.
@@ -2834,9 +2594,6 @@ TEST(MAYBE_ApmStatistics, AECMEnabledTest) {
   }
   EXPECT_FALSE(stats.echo_return_loss);
   EXPECT_FALSE(stats.echo_return_loss_enhancement);
-  EXPECT_FALSE(stats.divergent_filter_fraction);
-  EXPECT_FALSE(stats.delay_median_ms);
-  EXPECT_FALSE(stats.delay_standard_deviation_ms);
 
   // If there are no receive streams, we expect the stats not to be set.
   stats = apm->GetStatistics(false);
@@ -2844,9 +2601,6 @@ TEST(MAYBE_ApmStatistics, AECMEnabledTest) {
   EXPECT_FALSE(stats.residual_echo_likelihood_recent_max);
   EXPECT_FALSE(stats.echo_return_loss);
   EXPECT_FALSE(stats.echo_return_loss_enhancement);
-  EXPECT_FALSE(stats.divergent_filter_fraction);
-  EXPECT_FALSE(stats.delay_median_ms);
-  EXPECT_FALSE(stats.delay_standard_deviation_ms);
 }
 
 TEST(ApmStatistics, ReportOutputRmsDbfs) {
@@ -1 +1 @@
-f3f7efa512900b06a30af8e3c92aa7863fbc96f8
+169276fe22bbeb1c06e0ed1a9df8149c5dbf8f80