diff --git a/webrtc/modules/audio_processing/aec/aec_core.cc b/webrtc/modules/audio_processing/aec/aec_core.cc index 2cc2968f81..1a45a0c8c0 100644 --- a/webrtc/modules/audio_processing/aec/aec_core.cc +++ b/webrtc/modules/audio_processing/aec/aec_core.cc @@ -689,12 +689,11 @@ static void UpdateMetrics(AecCore* aec) { // ERLE // subtract noise power - suppressedEcho = 2 * (aec->nlpoutlevel.averagelevel - - safety * aec->nlpoutlevel.minlevel); + suppressedEcho = aec->nlpoutlevel.averagelevel - + safety * aec->nlpoutlevel.minlevel; dtmp = 10 * static_cast<float>(log10(aec->nearlevel.averagelevel / - (2 * aec->nlpoutlevel.averagelevel) + - 1e-10f)); + aec->nlpoutlevel.averagelevel + 1e-10f)); dtmp2 = 10 * static_cast<float>(log10(echo / suppressedEcho + 1e-10f)); dtmp = dtmp2; @@ -1139,16 +1138,6 @@ static void EchoSuppression(AecCore* aec, // Inverse error fft. ScaledInverseFft(efw, fft, 2.0f, 1); - // TODO(bjornv): Investigate how to take the windowing below into account if - // needed. - if (aec->metricsMode == 1) { - // Note that we have a scaling by two in the time domain |eBuf|. - // In addition the time domain signal is windowed before transformation, - // losing half the energy on the average. We take care of the first - // scaling only in UpdateMetrics(). - UpdateLevel(&aec->nlpoutlevel, CalculatePower(fft, PART_LEN2)); - } - // Overlap and add to obtain output. 
for (i = 0; i < PART_LEN; i++) { output[i] = (fft[i] * WebRtcAec_sqrtHanning[i] + @@ -1358,6 +1347,7 @@ static void ProcessBlock(AecCore* aec) { EchoSuppression(aec, farend_ptr, echo_subtractor_output, output, outputH_ptr); if (aec->metricsMode == 1) { + UpdateLevel(&aec->nlpoutlevel, CalculatePower(output, PART_LEN)); UpdateMetrics(aec); } diff --git a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc index 3ffed87444..40e235be1d 100644 --- a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc +++ b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc @@ -204,10 +204,10 @@ int16_t MaxAudioFrame(const AudioFrame& frame) { #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) void TestStats(const AudioProcessing::Statistic& test, const audioproc::Test::Statistic& reference) { - EXPECT_NEAR(reference.instant(), test.instant, 1); - EXPECT_EQ(reference.average(), test.average); - EXPECT_EQ(reference.maximum(), test.maximum); - EXPECT_NEAR(reference.minimum(), test.minimum, 1); + EXPECT_NEAR(reference.instant(), test.instant, 2); + EXPECT_NEAR(reference.average(), test.average, 2); + EXPECT_NEAR(reference.maximum(), test.maximum, 2); + EXPECT_NEAR(reference.minimum(), test.minimum, 2); } void WriteStatsMessage(const AudioProcessing::Statistic& output,