From 58530ed246b7d87e2208b96ab2c4836adb6fd5d7 Mon Sep 17 00:00:00 2001
From: minyue
Date: Tue, 24 May 2016 05:50:12 -0700
Subject: [PATCH] Updating APM unittests on the echo metrics.

There has been a series of changes to the calculation of the echo metrics.
These changes made the existing unittests looser, e.g., EXPECT_EQ became
EXPECT_NEAR. It is a good time to protect the echo calculation more strictly.

This change does not simply generate a new reference file and turn EXPECT_NEAR
back into EXPECT_EQ; it strengthens the test as well. The main changes are:

1. The old test only sampled the metrics once, at the end of processing, while
   the new test takes metrics during the call at a fixed time interval. This
   gives much stronger protection.

2. Added protection of a newly added metric, called divergent_filter_fraction.

3. As said, EXPECT_EQ (actually ASSERT_EQ) is used instead of EXPECT_NEAR as
   much as possible, even for floating-point values. This may be too
   restrictive, but it is good to be restrictive at the beginning.

BUG=

Review-Url: https://codereview.webrtc.org/1969403003
Cr-Commit-Position: refs/heads/master@{#12871}
---
 data/audio_processing/output_data_float.pb        | Bin 2054 -> 15622 bytes
 data/audio_processing/output_data_mac.pb          | Bin 2054 -> 15658 bytes
 .../audio_processing_unittest.cc                  | 139 ++++++++++--------
 .../audio_processing/test/unittest.proto          |   7 +-
 4 files changed, 81 insertions(+), 65 deletions(-)

diff --git a/data/audio_processing/output_data_float.pb b/data/audio_processing/output_data_float.pb
index aff94fa9f6518bd628fdac2ec70d0686b4f36253..bc19aa369b26b8df8054b995daf9a25cedfba50a 100644
GIT binary patch
literal 15622
[base85-encoded binary data omitted]

diff --git a/data/audio_processing/output_data_mac.pb b/data/audio_processing/output_data_mac.pb
index 78d0e10965fdbb364d3626814202e077e6517a70..44f6d276f23bc5c649fa906a3a05aa8e60d4384a 100644
GIT binary patch
literal 15658
[base85-encoded binary data omitted]

literal 2054
[base85-encoded binary data omitted]

diff --git a/webrtc/modules/audio_processing/audio_processing_unittest.cc b/webrtc/modules/audio_processing/audio_processing_unittest.cc
index ded75c8652..e5ab3da3b4 100644
--- a/webrtc/modules/audio_processing/audio_processing_unittest.cc
+++ b/webrtc/modules/audio_processing/audio_processing_unittest.cc
@@ -204,10 +204,10 @@ int16_t MaxAudioFrame(const AudioFrame& frame) {
 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
 void TestStats(const AudioProcessing::Statistic& test,
                const audioproc::Test::Statistic& reference) {
-  EXPECT_NEAR(reference.instant(), test.instant, 2);
-  EXPECT_NEAR(reference.average(), test.average, 2);
-  EXPECT_NEAR(reference.maximum(), test.maximum, 3);
-  EXPECT_NEAR(reference.minimum(), test.minimum, 2);
+  EXPECT_EQ(reference.instant(), test.instant);
+  EXPECT_EQ(reference.average(), test.average);
+  EXPECT_EQ(reference.maximum(), test.maximum);
+  EXPECT_EQ(reference.minimum(), test.minimum);
 }
 
 void WriteStatsMessage(const AudioProcessing::Statistic& output,
@@ -221,7 +221,6 @@ void WriteStatsMessage(const AudioProcessing::Statistic& output,
 
 void OpenFileAndWriteMessage(const std::string filename,
                              const ::google::protobuf::MessageLite& msg) {
-#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
   FILE* file = fopen(filename.c_str(), "wb");
   ASSERT_TRUE(file != NULL);
 
@@ -234,10 +233,6 @@ void OpenFileAndWriteMessage(const std::string filename,
   ASSERT_EQ(static_cast<size_t>(size),
             fwrite(array.get(), sizeof(array[0]), size, file));
   fclose(file);
-#else
-  std::cout << "Warning: Writing new reference is only allowed on Linux!"
-            << std::endl;
-#endif
 }
 
 std::string ResourceFilePath(std::string name, int sample_rate_hz) {
@@ -2101,6 +2096,9 @@ TEST_F(ApmTest, Process) {
     int analog_level_average = 0;
     int max_output_average = 0;
     float ns_speech_prob_average = 0.0f;
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+    int stats_index = 0;
+#endif
 
     while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) {
       EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(revframe_));
@@ -2148,27 +2146,81 @@ TEST_F(ApmTest, Process) {
       // Reset in case of downmixing.
      frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
       frame_count++;
+
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+      const int kStatsAggregationFrameNum = 100;  // 1 second.
+      if (frame_count % kStatsAggregationFrameNum == 0) {
+        // Get echo metrics.
+        EchoCancellation::Metrics echo_metrics;
+        EXPECT_EQ(apm_->kNoError,
+                  apm_->echo_cancellation()->GetMetrics(&echo_metrics));
+
+        // Get delay metrics.
+        int median = 0;
+        int std = 0;
+        float fraction_poor_delays = 0;
+        EXPECT_EQ(apm_->kNoError,
+                  apm_->echo_cancellation()->GetDelayMetrics(
+                      &median, &std, &fraction_poor_delays));
+
+        // Get RMS.
+        int rms_level = apm_->level_estimator()->RMS();
+        EXPECT_LE(0, rms_level);
+        EXPECT_GE(127, rms_level);
+
+        if (!write_ref_data) {
+          const audioproc::Test::EchoMetrics& reference =
+              test->echo_metrics(stats_index);
+          TestStats(echo_metrics.residual_echo_return_loss,
+                    reference.residual_echo_return_loss());
+          TestStats(echo_metrics.echo_return_loss,
+                    reference.echo_return_loss());
+          TestStats(echo_metrics.echo_return_loss_enhancement,
+                    reference.echo_return_loss_enhancement());
+          TestStats(echo_metrics.a_nlp,
+                    reference.a_nlp());
+          EXPECT_EQ(echo_metrics.divergent_filter_fraction,
+                    reference.divergent_filter_fraction());
+
+          const audioproc::Test::DelayMetrics& reference_delay =
+              test->delay_metrics(stats_index);
+          EXPECT_EQ(reference_delay.median(), median);
+          EXPECT_EQ(reference_delay.std(), std);
+          EXPECT_EQ(reference_delay.fraction_poor_delays(),
+                    fraction_poor_delays);
+
+          EXPECT_EQ(test->rms_level(stats_index), rms_level);
+
+          ++stats_index;
+        } else {
+          audioproc::Test::EchoMetrics* message =
+              test->add_echo_metrics();
+          WriteStatsMessage(echo_metrics.residual_echo_return_loss,
+                            message->mutable_residual_echo_return_loss());
+          WriteStatsMessage(echo_metrics.echo_return_loss,
+                            message->mutable_echo_return_loss());
+          WriteStatsMessage(echo_metrics.echo_return_loss_enhancement,
+                            message->mutable_echo_return_loss_enhancement());
+          WriteStatsMessage(echo_metrics.a_nlp,
+                            message->mutable_a_nlp());
+          message->set_divergent_filter_fraction(
+              echo_metrics.divergent_filter_fraction);
+
+          audioproc::Test::DelayMetrics* message_delay =
+              test->add_delay_metrics();
+          message_delay->set_median(median);
+          message_delay->set_std(std);
+          message_delay->set_fraction_poor_delays(fraction_poor_delays);
+
+          test->add_rms_level(rms_level);
+        }
+      }
+#endif  // defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE).
     }
     max_output_average /= frame_count;
     analog_level_average /= frame_count;
     ns_speech_prob_average /= frame_count;
 
-#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
-    EchoCancellation::Metrics echo_metrics;
-    EXPECT_EQ(apm_->kNoError,
-              apm_->echo_cancellation()->GetMetrics(&echo_metrics));
-    int median = 0;
-    int std = 0;
-    float fraction_poor_delays = 0;
-    EXPECT_EQ(apm_->kNoError,
-              apm_->echo_cancellation()->GetDelayMetrics(
-                  &median, &std, &fraction_poor_delays));
-
-    int rms_level = apm_->level_estimator()->RMS();
-    EXPECT_LE(0, rms_level);
-    EXPECT_GE(127, rms_level);
-#endif
-
     if (!write_ref_data) {
       const int kIntNear = 1;
       // When running the test on a N7 we get a {2, 6} difference of
@@ -2198,27 +2250,8 @@ TEST_F(ApmTest, Process) {
       EXPECT_NEAR(test->max_output_average(),
                   max_output_average - kMaxOutputAverageOffset,
                   kMaxOutputAverageNear);
-
 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
-      audioproc::Test::EchoMetrics reference = test->echo_metrics();
-      TestStats(echo_metrics.residual_echo_return_loss,
-                reference.residual_echo_return_loss());
-      TestStats(echo_metrics.echo_return_loss,
-                reference.echo_return_loss());
-      TestStats(echo_metrics.echo_return_loss_enhancement,
-                reference.echo_return_loss_enhancement());
-      TestStats(echo_metrics.a_nlp,
-                reference.a_nlp());
-
       const double kFloatNear = 0.0005;
-      audioproc::Test::DelayMetrics reference_delay = test->delay_metrics();
-      EXPECT_NEAR(reference_delay.median(), median, kIntNear);
-      EXPECT_NEAR(reference_delay.std(), std, kIntNear);
-      EXPECT_NEAR(reference_delay.fraction_poor_delays(), fraction_poor_delays,
-                  kFloatNear);
-
-      EXPECT_NEAR(test->rms_level(), rms_level, kIntNear);
-
       EXPECT_NEAR(test->ns_speech_probability_average(),
                   ns_speech_prob_average,
                   kFloatNear);
@@ -2232,24 +2265,6 @@ TEST_F(ApmTest, Process) {
       test->set_max_output_average(max_output_average);
 
 #if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
-      audioproc::Test::EchoMetrics* message = test->mutable_echo_metrics();
-      WriteStatsMessage(echo_metrics.residual_echo_return_loss,
-                        message->mutable_residual_echo_return_loss());
-      WriteStatsMessage(echo_metrics.echo_return_loss,
-                        message->mutable_echo_return_loss());
-      WriteStatsMessage(echo_metrics.echo_return_loss_enhancement,
-                        message->mutable_echo_return_loss_enhancement());
-      WriteStatsMessage(echo_metrics.a_nlp,
-                        message->mutable_a_nlp());
-
-      audioproc::Test::DelayMetrics* message_delay =
-          test->mutable_delay_metrics();
-      message_delay->set_median(median);
-      message_delay->set_std(std);
-      message_delay->set_fraction_poor_delays(fraction_poor_delays);
-
-      test->set_rms_level(rms_level);
-
       EXPECT_LE(0.0f, ns_speech_prob_average);
       EXPECT_GE(1.0f, ns_speech_prob_average);
       test->set_ns_speech_probability_average(ns_speech_prob_average);
diff --git a/webrtc/modules/audio_processing/test/unittest.proto b/webrtc/modules/audio_processing/test/unittest.proto
index ddce46b775..3c93bfd713 100644
--- a/webrtc/modules/audio_processing/test/unittest.proto
+++ b/webrtc/modules/audio_processing/test/unittest.proto
@@ -32,9 +32,10 @@ message Test {
     optional Statistic echo_return_loss = 2;
     optional Statistic echo_return_loss_enhancement = 3;
     optional Statistic a_nlp = 4;
+    optional float divergent_filter_fraction = 5;
   }
 
-  optional EchoMetrics echo_metrics = 11;
+  repeated EchoMetrics echo_metrics = 11;
 
   message DelayMetrics {
     optional int32 median = 1;
@@ -42,9 +43,9 @@
     optional float fraction_poor_delays = 3;
   }
 
-  optional DelayMetrics delay_metrics = 12;
+  repeated DelayMetrics delay_metrics = 12;
 
-  optional int32 rms_level = 13;
+  repeated int32 rms_level = 13;
 
   optional float ns_speech_probability_average = 14;
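
Since echo_metrics, delay_metrics and rms_level become repeated fields with one entry per aggregation interval (every kStatsAggregationFrameNum = 100 frames, i.e. one second), a regenerated reference file can be inspected by walking the three fields in lockstep. The sketch below is a minimal illustration, not part of the patch: the generated header path and the function name PrintIntervalReferences are assumptions, and the audioproc::Test message is expected to come from a reference file (e.g. output_data_float.pb) parsed elsewhere.

// Minimal sketch (not part of the patch): print the per-interval echo, delay
// and RMS reference values stored in the repeated fields introduced above.
// The include path below is an assumption; adjust it to the actual build.
#include <cstdio>

#include "webrtc/modules/audio_processing/test/unittest.pb.h"

void PrintIntervalReferences(const audioproc::Test& test) {
  // The test appends one echo_metrics, one delay_metrics and one rms_level
  // entry per aggregation interval, so they can be indexed with the same i.
  for (int i = 0; i < test.echo_metrics_size(); ++i) {
    const audioproc::Test::EchoMetrics& echo = test.echo_metrics(i);
    const audioproc::Test::DelayMetrics& delay = test.delay_metrics(i);
    std::printf(
        "interval %d: ERLE avg=%d divergent_filter_fraction=%f "
        "delay median=%d std=%d rms=%d\n",
        i, echo.echo_return_loss_enhancement().average(),
        echo.divergent_filter_fraction(), delay.median(), delay.std(),
        test.rms_level(i));
  }
}

Indexing the three repeated fields with a single counter mirrors how the unittest consumes them via stats_index, which is what allows the per-interval EXPECT_EQ comparisons against entry i.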