diff --git a/data/audio_processing/output_data_fixed.pb b/data/audio_processing/output_data_fixed.pb
index efc95631ee..0e9307ae9f 100644
Binary files a/data/audio_processing/output_data_fixed.pb and b/data/audio_processing/output_data_fixed.pb differ
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc
index 267929a0dd..1623875ca6 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl.cc
@@ -117,7 +117,6 @@ const size_t AudioProcessing::kNumNativeSampleRates =
     arraysize(AudioProcessing::kNativeSampleRatesHz);
 const int AudioProcessing::kMaxNativeSampleRateHz = AudioProcessing::
     kNativeSampleRatesHz[AudioProcessing::kNumNativeSampleRates - 1];
-const int AudioProcessing::kMaxAECMSampleRateHz = kSampleRate16kHz;
 
 AudioProcessing* AudioProcessing::Create() {
   Config config;
@@ -347,7 +346,7 @@ int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
 
   formats_.api_format = config;
 
-  // We process at the closest native rate >= min(input rate, output rate)...
+  // We process at the closest native rate >= min(input rate, output rate).
   const int min_proc_rate =
       std::min(formats_.api_format.input_stream().sample_rate_hz(),
                formats_.api_format.output_stream().sample_rate_hz());
@@ -358,11 +357,6 @@ int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
       break;
     }
   }
-  // ...with one exception.
-  if (public_submodules_->echo_control_mobile->is_enabled() &&
-      min_proc_rate > kMaxAECMSampleRateHz) {
-    fwd_proc_rate = kMaxAECMSampleRateHz;
-  }
 
   capture_nonlocked_.fwd_proc_format = StreamConfig(fwd_proc_rate);
 
@@ -595,12 +589,6 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
     return kBadSampleRateError;
   }
 
-  if (public_submodules_->echo_control_mobile->is_enabled() &&
-      frame->sample_rate_hz_ > kMaxAECMSampleRateHz) {
-    LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
-    return kUnsupportedComponentError;
-  }
-
   ProcessingConfig processing_config;
   {
     // Aquire lock for the access of api_format.
@@ -1254,7 +1242,9 @@ void AudioProcessingImpl::InitializeGainController() {
 
 void AudioProcessingImpl::InitializeEchoControlMobile() {
   public_submodules_->echo_control_mobile->Initialize(
-      proc_sample_rate_hz(), num_reverse_channels(), num_output_channels());
+      proc_split_sample_rate_hz(),
+      num_reverse_channels(),
+      num_output_channels());
 }
 
 void AudioProcessingImpl::InitializeLevelEstimator() {
diff --git a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
index d3fe17a352..2d5ba35c34 100644
--- a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
+++ b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
@@ -243,6 +243,12 @@ int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio,
       ++handle_index;
     }
+    for (size_t band = 1u; band < audio->num_bands(); ++band) {
+      memset(audio->split_bands(capture)[band],
+             0,
+             audio->num_frames_per_band() *
+                 sizeof(audio->split_bands(capture)[band][0]));
+    }
   }
 
   return AudioProcessing::kNoError;
 }
diff --git a/webrtc/modules/audio_processing/include/audio_processing.h b/webrtc/modules/audio_processing/include/audio_processing.h
index 6fb10c6f85..95e56097da 100644
--- a/webrtc/modules/audio_processing/include/audio_processing.h
+++ b/webrtc/modules/audio_processing/include/audio_processing.h
@@ -508,7 +508,6 @@ class AudioProcessing {
   static const int kNativeSampleRatesHz[];
   static const size_t kNumNativeSampleRates;
   static const int kMaxNativeSampleRateHz;
-  static const int kMaxAECMSampleRateHz;
 
   static const int kChunkSizeMs = 10;
 };
diff --git a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
index 53f667d50d..da695ec038 100644
--- a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
+++ b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
@@ -55,8 +55,8 @@ const google::protobuf::int32 kChannels[] = {1, 2};
 const int kSampleRates[] = {8000, 16000, 32000, 48000};
 
 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
-// AECM doesn't support super-wb.
-const int kProcessSampleRates[] = {8000, 16000};
+// Android doesn't support 48kHz.
+const int kProcessSampleRates[] = {8000, 16000, 32000};
 #elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
 const int kProcessSampleRates[] = {8000, 16000, 32000, 48000};
 #endif
@@ -435,11 +435,7 @@ void ApmTest::SetUp() {
 
   frame_ = new AudioFrame();
   revframe_ = new AudioFrame();
-#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
-  Init(16000, 16000, 16000, 2, 2, 2, false);
-#else
   Init(32000, 32000, 32000, 2, 2, 2, false);
-#endif
 }
 
 void ApmTest::TearDown() {
@@ -1039,18 +1035,6 @@ TEST_F(ApmTest, DISABLED_EchoCancellationReportsCorrectDelays) {
 }
 
 TEST_F(ApmTest, EchoControlMobile) {
-  // AECM won't use super-wideband.
-  SetFrameSampleRate(frame_, 32000);
-  EXPECT_NOERR(apm_->ProcessStream(frame_));
-  EXPECT_EQ(apm_->kBadSampleRateError,
-            apm_->echo_control_mobile()->Enable(true));
-  SetFrameSampleRate(frame_, 16000);
-  EXPECT_NOERR(apm_->ProcessStream(frame_));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_control_mobile()->Enable(true));
-  SetFrameSampleRate(frame_, 32000);
-  EXPECT_EQ(apm_->kUnsupportedComponentError, apm_->ProcessStream(frame_));
-
   // Turn AECM on (and AEC off)
   Init(16000, 16000, 16000, 2, 2, 2, false);
   EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
@@ -1974,6 +1958,7 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
                               num_input_channels);
 
     int analog_level = 127;
+    size_t num_bad_chunks = 0;
     while (ReadFrame(far_file_, revframe_, revfloat_cb_.get()) &&
            ReadFrame(near_file_, frame_, float_cb_.get())) {
       frame_->vad_activity_ = AudioFrame::kVadUnknown;
@@ -2012,18 +1997,13 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
         float snr = ComputeSNR(output_int16.channels()[j],
                                output_cb.channels()[j],
                                samples_per_channel, &variance);
-  #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
-        // There are a few chunks in the fixed-point profile that give low SNR.
-        // Listening confirmed the difference is acceptable.
-        const float kVarianceThreshold = 150;
-        const float kSNRThreshold = 10;
-  #else
+
         const float kVarianceThreshold = 20;
         const float kSNRThreshold = 20;
-  #endif
+
         // Skip frames with low energy.
-        if (sqrt(variance) > kVarianceThreshold) {
-          EXPECT_LT(kSNRThreshold, snr);
+        if (sqrt(variance) > kVarianceThreshold && snr < kSNRThreshold) {
+          ++num_bad_chunks;
         }
       }
 
@@ -2039,6 +2019,16 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
       // Reset in case of downmixing.
      frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
     }
+
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+    const size_t kMaxNumBadChunks = 0;
+#elif defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
+    // There are a few chunks in the fixed-point profile that give low SNR.
+    // Listening confirmed the difference is acceptable.
+    const size_t kMaxNumBadChunks = 60;
+#endif
+    EXPECT_LE(num_bad_chunks, kMaxNumBadChunks);
+
     rewind(far_file_);
     rewind(near_file_);
   }
@@ -2560,9 +2550,9 @@ TEST_P(AudioProcessingTest, Formats) {
       } else {
         ref_rate = 8000;
       }
-#ifdef WEBRTC_AUDIOPROC_FIXED_PROFILE
+#ifdef WEBRTC_ARCH_ARM_FAMILY
       if (file_direction == kForward) {
-        ref_rate = std::min(ref_rate, 16000);
+        ref_rate = std::min(ref_rate, 32000);
       }
 #endif
       FILE* out_file = fopen(
@@ -2645,9 +2635,7 @@ TEST_P(AudioProcessingTest, Formats) {
         EXPECT_NE(0, expected_snr);
         std::cout << "SNR=" << snr << " dB" << std::endl;
       } else {
-        EXPECT_EQ(expected_snr, 0);
-        std::cout << "SNR="
-                  << "inf dB" << std::endl;
+        std::cout << "SNR=inf dB" << std::endl;
       }
 
       fclose(out_file);
@@ -2729,9 +2717,9 @@ INSTANTIATE_TEST_CASE_P(
         std::tr1::make_tuple(48000, 16000, 32000, 16000, 20, 20),
         std::tr1::make_tuple(48000, 16000, 16000, 16000, 20, 0),
 
-        std::tr1::make_tuple(44100, 48000, 48000, 48000, 20, 0),
-        std::tr1::make_tuple(44100, 48000, 32000, 48000, 20, 30),
-        std::tr1::make_tuple(44100, 48000, 16000, 48000, 20, 20),
+        std::tr1::make_tuple(44100, 48000, 48000, 48000, 15, 0),
+        std::tr1::make_tuple(44100, 48000, 32000, 48000, 15, 30),
+        std::tr1::make_tuple(44100, 48000, 16000, 48000, 15, 20),
         std::tr1::make_tuple(44100, 44100, 48000, 44100, 15, 20),
         std::tr1::make_tuple(44100, 44100, 32000, 44100, 15, 15),
         std::tr1::make_tuple(44100, 44100, 16000, 44100, 15, 15),
@@ -2742,15 +2730,15 @@ INSTANTIATE_TEST_CASE_P(
         std::tr1::make_tuple(44100, 16000, 32000, 16000, 20, 20),
         std::tr1::make_tuple(44100, 16000, 16000, 16000, 20, 0),
 
-        std::tr1::make_tuple(32000, 48000, 48000, 48000, 20, 0),
-        std::tr1::make_tuple(32000, 48000, 32000, 48000, 20, 30),
-        std::tr1::make_tuple(32000, 48000, 16000, 48000, 20, 20),
-        std::tr1::make_tuple(32000, 44100, 48000, 44100, 15, 20),
-        std::tr1::make_tuple(32000, 44100, 32000, 44100, 15, 15),
-        std::tr1::make_tuple(32000, 44100, 16000, 44100, 15, 15),
-        std::tr1::make_tuple(32000, 32000, 48000, 32000, 20, 35),
-        std::tr1::make_tuple(32000, 32000, 32000, 32000, 20, 0),
-        std::tr1::make_tuple(32000, 32000, 16000, 32000, 20, 20),
+        std::tr1::make_tuple(32000, 48000, 48000, 48000, 35, 0),
+        std::tr1::make_tuple(32000, 48000, 32000, 48000, 65, 30),
+        std::tr1::make_tuple(32000, 48000, 16000, 48000, 40, 20),
+        std::tr1::make_tuple(32000, 44100, 48000, 44100, 20, 20),
+        std::tr1::make_tuple(32000, 44100, 32000, 44100, 20, 15),
+        std::tr1::make_tuple(32000, 44100, 16000, 44100, 20, 15),
+        std::tr1::make_tuple(32000, 32000, 48000, 32000, 35, 35),
+        std::tr1::make_tuple(32000, 32000, 32000, 32000, 0, 0),
+        std::tr1::make_tuple(32000, 32000, 16000, 32000, 40, 20),
         std::tr1::make_tuple(32000, 16000, 48000, 16000, 20, 20),
         std::tr1::make_tuple(32000, 16000, 32000, 16000, 20, 20),
         std::tr1::make_tuple(32000, 16000, 16000, 16000, 20, 0),
diff --git a/webrtc/voice_engine/transmit_mixer.cc b/webrtc/voice_engine/transmit_mixer.cc
index 0240389be1..873fe39d15 100644
--- a/webrtc/voice_engine/transmit_mixer.cc
+++ b/webrtc/voice_engine/transmit_mixer.cc
@@ -1125,11 +1125,6 @@ void TransmitMixer::GenerateAudioFrame(const int16_t* audio,
             break;
         }
     }
-    if (audioproc_->echo_control_mobile()->is_enabled()) {
-        // AECM only supports 8 and 16 kHz.
-        _audioFrame.sample_rate_hz_ = std::min(
-            _audioFrame.sample_rate_hz_, AudioProcessing::kMaxAECMSampleRateHz);
-    }
     _audioFrame.num_channels_ = std::min(num_channels, num_codec_channels);
     RemixAndResample(audio, samples_per_channel, num_channels, sample_rate_hz,
                      &resampler_, &_audioFrame);
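
Editorial note (not part of the patch): with the kMaxAECMSampleRateHz exception removed, InitializeLocked() simply picks the closest native rate that is >= min(input rate, output rate), and ProcessStream() no longer rejects frames above 16 kHz when AECM is enabled. The sketch below restates that selection rule in isolation; ChooseFwdProcRate and kNativeRatesHz are made-up stand-ins for the real AudioProcessingImpl members, not WebRTC API.

#include <algorithm>
#include <cstddef>
#include <cstdio>

namespace {

// Mirrors AudioProcessing::kNativeSampleRatesHz from the header above.
const int kNativeRatesHz[] = {8000, 16000, 32000, 48000};
const std::size_t kNumNativeRates =
    sizeof(kNativeRatesHz) / sizeof(kNativeRatesHz[0]);

// Hypothetical helper that restates the loop in InitializeLocked():
// closest native rate >= min(input rate, output rate); if none is large
// enough, the highest native rate is used.
int ChooseFwdProcRate(int input_rate_hz, int output_rate_hz) {
  const int min_proc_rate = std::min(input_rate_hz, output_rate_hz);
  int fwd_proc_rate = kNativeRatesHz[kNumNativeRates - 1];
  for (std::size_t i = 0; i < kNumNativeRates; ++i) {
    if (kNativeRatesHz[i] >= min_proc_rate) {
      fwd_proc_rate = kNativeRatesHz[i];
      break;
    }
  }
  return fwd_proc_rate;
}

}  // namespace

int main() {
  // 44.1 kHz in / 48 kHz out now processes at 48 kHz; before this patch an
  // enabled AECM would have forced the pipeline down to 16 kHz here.
  std::printf("%d\n", ChooseFwdProcRate(44100, 48000));  // 48000
  // 32 kHz in / 16 kHz out still processes at 16 kHz.
  std::printf("%d\n", ChooseFwdProcRate(32000, 16000));  // 16000
  return 0;
}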
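
A second note on the echo_control_mobile_impl.cc hunk earlier in the diff: AECM is now initialized with proc_split_sample_rate_hz(), so it only processes the lowest split band, and the added loop in ProcessCaptureAudio() zeroes the remaining bands so that content AECM never touched cannot pass through when the bands are recombined. Below is a minimal, self-contained sketch of that idea; SplitBuffer, RunAecmOnLowBand and ProcessCaptureSketch are invented stand-ins, not WebRTC types or APIs.

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Stand-in for AudioBuffer's split-band storage: band 0 carries the low
// band that AECM processes; higher bands hold the content produced by the
// splitting filter at 32/48 kHz.
struct SplitBuffer {
  std::vector<std::array<std::int16_t, 160>> bands;  // 160 samples = 10 ms at 16 kHz
};

// Placeholder for the real AECM pass over the low band.
void RunAecmOnLowBand(SplitBuffer* audio) {
  // WebRtcAecm_Process() would operate on audio->bands[0] here.
  (void)audio;
}

void ProcessCaptureSketch(SplitBuffer* audio) {
  RunAecmOnLowBand(audio);
  // Mirrors the added loop: silence every band AECM did not touch so that
  // no unsuppressed (possibly echo-carrying) content survives the band
  // recombination later in the APM pipeline.
  for (std::size_t band = 1; band < audio->bands.size(); ++band) {
    std::memset(audio->bands[band].data(), 0,
                audio->bands[band].size() * sizeof(std::int16_t));
  }
}

int main() {
  SplitBuffer capture;
  capture.bands.resize(3);  // e.g. a 48 kHz stream split into three bands
  ProcessCaptureSketch(&capture);
  return 0;
}

The trade-off is that capture content above the low band is discarded while AECM is active; in exchange, APM no longer has to force the whole pipeline down to 16 kHz, which is what the widened rate coverage and relaxed SNR checks in the unit tests above exercise.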