Reland: Drop the 16kHz sample rate restriction on AECM and zero out higher bands

Landed originally here: https://codereview.webrtc.org/1774553002/
Reverted here: https://codereview.webrtc.org/1781893002/

TBR=solenberg@webrtc.org, tina.legrand@webrtc.org

Review URL: https://codereview.webrtc.org/1777093004

Cr-Commit-Position: refs/heads/master@{#12005}
This commit is contained in:
aluebs 2016-03-15 14:04:58 -07:00 committed by Commit bot
parent f5d4786080
commit 776593b139
6 changed files with 42 additions and 64 deletions

View File

@ -117,7 +117,6 @@ const size_t AudioProcessing::kNumNativeSampleRates =
arraysize(AudioProcessing::kNativeSampleRatesHz); arraysize(AudioProcessing::kNativeSampleRatesHz);
const int AudioProcessing::kMaxNativeSampleRateHz = AudioProcessing:: const int AudioProcessing::kMaxNativeSampleRateHz = AudioProcessing::
kNativeSampleRatesHz[AudioProcessing::kNumNativeSampleRates - 1]; kNativeSampleRatesHz[AudioProcessing::kNumNativeSampleRates - 1];
const int AudioProcessing::kMaxAECMSampleRateHz = kSampleRate16kHz;
AudioProcessing* AudioProcessing::Create() { AudioProcessing* AudioProcessing::Create() {
Config config; Config config;
@ -347,7 +346,7 @@ int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
formats_.api_format = config; formats_.api_format = config;
// We process at the closest native rate >= min(input rate, output rate)... // We process at the closest native rate >= min(input rate, output rate).
const int min_proc_rate = const int min_proc_rate =
std::min(formats_.api_format.input_stream().sample_rate_hz(), std::min(formats_.api_format.input_stream().sample_rate_hz(),
formats_.api_format.output_stream().sample_rate_hz()); formats_.api_format.output_stream().sample_rate_hz());
@ -358,11 +357,6 @@ int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
break; break;
} }
} }
// ...with one exception.
if (public_submodules_->echo_control_mobile->is_enabled() &&
min_proc_rate > kMaxAECMSampleRateHz) {
fwd_proc_rate = kMaxAECMSampleRateHz;
}
capture_nonlocked_.fwd_proc_format = StreamConfig(fwd_proc_rate); capture_nonlocked_.fwd_proc_format = StreamConfig(fwd_proc_rate);
@ -595,12 +589,6 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
return kBadSampleRateError; return kBadSampleRateError;
} }
if (public_submodules_->echo_control_mobile->is_enabled() &&
frame->sample_rate_hz_ > kMaxAECMSampleRateHz) {
LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
return kUnsupportedComponentError;
}
ProcessingConfig processing_config; ProcessingConfig processing_config;
{ {
// Aquire lock for the access of api_format. // Aquire lock for the access of api_format.
@ -1254,7 +1242,9 @@ void AudioProcessingImpl::InitializeGainController() {
void AudioProcessingImpl::InitializeEchoControlMobile() { void AudioProcessingImpl::InitializeEchoControlMobile() {
public_submodules_->echo_control_mobile->Initialize( public_submodules_->echo_control_mobile->Initialize(
proc_sample_rate_hz(), num_reverse_channels(), num_output_channels()); proc_split_sample_rate_hz(),
num_reverse_channels(),
num_output_channels());
} }
void AudioProcessingImpl::InitializeLevelEstimator() { void AudioProcessingImpl::InitializeLevelEstimator() {

View File

@ -243,6 +243,12 @@ int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio,
++handle_index; ++handle_index;
} }
for (size_t band = 1u; band < audio->num_bands(); ++band) {
memset(audio->split_bands(capture)[band],
0,
audio->num_frames_per_band() *
sizeof(audio->split_bands(capture)[band][0]));
}
} }
return AudioProcessing::kNoError; return AudioProcessing::kNoError;
} }

View File

@ -508,7 +508,6 @@ class AudioProcessing {
static const int kNativeSampleRatesHz[]; static const int kNativeSampleRatesHz[];
static const size_t kNumNativeSampleRates; static const size_t kNumNativeSampleRates;
static const int kMaxNativeSampleRateHz; static const int kMaxNativeSampleRateHz;
static const int kMaxAECMSampleRateHz;
static const int kChunkSizeMs = 10; static const int kChunkSizeMs = 10;
}; };

View File

@ -55,8 +55,8 @@ const google::protobuf::int32 kChannels[] = {1, 2};
const int kSampleRates[] = {8000, 16000, 32000, 48000}; const int kSampleRates[] = {8000, 16000, 32000, 48000};
#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE) #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
// AECM doesn't support super-wb. // Android doesn't support 48kHz.
const int kProcessSampleRates[] = {8000, 16000}; const int kProcessSampleRates[] = {8000, 16000, 32000};
#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE) #elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
const int kProcessSampleRates[] = {8000, 16000, 32000, 48000}; const int kProcessSampleRates[] = {8000, 16000, 32000, 48000};
#endif #endif
@ -435,11 +435,7 @@ void ApmTest::SetUp() {
frame_ = new AudioFrame(); frame_ = new AudioFrame();
revframe_ = new AudioFrame(); revframe_ = new AudioFrame();
#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
Init(16000, 16000, 16000, 2, 2, 2, false);
#else
Init(32000, 32000, 32000, 2, 2, 2, false); Init(32000, 32000, 32000, 2, 2, 2, false);
#endif
} }
void ApmTest::TearDown() { void ApmTest::TearDown() {
@ -1039,18 +1035,6 @@ TEST_F(ApmTest, DISABLED_EchoCancellationReportsCorrectDelays) {
} }
TEST_F(ApmTest, EchoControlMobile) { TEST_F(ApmTest, EchoControlMobile) {
// AECM won't use super-wideband.
SetFrameSampleRate(frame_, 32000);
EXPECT_NOERR(apm_->ProcessStream(frame_));
EXPECT_EQ(apm_->kBadSampleRateError,
apm_->echo_control_mobile()->Enable(true));
SetFrameSampleRate(frame_, 16000);
EXPECT_NOERR(apm_->ProcessStream(frame_));
EXPECT_EQ(apm_->kNoError,
apm_->echo_control_mobile()->Enable(true));
SetFrameSampleRate(frame_, 32000);
EXPECT_EQ(apm_->kUnsupportedComponentError, apm_->ProcessStream(frame_));
// Turn AECM on (and AEC off) // Turn AECM on (and AEC off)
Init(16000, 16000, 16000, 2, 2, 2, false); Init(16000, 16000, 16000, 2, 2, 2, false);
EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true)); EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
@ -1974,6 +1958,7 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
num_input_channels); num_input_channels);
int analog_level = 127; int analog_level = 127;
size_t num_bad_chunks = 0;
while (ReadFrame(far_file_, revframe_, revfloat_cb_.get()) && while (ReadFrame(far_file_, revframe_, revfloat_cb_.get()) &&
ReadFrame(near_file_, frame_, float_cb_.get())) { ReadFrame(near_file_, frame_, float_cb_.get())) {
frame_->vad_activity_ = AudioFrame::kVadUnknown; frame_->vad_activity_ = AudioFrame::kVadUnknown;
@ -2012,18 +1997,13 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
float snr = ComputeSNR(output_int16.channels()[j], float snr = ComputeSNR(output_int16.channels()[j],
output_cb.channels()[j], output_cb.channels()[j],
samples_per_channel, &variance); samples_per_channel, &variance);
#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
// There are a few chunks in the fixed-point profile that give low SNR.
// Listening confirmed the difference is acceptable.
const float kVarianceThreshold = 150;
const float kSNRThreshold = 10;
#else
const float kVarianceThreshold = 20; const float kVarianceThreshold = 20;
const float kSNRThreshold = 20; const float kSNRThreshold = 20;
#endif
// Skip frames with low energy. // Skip frames with low energy.
if (sqrt(variance) > kVarianceThreshold) { if (sqrt(variance) > kVarianceThreshold && snr < kSNRThreshold) {
EXPECT_LT(kSNRThreshold, snr); ++num_bad_chunks;
} }
} }
@ -2039,6 +2019,16 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
// Reset in case of downmixing. // Reset in case of downmixing.
frame_->num_channels_ = static_cast<size_t>(test->num_input_channels()); frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
} }
#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
const size_t kMaxNumBadChunks = 0;
#elif defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
// There are a few chunks in the fixed-point profile that give low SNR.
// Listening confirmed the difference is acceptable.
const size_t kMaxNumBadChunks = 60;
#endif
EXPECT_LE(num_bad_chunks, kMaxNumBadChunks);
rewind(far_file_); rewind(far_file_);
rewind(near_file_); rewind(near_file_);
} }
@ -2560,9 +2550,9 @@ TEST_P(AudioProcessingTest, Formats) {
} else { } else {
ref_rate = 8000; ref_rate = 8000;
} }
#ifdef WEBRTC_AUDIOPROC_FIXED_PROFILE #ifdef WEBRTC_ARCH_ARM_FAMILY
if (file_direction == kForward) { if (file_direction == kForward) {
ref_rate = std::min(ref_rate, 16000); ref_rate = std::min(ref_rate, 32000);
} }
#endif #endif
FILE* out_file = fopen( FILE* out_file = fopen(
@ -2645,9 +2635,7 @@ TEST_P(AudioProcessingTest, Formats) {
EXPECT_NE(0, expected_snr); EXPECT_NE(0, expected_snr);
std::cout << "SNR=" << snr << " dB" << std::endl; std::cout << "SNR=" << snr << " dB" << std::endl;
} else { } else {
EXPECT_EQ(expected_snr, 0); std::cout << "SNR=inf dB" << std::endl;
std::cout << "SNR="
<< "inf dB" << std::endl;
} }
fclose(out_file); fclose(out_file);
@ -2729,9 +2717,9 @@ INSTANTIATE_TEST_CASE_P(
std::tr1::make_tuple(48000, 16000, 32000, 16000, 20, 20), std::tr1::make_tuple(48000, 16000, 32000, 16000, 20, 20),
std::tr1::make_tuple(48000, 16000, 16000, 16000, 20, 0), std::tr1::make_tuple(48000, 16000, 16000, 16000, 20, 0),
std::tr1::make_tuple(44100, 48000, 48000, 48000, 20, 0), std::tr1::make_tuple(44100, 48000, 48000, 48000, 15, 0),
std::tr1::make_tuple(44100, 48000, 32000, 48000, 20, 30), std::tr1::make_tuple(44100, 48000, 32000, 48000, 15, 30),
std::tr1::make_tuple(44100, 48000, 16000, 48000, 20, 20), std::tr1::make_tuple(44100, 48000, 16000, 48000, 15, 20),
std::tr1::make_tuple(44100, 44100, 48000, 44100, 15, 20), std::tr1::make_tuple(44100, 44100, 48000, 44100, 15, 20),
std::tr1::make_tuple(44100, 44100, 32000, 44100, 15, 15), std::tr1::make_tuple(44100, 44100, 32000, 44100, 15, 15),
std::tr1::make_tuple(44100, 44100, 16000, 44100, 15, 15), std::tr1::make_tuple(44100, 44100, 16000, 44100, 15, 15),
@ -2742,15 +2730,15 @@ INSTANTIATE_TEST_CASE_P(
std::tr1::make_tuple(44100, 16000, 32000, 16000, 20, 20), std::tr1::make_tuple(44100, 16000, 32000, 16000, 20, 20),
std::tr1::make_tuple(44100, 16000, 16000, 16000, 20, 0), std::tr1::make_tuple(44100, 16000, 16000, 16000, 20, 0),
std::tr1::make_tuple(32000, 48000, 48000, 48000, 20, 0), std::tr1::make_tuple(32000, 48000, 48000, 48000, 35, 0),
std::tr1::make_tuple(32000, 48000, 32000, 48000, 20, 30), std::tr1::make_tuple(32000, 48000, 32000, 48000, 65, 30),
std::tr1::make_tuple(32000, 48000, 16000, 48000, 20, 20), std::tr1::make_tuple(32000, 48000, 16000, 48000, 40, 20),
std::tr1::make_tuple(32000, 44100, 48000, 44100, 15, 20), std::tr1::make_tuple(32000, 44100, 48000, 44100, 20, 20),
std::tr1::make_tuple(32000, 44100, 32000, 44100, 15, 15), std::tr1::make_tuple(32000, 44100, 32000, 44100, 20, 15),
std::tr1::make_tuple(32000, 44100, 16000, 44100, 15, 15), std::tr1::make_tuple(32000, 44100, 16000, 44100, 20, 15),
std::tr1::make_tuple(32000, 32000, 48000, 32000, 20, 35), std::tr1::make_tuple(32000, 32000, 48000, 32000, 35, 35),
std::tr1::make_tuple(32000, 32000, 32000, 32000, 20, 0), std::tr1::make_tuple(32000, 32000, 32000, 32000, 0, 0),
std::tr1::make_tuple(32000, 32000, 16000, 32000, 20, 20), std::tr1::make_tuple(32000, 32000, 16000, 32000, 40, 20),
std::tr1::make_tuple(32000, 16000, 48000, 16000, 20, 20), std::tr1::make_tuple(32000, 16000, 48000, 16000, 20, 20),
std::tr1::make_tuple(32000, 16000, 32000, 16000, 20, 20), std::tr1::make_tuple(32000, 16000, 32000, 16000, 20, 20),
std::tr1::make_tuple(32000, 16000, 16000, 16000, 20, 0), std::tr1::make_tuple(32000, 16000, 16000, 16000, 20, 0),

View File

@ -1125,11 +1125,6 @@ void TransmitMixer::GenerateAudioFrame(const int16_t* audio,
break; break;
} }
} }
if (audioproc_->echo_control_mobile()->is_enabled()) {
// AECM only supports 8 and 16 kHz.
_audioFrame.sample_rate_hz_ = std::min(
_audioFrame.sample_rate_hz_, AudioProcessing::kMaxAECMSampleRateHz);
}
_audioFrame.num_channels_ = std::min(num_channels, num_codec_channels); _audioFrame.num_channels_ = std::min(num_channels, num_codec_channels);
RemixAndResample(audio, samples_per_channel, num_channels, sample_rate_hz, RemixAndResample(audio, samples_per_channel, num_channels, sample_rate_hz,
&resampler_, &_audioFrame); &resampler_, &_audioFrame);