Keep track of the user-facing number of channels in a ChannelBuffer

Before this change the ChannelBuffer had a fixed number of channels. This meant, for example, that when the Beamformer reduced the number of channels to one, the merging filter bank still merged all the channels, which was unnecessary since they were not processed and were just discarded later. This change doesn't alter the signal at all; it just reflects the actual number of channels in the ChannelBuffer, reducing complexity.

R=henrik.lundin@webrtc.org, peah@webrtc.org, tina.legrand@webrtc.org

Review URL: https://codereview.webrtc.org/2053773002 .

Cr-Commit-Position: refs/heads/master@{#13352}
This commit is contained in:
Alejandro Luebs 2016-06-30 15:33:37 -07:00
parent e59122889f
commit a181c9ad17
11 changed files with 158 additions and 14 deletions

View File

@ -259,6 +259,7 @@ if (rtc_include_tests) {
"audio_ring_buffer_unittest.cc",
"audio_util_unittest.cc",
"blocker_unittest.cc",
"channel_buffer_unittest.cc",
"fir_filter_unittest.cc",
"lapped_transform_unittest.cc",
"real_fourier_unittest.cc",

View File

@ -47,6 +47,7 @@ const ChannelBuffer<float>* IFChannelBuffer::fbuf_const() const {
void IFChannelBuffer::RefreshF() const {
if (!fvalid_) {
RTC_DCHECK(ivalid_);
fbuf_.set_num_channels(ibuf_.num_channels());
const int16_t* const* int_channels = ibuf_.channels();
float* const* float_channels = fbuf_.channels();
for (size_t i = 0; i < ibuf_.num_channels(); ++i) {
@ -62,8 +63,9 @@ void IFChannelBuffer::RefreshI() const {
if (!ivalid_) {
RTC_DCHECK(fvalid_);
int16_t* const* int_channels = ibuf_.channels();
ibuf_.set_num_channels(fbuf_.num_channels());
const float* const* float_channels = fbuf_.channels();
for (size_t i = 0; i < ibuf_.num_channels(); ++i) {
for (size_t i = 0; i < fbuf_.num_channels(); ++i) {
FloatS16ToS16(float_channels[i],
ibuf_.num_frames(),
int_channels[i]);

View File

@ -48,13 +48,14 @@ class ChannelBuffer {
bands_(new T*[num_channels * num_bands]),
num_frames_(num_frames),
num_frames_per_band_(num_frames / num_bands),
num_allocated_channels_(num_channels),
num_channels_(num_channels),
num_bands_(num_bands) {
for (size_t i = 0; i < num_channels_; ++i) {
for (size_t i = 0; i < num_allocated_channels_; ++i) {
for (size_t j = 0; j < num_bands_; ++j) {
channels_[j * num_channels_ + i] =
channels_[j * num_allocated_channels_ + i] =
&data_[i * num_frames_ + j * num_frames_per_band_];
bands_[i * num_bands_ + j] = channels_[j * num_channels_ + i];
bands_[i * num_bands_ + j] = channels_[j * num_allocated_channels_ + i];
}
}
}
@ -63,7 +64,7 @@ class ChannelBuffer {
// Usage:
// channels()[channel][sample].
// Where:
// 0 <= channel < |num_channels_|
// 0 <= channel < |num_allocated_channels_|
// 0 <= sample < |num_frames_|
T* const* channels() { return channels(0); }
const T* const* channels() const { return channels(0); }
@ -73,11 +74,11 @@ class ChannelBuffer {
// channels(band)[channel][sample].
// Where:
// 0 <= band < |num_bands_|
// 0 <= channel < |num_channels_|
// 0 <= channel < |num_allocated_channels_|
// 0 <= sample < |num_frames_per_band_|
const T* const* channels(size_t band) const {
RTC_DCHECK_LT(band, num_bands_);
return &channels_[band * num_channels_];
return &channels_[band * num_allocated_channels_];
}
T* const* channels(size_t band) {
const ChannelBuffer<T>* t = this;
@ -118,7 +119,12 @@ class ChannelBuffer {
size_t num_frames_per_band() const { return num_frames_per_band_; }
size_t num_channels() const { return num_channels_; }
size_t num_bands() const { return num_bands_; }
size_t size() const {return num_frames_ * num_channels_; }
size_t size() const {return num_frames_ * num_allocated_channels_; }
void set_num_channels(size_t num_channels) {
RTC_DCHECK_LE(num_channels, num_allocated_channels_);
num_channels_ = num_channels;
}
void SetDataForTesting(const T* data, size_t size) {
RTC_CHECK_EQ(size, this->size());
@ -131,7 +137,10 @@ class ChannelBuffer {
std::unique_ptr<T* []> bands_;
const size_t num_frames_;
const size_t num_frames_per_band_;
const size_t num_channels_;
// Number of channels the internal buffer holds.
const size_t num_allocated_channels_;
// Number of channels the user sees.
size_t num_channels_;
const size_t num_bands_;
};
@ -152,7 +161,13 @@ class IFChannelBuffer {
size_t num_frames() const { return ibuf_.num_frames(); }
size_t num_frames_per_band() const { return ibuf_.num_frames_per_band(); }
size_t num_channels() const { return ibuf_.num_channels(); }
size_t num_channels() const {
return ivalid_ ? ibuf_.num_channels() : fbuf_.num_channels();
}
void set_num_channels(size_t num_channels) {
ibuf_.set_num_channels(num_channels);
fbuf_.set_num_channels(num_channels);
}
size_t num_bands() const { return ibuf_.num_bands(); }
private:

View File

@ -0,0 +1,65 @@
/*
 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "testing/gtest/include/gtest/gtest.h"

#include "webrtc/common_audio/channel_buffer.h"

namespace webrtc {
namespace {

// Buffer dimensions shared by all tests below.
const size_t kNumFrames = 480u;
const size_t kStereo = 2u;
const size_t kMono = 1u;

// Checks that the wrapper and both of its internal buffers (int16 and float)
// report |expected_channels| as the user-visible channel count.
void ExpectNumChannels(const IFChannelBuffer& if_buffer,
                       size_t expected_channels) {
  EXPECT_EQ(if_buffer.ibuf_const()->num_channels(), expected_channels);
  EXPECT_EQ(if_buffer.fbuf_const()->num_channels(), expected_channels);
  EXPECT_EQ(if_buffer.num_channels(), expected_channels);
}

}  // namespace

// set_num_channels() must change the value reported by num_channels().
TEST(ChannelBufferTest, SetNumChannelsSetsNumChannels) {
  ChannelBuffer<float> buffer(kNumFrames, kStereo);
  EXPECT_EQ(buffer.num_channels(), kStereo);
  buffer.set_num_channels(kMono);
  EXPECT_EQ(buffer.num_channels(), kMono);
}

// Setting the channel count through the wrapper must be visible on both of
// the internal buffers.
TEST(IFChannelBufferTest, SetNumChannelsSetsChannelBuffersNumChannels) {
  IFChannelBuffer if_buffer(kNumFrames, kStereo);
  ExpectNumChannels(if_buffer, kStereo);
  if_buffer.set_num_channels(kMono);
  ExpectNumChannels(if_buffer, kMono);
}

// Setting the channel count on one internal buffer must be reflected by the
// wrapper and by the other internal buffer as well.
TEST(IFChannelBufferTest, SettingNumChannelsOfOneChannelBufferSetsTheOther) {
  IFChannelBuffer if_buffer(kNumFrames, kStereo);
  ExpectNumChannels(if_buffer, kStereo);
  if_buffer.ibuf()->set_num_channels(kMono);
  ExpectNumChannels(if_buffer, kMono);
  if_buffer.fbuf()->set_num_channels(kStereo);
  ExpectNumChannels(if_buffer, kStereo);
}

#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
// Growing beyond the allocated channel count must trip an RTC_DCHECK whose
// message mentions "num_channels".
TEST(ChannelBufferTest, SetNumChannelsDeathTest) {
  ChannelBuffer<float> buffer(kNumFrames, kMono);
  EXPECT_DEATH(buffer.set_num_channels(kStereo), "num_channels");
}

TEST(IFChannelBufferTest, SetNumChannelsDeathTest) {
  IFChannelBuffer if_buffer(kNumFrames, kMono);
  EXPECT_DEATH(if_buffer.ibuf()->set_num_channels(kStereo), "num_channels");
}
#endif

}  // namespace webrtc

View File

@ -244,6 +244,7 @@
'audio_ring_buffer_unittest.cc',
'audio_util_unittest.cc',
'blocker_unittest.cc',
'channel_buffer_unittest.cc',
'fir_filter_unittest.cc',
'lapped_transform_unittest.cc',
'real_fourier_unittest.cc',

View File

@ -112,6 +112,7 @@ if (rtc_include_tests) {
# "audio_processing/agc/agc_unittest.cc",
"audio_processing/agc/loudness_histogram_unittest.cc",
"audio_processing/agc/mock_agc.h",
"audio_processing/audio_buffer_unittest.cc",
"audio_processing/beamformer/array_util_unittest.cc",
"audio_processing/beamformer/complex_matrix_unittest.cc",
"audio_processing/beamformer/covariance_matrix_generator_unittest.cc",

View File

@ -184,6 +184,10 @@ void AudioBuffer::InitForNewData() {
reference_copied_ = false;
activity_ = AudioFrame::kVadUnknown;
num_channels_ = num_proc_channels_;
data_->set_num_channels(num_proc_channels_);
if (split_data_.get()) {
split_data_->set_num_channels(num_proc_channels_);
}
}
const int16_t* const* AudioBuffer::channels_const() const {
@ -345,6 +349,10 @@ size_t AudioBuffer::num_channels() const {
void AudioBuffer::set_num_channels(size_t num_channels) {
num_channels_ = num_channels;
data_->set_num_channels(num_channels);
if (split_data_.get()) {
split_data_->set_num_channels(num_channels);
}
}
size_t AudioBuffer::num_frames() const {

View File

@ -123,6 +123,8 @@ class AudioBuffer {
void MergeFrequencyBands();
private:
FRIEND_TEST_ALL_PREFIXES(AudioBufferTest,
SetNumChannelsSetsChannelBuffersNumChannels);
// Called from DeinterleaveFrom() and CopyFrom().
void InitForNewData();

View File

@ -0,0 +1,48 @@
/*
 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "testing/gtest/include/gtest/gtest.h"

#include "webrtc/modules/audio_processing/audio_buffer.h"

namespace webrtc {
namespace {

// Buffer dimensions shared by the tests below.
const size_t kNumFrames = 480u;
const size_t kStereo = 2u;
const size_t kMono = 1u;

// Verifies that every view exposed by the AudioBuffer agrees on the
// user-visible channel count.
void ExpectNumChannels(const AudioBuffer& audio_buffer,
                       size_t expected_channels) {
  EXPECT_EQ(audio_buffer.data()->num_channels(), expected_channels);
  EXPECT_EQ(audio_buffer.data_f()->num_channels(), expected_channels);
  EXPECT_EQ(audio_buffer.split_data()->num_channels(), expected_channels);
  EXPECT_EQ(audio_buffer.split_data_f()->num_channels(), expected_channels);
  EXPECT_EQ(audio_buffer.num_channels(), expected_channels);
}

}  // namespace

TEST(AudioBufferTest, SetNumChannelsSetsChannelBuffersNumChannels) {
  // Presumably (input frames, input channels, process frames, process
  // channels, output frames) — confirm against the AudioBuffer constructor.
  AudioBuffer audio_buffer(kNumFrames, kStereo, kNumFrames, kStereo,
                           kNumFrames);
  ExpectNumChannels(audio_buffer, kStereo);
  audio_buffer.set_num_channels(kMono);
  ExpectNumChannels(audio_buffer, kMono);
  // InitForNewData() is private; this test reaches it via FRIEND_TEST. It
  // restores the channel count configured at construction.
  audio_buffer.InitForNewData();
  ExpectNumChannels(audio_buffer, kStereo);
}

#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
// Growing beyond the allocated channel count must trip an RTC_DCHECK whose
// message mentions "num_channels".
TEST(AudioBufferTest, SetNumChannelsDeathTest) {
  AudioBuffer audio_buffer(kNumFrames, kMono, kNumFrames, kMono, kNumFrames);
  EXPECT_DEATH(audio_buffer.set_num_channels(kStereo), "num_channels");
}
#endif

}  // namespace webrtc

View File

@ -72,8 +72,8 @@ void SplittingFilter::TwoBandsAnalysis(const IFChannelBuffer* data,
void SplittingFilter::TwoBandsSynthesis(const IFChannelBuffer* bands,
IFChannelBuffer* data) {
RTC_DCHECK_EQ(two_bands_states_.size(), data->num_channels());
for (size_t i = 0; i < two_bands_states_.size(); ++i) {
RTC_DCHECK_LE(data->num_channels(), two_bands_states_.size());
for (size_t i = 0; i < data->num_channels(); ++i) {
WebRtcSpl_SynthesisQMF(bands->ibuf_const()->channels(0)[i],
bands->ibuf_const()->channels(1)[i],
bands->num_frames_per_band(),
@ -95,8 +95,8 @@ void SplittingFilter::ThreeBandsAnalysis(const IFChannelBuffer* data,
void SplittingFilter::ThreeBandsSynthesis(const IFChannelBuffer* bands,
IFChannelBuffer* data) {
RTC_DCHECK_EQ(three_band_filter_banks_.size(), data->num_channels());
for (size_t i = 0; i < three_band_filter_banks_.size(); ++i) {
RTC_DCHECK_LE(data->num_channels(), three_band_filter_banks_.size());
for (size_t i = 0; i < data->num_channels(); ++i) {
three_band_filter_banks_[i]->Synthesis(bands->fbuf_const()->bands(i),
bands->num_frames_per_band(),
data->fbuf()->channels()[i]);

View File

@ -238,6 +238,7 @@
# 'audio_processing/agc/agc_unittest.cc',
'audio_processing/agc/loudness_histogram_unittest.cc',
'audio_processing/agc/mock_agc.h',
'audio_processing/audio_buffer_unittest.cc',
'audio_processing/beamformer/array_util_unittest.cc',
'audio_processing/beamformer/complex_matrix_unittest.cc',
'audio_processing/beamformer/covariance_matrix_generator_unittest.cc',