Use audio views in Interleave() and Deinterleave()

Interleave() and Deinterleave() now take two parameters: an
InterleavedView for the interleaved buffer and a DeinterleavedView for
the deinterleaved one.

The previous versions of the functions still need to exist for test
code that uses ChannelBuffer.

Bug: chromium:335805780
Change-Id: I20371ab6408766d21e6901e6a04000afa05b3553
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/351664
Commit-Queue: Tomas Gunnarsson <tommi@webrtc.org>
Reviewed-by: Per Åhgren <peah@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#42412}
Authored by Tommi on 2024-05-30 13:29:11 +02:00; committed by WebRTC LUCI CQ
parent a97c292a05
commit f58ded7cf0
6 changed files with 95 additions and 27 deletions
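
For context, a minimal sketch of the new call pattern, modeled on the updated InterleavingStereo test below. The constants, buffer sizes, and sample values here are illustrative only and are not part of the change:

// Deinterleave an interleaved int16_t buffer through the new view types,
// then interleave it back. The views wrap existing storage; they do not
// own memory. (Illustrative sketch, not code from this commit.)
constexpr size_t kSamplesPerChannel = 4;
constexpr int kNumChannels = 2;
int16_t interleaved[kSamplesPerChannel * kNumChannels] = {/* L0, R0, L1, R1, ... */};
int16_t deinterleaved[kSamplesPerChannel * kNumChannels] = {};
DeinterleavedView<int16_t> deinterleaved_view(&deinterleaved[0],
                                              kSamplesPerChannel, kNumChannels);
Deinterleave({&interleaved[0], kSamplesPerChannel, kNumChannels},
             deinterleaved_view);
int16_t interleaved_again[kSamplesPerChannel * kNumChannels] = {};
Interleave<int16_t>({&deinterleaved[0], kSamplesPerChannel, kNumChannels},
                    {&interleaved_again[0], kSamplesPerChannel, kNumChannels});

The previous pointer-based overloads are kept alongside these for the ChannelBuffer-based test code mentioned above.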

View File

@@ -124,20 +124,23 @@ TEST(AudioUtilTest, FloatS16ToDbfs) {
 }
 
 TEST(AudioUtilTest, InterleavingStereo) {
-  const int16_t kInterleaved[] = {2, 3, 4, 9, 8, 27, 16, 81};
-  const size_t kSamplesPerChannel = 4;
-  const int kNumChannels = 2;
-  const size_t kLength = kSamplesPerChannel * kNumChannels;
-  int16_t left[kSamplesPerChannel], right[kSamplesPerChannel];
-  int16_t* deinterleaved[] = {left, right};
-  Deinterleave(kInterleaved, kSamplesPerChannel, kNumChannels, deinterleaved);
+  constexpr int16_t kInterleaved[] = {2, 3, 4, 9, 8, 27, 16, 81};
+  constexpr size_t kSamplesPerChannel = 4;
+  constexpr int kNumChannels = 2;
+  constexpr size_t kLength = kSamplesPerChannel * kNumChannels;
+  int16_t deinterleaved[kLength] = {};
+  DeinterleavedView<int16_t> deinterleaved_view(
+      &deinterleaved[0], kSamplesPerChannel, kNumChannels);
+  Deinterleave({&kInterleaved[0], kSamplesPerChannel, kNumChannels},
+               deinterleaved_view);
 
   const int16_t kRefLeft[] = {2, 4, 8, 16};
   const int16_t kRefRight[] = {3, 9, 27, 81};
-  ExpectArraysEq(kRefLeft, left, kSamplesPerChannel);
-  ExpectArraysEq(kRefRight, right, kSamplesPerChannel);
+  ExpectArraysEq(kRefLeft, deinterleaved_view[0].data(), kSamplesPerChannel);
+  ExpectArraysEq(kRefRight, deinterleaved_view[1].data(), kSamplesPerChannel);
 
   int16_t interleaved[kLength];
-  Interleave(deinterleaved, kSamplesPerChannel, kNumChannels, interleaved);
+  Interleave<int16_t>({&deinterleaved[0], kSamplesPerChannel, kNumChannels},
+                      {&interleaved[0], kSamplesPerChannel, kNumChannels});
   ExpectArraysEq(kInterleaved, interleaved, kLength);
 }
@@ -146,12 +149,16 @@ TEST(AudioUtilTest, InterleavingMonoIsIdentical) {
   const size_t kSamplesPerChannel = 5;
   const int kNumChannels = 1;
 
   int16_t mono[kSamplesPerChannel];
-  int16_t* deinterleaved[] = {mono};
-  Deinterleave(kInterleaved, kSamplesPerChannel, kNumChannels, deinterleaved);
-  ExpectArraysEq(kInterleaved, mono, kSamplesPerChannel);
+  DeinterleavedView<int16_t> deinterleaved_view(&mono[0], kSamplesPerChannel,
+                                                kNumChannels);
+  Deinterleave({kInterleaved, kSamplesPerChannel, kNumChannels},
+               deinterleaved_view);
+  ExpectArraysEq(kInterleaved, deinterleaved_view.AsMono().data(),
+                 kSamplesPerChannel);
 
   int16_t interleaved[kSamplesPerChannel];
-  Interleave(deinterleaved, kSamplesPerChannel, kNumChannels, interleaved);
+  Interleave<int16_t>(deinterleaved_view,
+                      {&interleaved[0], kSamplesPerChannel, kNumChannels});
   ExpectArraysEq(mono, interleaved, kSamplesPerChannel);
 }

View File

@@ -18,6 +18,7 @@
 #include <cstring>
 #include <limits>
 
+#include "api/audio/audio_view.h"
 #include "rtc_base/checks.h"
 
 namespace webrtc {
@@ -111,7 +112,26 @@ void CopyAudioIfNeeded(const T* const* src,
 // by `deinterleaved`. There must be sufficient space allocated in the
 // `deinterleaved` buffers (`num_channel` buffers with `samples_per_channel`
 // per buffer).
-// TODO: b/335805780 - Accept ArrayView.
+template <typename T>
+void Deinterleave(const InterleavedView<const T>& interleaved,
+                  const DeinterleavedView<T>& deinterleaved) {
+  RTC_DCHECK_EQ(NumChannels(interleaved), NumChannels(deinterleaved));
+  RTC_DCHECK_EQ(SamplesPerChannel(interleaved),
+                SamplesPerChannel(deinterleaved));
+  const auto num_channels = NumChannels(interleaved);
+  const auto samples_per_channel = SamplesPerChannel(interleaved);
+  for (size_t i = 0; i < num_channels; ++i) {
+    MonoView<T> channel = deinterleaved[i];
+    size_t interleaved_idx = i;
+    for (size_t j = 0; j < samples_per_channel; ++j) {
+      channel[j] = interleaved[interleaved_idx];
+      interleaved_idx += num_channels;
+    }
+  }
+}
+
+// TODO: b/335805780 - Move into test code where this is used once PushResampler
+// has been changed to use a single allocation for deinterleaved audio buffers.
 template <typename T>
 void Deinterleave(const T* interleaved,
                   size_t samples_per_channel,
@@ -130,12 +150,32 @@ void Deinterleave(const T* interleaved,
 // Interleave audio from the channel buffers pointed to by `deinterleaved` to
 // `interleaved`. There must be sufficient space allocated in `interleaved`
 // (`samples_per_channel` * `num_channels`).
-// TODO: b/335805780 - Accept ArrayView.
+template <typename T>
+void Interleave(const DeinterleavedView<const T>& deinterleaved,
+                const InterleavedView<T>& interleaved) {
+  RTC_DCHECK_EQ(NumChannels(interleaved), NumChannels(deinterleaved));
+  RTC_DCHECK_EQ(SamplesPerChannel(interleaved),
+                SamplesPerChannel(deinterleaved));
+  for (size_t i = 0; i < deinterleaved.num_channels(); ++i) {
+    const auto channel = deinterleaved[i];
+    size_t interleaved_idx = i;
+    for (size_t j = 0; j < deinterleaved.samples_per_channel(); ++j) {
+      interleaved[interleaved_idx] = channel[j];
+      interleaved_idx += deinterleaved.num_channels();
+    }
+  }
+}
+
+// `Interleave()` variant for cases where the deinterleaved channels aren't
+// represented by a `DeinterleavedView`.
+// TODO: b/335805780 - Move into test code where this is used.
 template <typename T>
 void Interleave(const T* const* deinterleaved,
                 size_t samples_per_channel,
                 size_t num_channels,
-                T* interleaved) {
+                InterleavedView<T>& interleaved) {
+  RTC_DCHECK_EQ(NumChannels(interleaved), num_channels);
+  RTC_DCHECK_EQ(SamplesPerChannel(interleaved), samples_per_channel);
   for (size_t i = 0; i < num_channels; ++i) {
     const T* channel = deinterleaved[i];
     size_t interleaved_idx = i;

View File

@@ -54,6 +54,11 @@ int PushResampler<T>::InitializeIfNeeded(int src_sample_rate_hz,
   dst_sample_rate_hz_ = dst_sample_rate_hz;
   num_channels_ = num_channels;
 
+  // TODO: b/335805780 - Change this to use a single buffer for source and
+  // destination and initialize each ChannelResampler() with a pointer to
+  // channels in each deinterleaved buffer. That way, DeinterleavedView can be
+  // used for the two buffers.
   const size_t src_size_10ms_mono =
       static_cast<size_t>(src_sample_rate_hz / 100);
   const size_t dst_size_10ms_mono =
@@ -109,9 +114,9 @@ int PushResampler<T>::Resample(InterleavedView<const T> src,
     channel_data_array_[ch] = channel_resamplers_[ch].destination.data();
   }
 
-  // TODO: b/335805780 - Interleave should accept InterleavedView<> as dst.
+  // TODO: b/335805780 - Interleave should accept DeInterleavedView<> as src.
   Interleave(channel_data_array_.data(), dst.samples_per_channel(),
-             num_channels_, &dst[0]);
+             num_channels_, dst);
 
   return static_cast<int>(dst.size());
 }

View File

@@ -2060,16 +2060,25 @@ class AudioProcessingTest
           StreamConfig(output_rate, num_output_channels), out_cb.channels()));
 
       // Dump forward output to file.
-      Interleave(out_cb.channels(), out_cb.num_frames(), out_cb.num_channels(),
-                 float_data.get());
+      RTC_DCHECK_EQ(out_cb.num_bands(), 1u);  // Assumes full frequency band.
+      DeinterleavedView<const float> deinterleaved_src(
+          out_cb.channels()[0], out_cb.num_frames(), out_cb.num_channels());
+      InterleavedView<float> interleaved_dst(
+          float_data.get(), out_cb.num_frames(), out_cb.num_channels());
+      Interleave(deinterleaved_src, interleaved_dst);
       size_t out_length = out_cb.num_channels() * out_cb.num_frames();
 
       ASSERT_EQ(out_length, fwrite(float_data.get(), sizeof(float_data[0]),
                                    out_length, out_file));
 
       // Dump reverse output to file.
-      Interleave(rev_out_cb.channels(), rev_out_cb.num_frames(),
-                 rev_out_cb.num_channels(), float_data.get());
+      RTC_DCHECK_EQ(rev_out_cb.num_bands(), 1u);
+      deinterleaved_src = DeinterleavedView<const float>(
+          rev_out_cb.channels()[0], rev_out_cb.num_frames(),
+          rev_out_cb.num_channels());
+      interleaved_dst = InterleavedView<float>(
+          float_data.get(), rev_out_cb.num_frames(), rev_out_cb.num_channels());
+      Interleave(deinterleaved_src, interleaved_dst);
       size_t rev_out_length =
           rev_out_cb.num_channels() * rev_out_cb.num_frames();

View File

@@ -46,8 +46,12 @@ ChannelBufferWavWriter::~ChannelBufferWavWriter() = default;
 void ChannelBufferWavWriter::Write(const ChannelBuffer<float>& buffer) {
   RTC_CHECK_EQ(file_->num_channels(), buffer.num_channels());
   interleaved_.resize(buffer.size());
-  Interleave(buffer.channels(), buffer.num_frames(), buffer.num_channels(),
-             &interleaved_[0]);
+  InterleavedView<float> view(&interleaved_[0], buffer.num_frames(),
+                              buffer.num_channels());
+  const float* samples = buffer.channels()[0];
+  DeinterleavedView<const float> source(samples, buffer.num_frames(),
+                                        buffer.num_channels());
+  Interleave(source, view);
   FloatToFloatS16(&interleaved_[0], interleaved_.size(), &interleaved_[0]);
   file_->WriteSamples(&interleaved_[0], interleaved_.size());
 }
@@ -62,8 +66,10 @@ ChannelBufferVectorWriter::~ChannelBufferVectorWriter() = default;
 void ChannelBufferVectorWriter::Write(const ChannelBuffer<float>& buffer) {
   // Account for sample rate changes throughout a simulation.
   interleaved_buffer_.resize(buffer.size());
+  InterleavedView<float> view(&interleaved_buffer_[0], buffer.num_frames(),
+                              buffer.num_channels());
   Interleave(buffer.channels(), buffer.num_frames(), buffer.num_channels(),
-             interleaved_buffer_.data());
+             view);
   size_t old_size = output_->size();
   output_->resize(old_size + interleaved_buffer_.size());
   FloatToFloatS16(interleaved_buffer_.data(), interleaved_buffer_.size(),

View File

@@ -146,7 +146,8 @@ void WriteFloatData(const float* const* data,
                     RawFile* raw_file) {
   size_t length = num_channels * samples_per_channel;
   std::unique_ptr<float[]> buffer(new float[length]);
-  Interleave(data, samples_per_channel, num_channels, buffer.get());
+  InterleavedView<float> view(buffer.get(), samples_per_channel, num_channels);
+  Interleave(data, samples_per_channel, num_channels, view);
   if (raw_file) {
     raw_file->WriteSamples(buffer.get(), length);
   }