APM: Replace most usages of AudioFrame with a stream interface

This CL creates a new stream interface and uses it to replace
most of the usage of AudioFrame in the non-test code.

The CL also changes some of the test code, since the other
changes required it.

The CL will be followed by two more related CLs.

Bug: webrtc:5298
Change-Id: I5cfbe6079f30fc3fbf35b35fd077b6fb49c7def0
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/170040
Commit-Queue: Per Åhgren <peah@webrtc.org>
Reviewed-by: Sam Zackrisson <saza@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#30799}
Author: Per Åhgren <peah@webrtc.org>
Date: 2020-03-16 12:06:02 +01:00
Committed by: Commit Bot
Parent: d5d0a2b546
Commit: 645f24cb86

16 changed files with 257 additions and 114 deletions
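
For orientation before the per-file diffs, here is a minimal sketch of the new
capture-side entry point, based only on the declarations this CL adds to the
AudioProcessing interface below; the helper function and the way |apm| is
obtained are illustrative, not part of the CL:

  #include <array>
  #include <cstdint>

  #include "modules/audio_processing/include/audio_processing.h"

  // Processes one 10 ms mono frame at 16 kHz in place through the new
  // interleaved int16 stream interface. |apm| is assumed to be an already
  // configured AudioProcessing instance.
  void ProcessOneCaptureFrame(webrtc::AudioProcessing* apm) {
    constexpr int kSampleRateHz = 16000;
    constexpr int kNumChannels = 1;
    constexpr int kSamplesPerChannel = kSampleRateHz / 100;  // 10 ms.
    std::array<int16_t, kSamplesPerChannel * kNumChannels> frame = {};
    webrtc::StreamConfig config(kSampleRateHz, kNumChannels,
                                /*has_keyboard=*/false);
    webrtc::AudioProcessing::VoiceDetectionResult vad_result =
        webrtc::AudioProcessing::VoiceDetectionResult::kNotAvailable;
    // |src| and |dest| may point to the same memory, so in-place use is fine.
    apm->ProcessStream(frame.data(), config, config, frame.data(),
                       &vad_result);
  }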

View File

@@ -35,7 +35,8 @@ rtc_library("mock_aec_dump") {
 rtc_library("mock_aec_dump_unittests") {
   testonly = true
+  configs += [ "..:apm_debug_dump" ]
   sources = [ "aec_dump_integration_test.cc" ]
   deps = [

View File

@@ -109,12 +109,16 @@ void AecDumpImpl::AddCaptureStreamOutput(
   capture_stream_info_.AddOutput(src);
 }
 
-void AecDumpImpl::AddCaptureStreamInput(const AudioFrame& frame) {
-  capture_stream_info_.AddInput(frame);
+void AecDumpImpl::AddCaptureStreamInput(const int16_t* const data,
+                                        int num_channels,
+                                        int samples_per_channel) {
+  capture_stream_info_.AddInput(data, num_channels, samples_per_channel);
 }
 
-void AecDumpImpl::AddCaptureStreamOutput(const AudioFrame& frame) {
-  capture_stream_info_.AddOutput(frame);
+void AecDumpImpl::AddCaptureStreamOutput(const int16_t* const data,
+                                         int num_channels,
+                                         int samples_per_channel) {
+  capture_stream_info_.AddOutput(data, num_channels, samples_per_channel);
 }
 
 void AecDumpImpl::AddAudioProcessingState(const AudioProcessingState& state) {
@@ -128,15 +132,16 @@ void AecDumpImpl::WriteCaptureStreamMessage() {
   capture_stream_info_.SetTask(CreateWriteToFileTask());
 }
 
-void AecDumpImpl::WriteRenderStreamMessage(const AudioFrame& frame) {
+void AecDumpImpl::WriteRenderStreamMessage(const int16_t* const data,
+                                           int num_channels,
+                                           int samples_per_channel) {
   auto task = CreateWriteToFileTask();
   auto* event = task->GetEvent();
   event->set_type(audioproc::Event::REVERSE_STREAM);
   audioproc::ReverseStream* msg = event->mutable_reverse_stream();
-  const size_t data_size =
-      sizeof(int16_t) * frame.samples_per_channel_ * frame.num_channels_;
-  msg->set_data(frame.data(), data_size);
+  const size_t data_size = sizeof(int16_t) * samples_per_channel * num_channels;
+  msg->set_data(data, data_size);
 
   worker_queue_->PostTask(std::move(task));
 }

View File

@@ -15,7 +15,6 @@
 #include <string>
 #include <vector>
 
-#include "api/audio/audio_frame.h"
 #include "modules/audio_processing/aec_dump/capture_stream_info.h"
 #include "modules/audio_processing/aec_dump/write_to_file_task.h"
 #include "modules/audio_processing/include/aec_dump.h"
@@ -55,12 +54,18 @@ class AecDumpImpl : public AecDump {
                 int64_t time_now_ms) override;
   void AddCaptureStreamInput(const AudioFrameView<const float>& src) override;
   void AddCaptureStreamOutput(const AudioFrameView<const float>& src) override;
-  void AddCaptureStreamInput(const AudioFrame& frame) override;
-  void AddCaptureStreamOutput(const AudioFrame& frame) override;
+  void AddCaptureStreamInput(const int16_t* const data,
+                             int num_channels,
+                             int samples_per_channel) override;
+  void AddCaptureStreamOutput(const int16_t* const data,
+                              int num_channels,
+                              int samples_per_channel) override;
   void AddAudioProcessingState(const AudioProcessingState& state) override;
   void WriteCaptureStreamMessage() override;
-  void WriteRenderStreamMessage(const AudioFrame& frame) override;
+  void WriteRenderStreamMessage(const int16_t* const data,
+                                int num_channels,
+                                int samples_per_channel) override;
   void WriteRenderStreamMessage(
       const AudioFrameView<const float>& src) override;

View File

@@ -8,16 +8,17 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <array>
 #include <memory>
 #include <utility>
 
 #include "modules/audio_processing/aec_dump/mock_aec_dump.h"
+#include "modules/audio_processing/audio_processing_impl.h"
 #include "modules/audio_processing/include/audio_processing.h"
 
 using ::testing::_;
 using ::testing::AtLeast;
 using ::testing::Exactly;
-using ::testing::Matcher;
 using ::testing::StrictMock;
 
 namespace {
@@ -37,14 +38,6 @@ std::unique_ptr<webrtc::test::MockAecDump> CreateMockAecDump() {
   return std::unique_ptr<webrtc::test::MockAecDump>(std::move(mock_aec_dump));
 }
 
-std::unique_ptr<webrtc::AudioFrame> CreateFakeFrame() {
-  auto fake_frame = std::make_unique<webrtc::AudioFrame>();
-  fake_frame->num_channels_ = 1;
-  fake_frame->sample_rate_hz_ = 48000;
-  fake_frame->samples_per_channel_ = 480;
-  return fake_frame;
-}
-
 }  // namespace
 
 TEST(AecDumpIntegration, ConfigurationAndInitShouldBeLogged) {
@@ -57,27 +50,40 @@ TEST(AecDumpIntegration,
      RenderStreamShouldBeLoggedOnceEveryProcessReverseStream) {
   auto apm = CreateAudioProcessing();
   auto mock_aec_dump = CreateMockAecDump();
-  auto fake_frame = CreateFakeFrame();
+  constexpr int kNumChannels = 1;
+  constexpr int kNumSampleRateHz = 16000;
+  constexpr int kNumSamplesPerChannel = kNumSampleRateHz / 100;
+  std::array<int16_t, kNumSamplesPerChannel * kNumChannels> frame;
+  frame.fill(0.f);
+  webrtc::StreamConfig stream_config(kNumSampleRateHz, kNumChannels,
+                                     /*has_keyboard=*/false);
 
-  EXPECT_CALL(*mock_aec_dump.get(),
-              WriteRenderStreamMessage(Matcher<const webrtc::AudioFrame&>(_)))
+  EXPECT_CALL(*mock_aec_dump.get(), WriteRenderStreamMessage(_, _, _))
      .Times(Exactly(1));
 
   apm->AttachAecDump(std::move(mock_aec_dump));
-  apm->ProcessReverseStream(fake_frame.get());
+  apm->ProcessReverseStream(frame.data(), stream_config, stream_config,
+                            frame.data());
 }
 
 TEST(AecDumpIntegration, CaptureStreamShouldBeLoggedOnceEveryProcessStream) {
   auto apm = CreateAudioProcessing();
   auto mock_aec_dump = CreateMockAecDump();
-  auto fake_frame = CreateFakeFrame();
+  constexpr int kNumChannels = 1;
+  constexpr int kNumSampleRateHz = 16000;
+  constexpr int kNumSamplesPerChannel = kNumSampleRateHz / 100;
+  std::array<int16_t, kNumSamplesPerChannel * kNumChannels> frame;
+  frame.fill(0.f);
+  webrtc::AudioProcessing::VoiceDetectionResult vad_result =
+      webrtc::AudioProcessing::VoiceDetectionResult::kNotAvailable;
 
-  EXPECT_CALL(*mock_aec_dump.get(),
-              AddCaptureStreamInput(Matcher<const webrtc::AudioFrame&>(_)))
+  webrtc::StreamConfig stream_config(kNumSampleRateHz, kNumChannels,
+                                     /*has_keyboard=*/false);
+  EXPECT_CALL(*mock_aec_dump.get(), AddCaptureStreamInput(_, _, _))
      .Times(AtLeast(1));
 
-  EXPECT_CALL(*mock_aec_dump.get(),
-              AddCaptureStreamOutput(Matcher<const webrtc::AudioFrame&>(_)))
+  EXPECT_CALL(*mock_aec_dump.get(), AddCaptureStreamOutput(_, _, _))
      .Times(Exactly(1));
 
   EXPECT_CALL(*mock_aec_dump.get(), AddAudioProcessingState(_))
@@ -87,5 +93,6 @@ TEST(AecDumpIntegration, CaptureStreamShouldBeLoggedOnceEveryProcessStream) {
      .Times(Exactly(1));
 
   apm->AttachAecDump(std::move(mock_aec_dump));
-  apm->ProcessStream(fake_frame.get());
+  apm->ProcessStream(frame.data(), stream_config, stream_config, frame.data(),
+                     &vad_result);
 }

View File

@@ -8,6 +8,7 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <array>
 #include <utility>
 
 #include "modules/audio_processing/aec_dump/aec_dump_factory.h"
@@ -27,11 +28,17 @@ TEST(AecDumper, APICallsDoNotCrash) {
     std::unique_ptr<webrtc::AecDump> aec_dump =
         webrtc::AecDumpFactory::Create(filename, -1, &file_writer_queue);
 
-    const webrtc::AudioFrame frame;
-    aec_dump->WriteRenderStreamMessage(frame);
+    constexpr int kNumChannels = 1;
+    constexpr int kNumSamplesPerChannel = 160;
+    std::array<int16_t, kNumSamplesPerChannel * kNumChannels> frame;
+    frame.fill(0.f);
+    aec_dump->WriteRenderStreamMessage(frame.data(), kNumChannels,
+                                       kNumSamplesPerChannel);
 
-    aec_dump->AddCaptureStreamInput(frame);
-    aec_dump->AddCaptureStreamOutput(frame);
+    aec_dump->AddCaptureStreamInput(frame.data(), kNumChannels,
+                                    kNumSamplesPerChannel);
+    aec_dump->AddCaptureStreamOutput(frame.data(), kNumChannels,
+                                     kNumSamplesPerChannel);
     aec_dump->WriteCaptureStreamMessage();
@@ -55,8 +62,14 @@ TEST(AecDumper, WriteToFile) {
   {
     std::unique_ptr<webrtc::AecDump> aec_dump =
        webrtc::AecDumpFactory::Create(filename, -1, &file_writer_queue);
-    const webrtc::AudioFrame frame;
-    aec_dump->WriteRenderStreamMessage(frame);
+
+    constexpr int kNumChannels = 1;
+    constexpr int kNumSamplesPerChannel = 160;
+    std::array<int16_t, kNumSamplesPerChannel * kNumChannels> frame;
+    frame.fill(0.f);
+
+    aec_dump->WriteRenderStreamMessage(frame.data(), kNumChannels,
+                                       kNumSamplesPerChannel);
   }
 
   // Verify the file has been written after the AecDump d-tor has

View File

@@ -41,20 +41,22 @@ void CaptureStreamInfo::AddOutput(const AudioFrameView<const float>& src) {
   }
 }
 
-void CaptureStreamInfo::AddInput(const AudioFrame& frame) {
+void CaptureStreamInfo::AddInput(const int16_t* const data,
+                                 int num_channels,
+                                 int samples_per_channel) {
   RTC_DCHECK(task_);
   auto* stream = task_->GetEvent()->mutable_stream();
-  const size_t data_size =
-      sizeof(int16_t) * frame.samples_per_channel_ * frame.num_channels_;
-  stream->set_input_data(frame.data(), data_size);
+  const size_t data_size = sizeof(int16_t) * samples_per_channel * num_channels;
+  stream->set_input_data(data, data_size);
 }
 
-void CaptureStreamInfo::AddOutput(const AudioFrame& frame) {
+void CaptureStreamInfo::AddOutput(const int16_t* const data,
+                                  int num_channels,
+                                  int samples_per_channel) {
   RTC_DCHECK(task_);
   auto* stream = task_->GetEvent()->mutable_stream();
-  const size_t data_size =
-      sizeof(int16_t) * frame.samples_per_channel_ * frame.num_channels_;
-  stream->set_output_data(frame.data(), data_size);
+  const size_t data_size = sizeof(int16_t) * samples_per_channel * num_channels;
+  stream->set_output_data(data, data_size);
 }
 
 void CaptureStreamInfo::AddAudioProcessingState(

View File

@@ -15,7 +15,6 @@
 #include <utility>
 #include <vector>
 
-#include "api/audio/audio_frame.h"
 #include "modules/audio_processing/aec_dump/write_to_file_task.h"
 #include "modules/audio_processing/include/aec_dump.h"
 #include "rtc_base/checks.h"
@@ -40,8 +39,12 @@ class CaptureStreamInfo {
   void AddInput(const AudioFrameView<const float>& src);
   void AddOutput(const AudioFrameView<const float>& src);
 
-  void AddInput(const AudioFrame& frame);
-  void AddOutput(const AudioFrame& frame);
+  void AddInput(const int16_t* const data,
+                int num_channels,
+                int samples_per_channel);
+  void AddOutput(const int16_t* const data,
+                 int num_channels,
+                 int samples_per_channel);
 
   void AddAudioProcessingState(const AecDump::AudioProcessingState& state);

View File

@@ -32,13 +32,22 @@ class MockAecDump : public AecDump {
               void(const AudioFrameView<const float>& src));
   MOCK_METHOD1(AddCaptureStreamOutput,
               void(const AudioFrameView<const float>& src));
-  MOCK_METHOD1(AddCaptureStreamInput, void(const AudioFrame& frame));
-  MOCK_METHOD1(AddCaptureStreamOutput, void(const AudioFrame& frame));
+  MOCK_METHOD3(AddCaptureStreamInput,
+               void(const int16_t* const data,
+                    int num_channels,
+                    int samples_per_channel));
+  MOCK_METHOD3(AddCaptureStreamOutput,
+               void(const int16_t* const data,
+                    int num_channels,
+                    int samples_per_channel));
   MOCK_METHOD1(AddAudioProcessingState,
               void(const AudioProcessingState& state));
   MOCK_METHOD0(WriteCaptureStreamMessage, void());
-  MOCK_METHOD1(WriteRenderStreamMessage, void(const AudioFrame& frame));
+  MOCK_METHOD3(WriteRenderStreamMessage,
+               void(const int16_t* const data,
+                    int num_channels,
+                    int samples_per_channel));
   MOCK_METHOD1(WriteRenderStreamMessage,
               void(const AudioFrameView<const float>& src));

View File

@@ -22,7 +22,6 @@
 namespace webrtc {
 
 class MonoAgc;
-class AudioFrame;
 class GainControl;
 
 // Direct interface to use AGC to set volume and compression values.

View File

@@ -111,7 +111,7 @@ void AudioBuffer::set_downmixing_by_averaging() {
   downmix_by_averaging_ = true;
 }
 
-void AudioBuffer::CopyFrom(const float* const* data,
+void AudioBuffer::CopyFrom(const float* const* stacked_data,
                            const StreamConfig& stream_config) {
   RTC_DCHECK_EQ(stream_config.num_frames(), input_num_frames_);
   RTC_DCHECK_EQ(stream_config.num_channels(), input_num_channels_);
@@ -127,15 +127,16 @@ void AudioBuffer::CopyFrom(const float* const* data,
     if (downmix_by_averaging_) {
       const float kOneByNumChannels = 1.f / input_num_channels_;
       for (size_t i = 0; i < input_num_frames_; ++i) {
-        float value = data[0][i];
+        float value = stacked_data[0][i];
         for (size_t j = 1; j < input_num_channels_; ++j) {
-          value += data[j][i];
+          value += stacked_data[j][i];
         }
         downmix[i] = value * kOneByNumChannels;
       }
     }
-    const float* downmixed_data =
-        downmix_by_averaging_ ? downmix.data() : data[channel_for_downmixing_];
+    const float* downmixed_data = downmix_by_averaging_
+                                      ? downmix.data()
+                                      : stacked_data[channel_for_downmixing_];
 
     if (resampling_needed) {
       input_resamplers_[0]->Resample(downmixed_data, input_num_frames_,
@@ -147,7 +148,7 @@ void AudioBuffer::CopyFrom(const float* const* data,
   } else {
     if (resampling_needed) {
       for (size_t i = 0; i < num_channels_; ++i) {
-        input_resamplers_[i]->Resample(data[i], input_num_frames_,
+        input_resamplers_[i]->Resample(stacked_data[i], input_num_frames_,
                                        data_->channels()[i],
                                        buffer_num_frames_);
         FloatToFloatS16(data_->channels()[i], buffer_num_frames_,
@@ -155,14 +156,15 @@ void AudioBuffer::CopyFrom(const float* const* data,
       }
     } else {
      for (size_t i = 0; i < num_channels_; ++i) {
-        FloatToFloatS16(data[i], buffer_num_frames_, data_->channels()[i]);
+        FloatToFloatS16(stacked_data[i], buffer_num_frames_,
+                        data_->channels()[i]);
      }
    }
  }
 }
 
 void AudioBuffer::CopyTo(const StreamConfig& stream_config,
-                         float* const* data) {
+                         float* const* stacked_data) {
   RTC_DCHECK_EQ(stream_config.num_frames(), output_num_frames_);
 
   const bool resampling_needed = output_num_frames_ != buffer_num_frames_;
@@ -171,16 +173,18 @@ void AudioBuffer::CopyTo(const StreamConfig& stream_config,
       FloatS16ToFloat(data_->channels()[i], buffer_num_frames_,
                       data_->channels()[i]);
       output_resamplers_[i]->Resample(data_->channels()[i], buffer_num_frames_,
-                                      data[i], output_num_frames_);
+                                      stacked_data[i], output_num_frames_);
     }
   } else {
     for (size_t i = 0; i < num_channels_; ++i) {
-      FloatS16ToFloat(data_->channels()[i], buffer_num_frames_, data[i]);
+      FloatS16ToFloat(data_->channels()[i], buffer_num_frames_,
+                      stacked_data[i]);
     }
   }
 
   for (size_t i = num_channels_; i < stream_config.num_channels(); ++i) {
-    memcpy(data[i], data[0], output_num_frames_ * sizeof(**data));
+    memcpy(stacked_data[i], stacked_data[0],
+           output_num_frames_ * sizeof(**stacked_data));
   }
 }
 
@@ -225,14 +229,15 @@ void AudioBuffer::set_num_channels(size_t num_channels) {
 }
 
 // The resampler is only for supporting 48kHz to 16kHz in the reverse stream.
-void AudioBuffer::CopyFrom(const AudioFrame* frame) {
-  RTC_DCHECK_EQ(frame->num_channels_, input_num_channels_);
-  RTC_DCHECK_EQ(frame->samples_per_channel_, input_num_frames_);
+void AudioBuffer::CopyFrom(const int16_t* const interleaved_data,
+                           const StreamConfig& stream_config) {
+  RTC_DCHECK_EQ(stream_config.num_channels(), input_num_channels_);
+  RTC_DCHECK_EQ(stream_config.num_frames(), input_num_frames_);
   RestoreNumChannels();
 
   const bool resampling_required = input_num_frames_ != buffer_num_frames_;
 
-  const int16_t* interleaved = frame->data();
+  const int16_t* interleaved = interleaved_data;
   if (num_channels_ == 1) {
     if (input_num_channels_ == 1) {
       if (resampling_required) {
@@ -297,13 +302,16 @@ void AudioBuffer::CopyFrom(const AudioFrame* frame) {
   }
 }
 
-void AudioBuffer::CopyTo(AudioFrame* frame) const {
-  RTC_DCHECK(frame->num_channels_ == num_channels_ || num_channels_ == 1);
-  RTC_DCHECK_EQ(frame->samples_per_channel_, output_num_frames_);
+void AudioBuffer::CopyTo(const StreamConfig& stream_config,
+                         int16_t* const interleaved_data) {
+  const size_t config_num_channels = stream_config.num_channels();
+  RTC_DCHECK(config_num_channels == num_channels_ || num_channels_ == 1);
+  RTC_DCHECK_EQ(stream_config.num_frames(), output_num_frames_);
 
   const bool resampling_required = buffer_num_frames_ != output_num_frames_;
 
-  int16_t* interleaved = frame->mutable_data();
+  int16_t* interleaved = interleaved_data;
   if (num_channels_ == 1) {
     std::array<float, kMaxSamplesPerChannel> float_buffer;
@@ -314,14 +322,14 @@ void AudioBuffer::CopyTo(AudioFrame* frame) const {
     const float* deinterleaved =
         resampling_required ? float_buffer.data() : data_->channels()[0];
 
-    if (frame->num_channels_ == 1) {
+    if (config_num_channels == 1) {
      for (size_t j = 0; j < output_num_frames_; ++j) {
        interleaved[j] = FloatS16ToS16(deinterleaved[j]);
      }
    } else {
      for (size_t i = 0, k = 0; i < output_num_frames_; ++i) {
        float tmp = FloatS16ToS16(deinterleaved[i]);
-        for (size_t j = 0; j < frame->num_channels_; ++j, ++k) {
+        for (size_t j = 0; j < config_num_channels; ++j, ++k) {
          interleaved[k] = tmp;
        }
      }
@@ -342,19 +350,19 @@ void AudioBuffer::CopyTo(AudioFrame* frame) const {
      output_resamplers_[i]->Resample(data_->channels()[i],
                                      buffer_num_frames_, float_buffer.data(),
                                      output_num_frames_);
-      interleave_channel(i, frame->num_channels_, output_num_frames_,
+      interleave_channel(i, config_num_channels, output_num_frames_,
                         float_buffer.data(), interleaved);
    }
  } else {
    for (size_t i = 0; i < num_channels_; ++i) {
-      interleave_channel(i, frame->num_channels_, output_num_frames_,
+      interleave_channel(i, config_num_channels, output_num_frames_,
                         data_->channels()[i], interleaved);
    }
  }
 
-  for (size_t i = num_channels_; i < frame->num_channels_; ++i) {
+  for (size_t i = num_channels_; i < config_num_channels; ++i) {
    for (size_t j = 0, k = i, n = num_channels_; j < output_num_frames_;
-         ++j, k += frame->num_channels_, n += frame->num_channels_) {
+         ++j, k += config_num_channels, n += config_num_channels) {
      interleaved[k] = interleaved[n];
    }
  }
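
As a reading aid for the AudioBuffer changes above: the renamed parameters
distinguish the two memory layouts the class accepts. A small standalone
illustration (not part of the CL) of how a sample is addressed in each layout:

  #include <cstddef>
  #include <cstdint>

  // Interleaved int16, as taken by CopyFrom(const int16_t*, StreamConfig):
  // sample |s| of channel |c| lives at data[s * num_channels + c].
  int16_t InterleavedSample(const int16_t* data,
                            size_t num_channels,
                            size_t s,
                            size_t c) {
    return data[s * num_channels + c];
  }

  // Stacked float, as taken by CopyFrom(const float* const*, StreamConfig):
  // each element of |stacked_data| points to one channel's own buffer, so
  // sample |s| of channel |c| lives at stacked_data[c][s].
  float StackedSample(const float* const* stacked_data, size_t s, size_t c) {
    return stacked_data[c][s];
  }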

View File

@@ -17,7 +17,6 @@
 #include <memory>
 #include <vector>
 
-#include "api/audio/audio_frame.h"
 #include "common_audio/channel_buffer.h"
 #include "modules/audio_processing/include/audio_processing.h"
@@ -109,12 +108,15 @@ class AudioBuffer {
   }
 
   // Copies data into the buffer.
-  void CopyFrom(const AudioFrame* frame);
-  void CopyFrom(const float* const* data, const StreamConfig& stream_config);
+  void CopyFrom(const int16_t* const interleaved_data,
+                const StreamConfig& stream_config);
+  void CopyFrom(const float* const* stacked_data,
+                const StreamConfig& stream_config);
 
   // Copies data from the buffer.
-  void CopyTo(AudioFrame* frame) const;
-  void CopyTo(const StreamConfig& stream_config, float* const* data);
+  void CopyTo(const StreamConfig& stream_config,
+              int16_t* const interleaved_data);
+  void CopyTo(const StreamConfig& stream_config, float* const* stacked_data);
   void CopyTo(AudioBuffer* buffer) const;
 
   // Splits the buffer data into frequency bands.
@@ -145,8 +147,6 @@ class AudioBuffer {
   const float* const* split_channels_const_f(Band band) const {
     return split_channels_const(band);
   }
-  void DeinterleaveFrom(const AudioFrame* frame) { CopyFrom(frame); }
-  void InterleaveTo(AudioFrame* frame) const { CopyTo(frame); }
 
  private:
  FRIEND_TEST_ALL_PREFIXES(AudioBufferTest,

View File

@@ -19,6 +19,7 @@
 #include "absl/types/optional.h"
 #include "api/array_view.h"
+#include "api/audio/audio_frame.h"
 #include "common_audio/audio_converter.h"
 #include "common_audio/include/audio_util.h"
 #include "modules/audio_processing/agc2/gain_applier.h"
@@ -1064,35 +1065,60 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
   StreamConfig output_config(frame->sample_rate_hz_, frame->num_channels_,
                              /*has_keyboard=*/false);
   RTC_DCHECK_EQ(frame->samples_per_channel(), input_config.num_frames());
+  VoiceDetectionResult vad_result = VoiceDetectionResult::kNotAvailable;
+  int result = ProcessStream(frame->data(), input_config, output_config,
+                             frame->mutable_data(), &vad_result);
+  if (vad_result != VoiceDetectionResult::kNotAvailable) {
+    frame->vad_activity_ = vad_result == VoiceDetectionResult::kDetected
+                               ? AudioFrame::VADActivity::kVadActive
+                               : AudioFrame::VADActivity::kVadPassive;
+  }
+  return result;
+}
+
+int AudioProcessingImpl::ProcessStream(const int16_t* const src,
+                                       const StreamConfig& input_config,
+                                       const StreamConfig& output_config,
+                                       int16_t* const dest,
+                                       VoiceDetectionResult* vad_result) {
   RETURN_ON_ERR(MaybeInitializeCapture(input_config, output_config));
 
   rtc::CritScope cs_capture(&crit_capture_);
 
   if (aec_dump_) {
-    RecordUnprocessedCaptureStream(*frame);
+    RecordUnprocessedCaptureStream(src, input_config);
   }
 
-  capture_.capture_audio->CopyFrom(frame);
+  capture_.capture_audio->CopyFrom(src, input_config);
   if (capture_.capture_fullband_audio) {
-    capture_.capture_fullband_audio->CopyFrom(frame);
+    capture_.capture_fullband_audio->CopyFrom(src, input_config);
   }
   RETURN_ON_ERR(ProcessCaptureStreamLocked());
   if (submodule_states_.CaptureMultiBandProcessingPresent() ||
       submodule_states_.CaptureFullBandProcessingActive()) {
     if (capture_.capture_fullband_audio) {
-      capture_.capture_fullband_audio->CopyTo(frame);
+      capture_.capture_fullband_audio->CopyTo(output_config, dest);
     } else {
-      capture_.capture_audio->CopyTo(frame);
+      capture_.capture_audio->CopyTo(output_config, dest);
     }
   }
-  if (capture_.stats.voice_detected) {
-    frame->vad_activity_ = *capture_.stats.voice_detected
-                               ? AudioFrame::kVadActive
-                               : AudioFrame::kVadPassive;
+
+  if (vad_result) {
+    if (capture_.stats.voice_detected) {
+      *vad_result = *capture_.stats.voice_detected
+                        ? VoiceDetectionResult::kDetected
+                        : VoiceDetectionResult::kNotDetected;
+    } else {
+      *vad_result = VoiceDetectionResult::kNotAvailable;
+    }
   }
 
   if (aec_dump_) {
-    RecordProcessedCaptureStream(*frame);
+    RecordProcessedCaptureStream(dest, output_config);
   }
 
   return kNoError;
@@ -1430,7 +1456,6 @@ int AudioProcessingImpl::AnalyzeReverseStreamLocked(
 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
   TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame");
-  rtc::CritScope cs(&crit_render_);
 
   if (frame == nullptr) {
     return kNullPointerError;
   }
@@ -1446,31 +1471,47 @@ int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
     return kBadNumberChannelsError;
   }
 
+  StreamConfig input_config(frame->sample_rate_hz_, frame->num_channels_,
+                            /*has_keyboard=*/false);
+  StreamConfig output_config(frame->sample_rate_hz_, frame->num_channels_,
+                             /*has_keyboard=*/false);
+
+  int result = ProcessReverseStream(frame->data(), input_config, output_config,
+                                    frame->mutable_data());
+  return result;
+}
+
+int AudioProcessingImpl::ProcessReverseStream(const int16_t* const src,
+                                              const StreamConfig& input_config,
+                                              const StreamConfig& output_config,
+                                              int16_t* const dest) {
+  rtc::CritScope cs(&crit_render_);
   ProcessingConfig processing_config = formats_.api_format;
   processing_config.reverse_input_stream().set_sample_rate_hz(
-      frame->sample_rate_hz_);
+      input_config.sample_rate_hz());
   processing_config.reverse_input_stream().set_num_channels(
-      frame->num_channels_);
+      input_config.num_channels());
   processing_config.reverse_output_stream().set_sample_rate_hz(
-      frame->sample_rate_hz_);
+      output_config.sample_rate_hz());
   processing_config.reverse_output_stream().set_num_channels(
-      frame->num_channels_);
+      output_config.num_channels());
 
   RETURN_ON_ERR(MaybeInitializeRender(processing_config));
-  if (frame->samples_per_channel_ !=
+  if (input_config.num_frames() !=
       formats_.api_format.reverse_input_stream().num_frames()) {
     return kBadDataLengthError;
   }
 
   if (aec_dump_) {
-    aec_dump_->WriteRenderStreamMessage(*frame);
+    aec_dump_->WriteRenderStreamMessage(src, input_config.num_frames(),
+                                        input_config.num_channels());
   }
 
-  render_.render_audio->CopyFrom(frame);
+  render_.render_audio->CopyFrom(src, input_config);
   RETURN_ON_ERR(ProcessRenderStreamLocked());
   if (submodule_states_.RenderMultiBandProcessingActive() ||
       submodule_states_.RenderFullBandProcessingActive()) {
-    render_.render_audio->CopyTo(frame);
+    render_.render_audio->CopyTo(output_config, dest);
   }
   return kNoError;
 }
@@ -2007,11 +2048,13 @@ void AudioProcessingImpl::RecordUnprocessedCaptureStream(
 }
 
 void AudioProcessingImpl::RecordUnprocessedCaptureStream(
-    const AudioFrame& capture_frame) {
+    const int16_t* const data,
+    const StreamConfig& config) {
   RTC_DCHECK(aec_dump_);
   WriteAecDumpConfigMessage(false);
 
-  aec_dump_->AddCaptureStreamInput(capture_frame);
+  aec_dump_->AddCaptureStreamInput(data, config.num_channels(),
+                                   config.num_frames());
   RecordAudioProcessingState();
 }
 
@@ -2028,10 +2071,12 @@ void AudioProcessingImpl::RecordProcessedCaptureStream(
 }
 
 void AudioProcessingImpl::RecordProcessedCaptureStream(
-    const AudioFrame& processed_capture_frame) {
+    const int16_t* const data,
+    const StreamConfig& config) {
   RTC_DCHECK(aec_dump_);
 
-  aec_dump_->AddCaptureStreamOutput(processed_capture_frame);
+  aec_dump_->AddCaptureStreamOutput(data, config.num_channels(),
+                                    config.num_channels());
  aec_dump_->WriteCaptureStreamMessage();
 }

View File

@@ -43,6 +43,7 @@
 namespace webrtc {
 
 class ApmDataDumper;
+class AudioFrame;
 class AudioConverter;
 
 class AudioProcessingImpl : public AudioProcessing {
@@ -80,6 +81,11 @@ class AudioProcessingImpl : public AudioProcessing {
   // Capture-side exclusive methods possibly running APM in a
   // multi-threaded manner. Acquire the capture lock.
   int ProcessStream(AudioFrame* frame) override;
+  int ProcessStream(const int16_t* const src,
+                    const StreamConfig& input_config,
+                    const StreamConfig& output_config,
+                    int16_t* const dest,
+                    VoiceDetectionResult* vad_state) override;
   int ProcessStream(const float* const* src,
                     const StreamConfig& input_config,
                     const StreamConfig& output_config,
@@ -95,6 +101,10 @@ class AudioProcessingImpl : public AudioProcessing {
   // Render-side exclusive methods possibly running APM in a
   // multi-threaded manner. Acquire the render lock.
   int ProcessReverseStream(AudioFrame* frame) override;
+  int ProcessReverseStream(const int16_t* const src,
+                           const StreamConfig& input_config,
+                           const StreamConfig& output_config,
+                           int16_t* const dest) override;
   int AnalyzeReverseStream(const float* const* data,
                            const StreamConfig& reverse_config) override;
   int ProcessReverseStream(const float* const* src,
@@ -292,7 +302,8 @@ class AudioProcessingImpl : public AudioProcessing {
   void RecordUnprocessedCaptureStream(const float* const* capture_stream)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
 
-  void RecordUnprocessedCaptureStream(const AudioFrame& capture_frame)
+  void RecordUnprocessedCaptureStream(const int16_t* const data,
+                                      const StreamConfig& config)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
 
   // Notifies attached AecDump of current configuration and
@@ -302,7 +313,8 @@ class AudioProcessingImpl : public AudioProcessing {
       const float* const* processed_capture_stream)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
 
-  void RecordProcessedCaptureStream(const AudioFrame& processed_capture_frame)
+  void RecordProcessedCaptureStream(const int16_t* const data,
+                                    const StreamConfig& config)
      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
 
   // Notifies attached AecDump about current state (delay, drift, etc).

View File

@@ -15,7 +15,6 @@
 #include <string>
 
-#include "api/audio/audio_frame.h"
 #include "modules/audio_processing/include/audio_frame_view.h"
 #include "modules/audio_processing/include/audio_processing.h"
 #include "rtc_base/deprecation.h"
@@ -88,13 +87,19 @@ class AecDump {
       const AudioFrameView<const float>& src) = 0;
   virtual void AddCaptureStreamOutput(
       const AudioFrameView<const float>& src) = 0;
-  virtual void AddCaptureStreamInput(const AudioFrame& frame) = 0;
-  virtual void AddCaptureStreamOutput(const AudioFrame& frame) = 0;
+  virtual void AddCaptureStreamInput(const int16_t* const data,
+                                     int num_channels,
+                                     int samples_per_channel) = 0;
+  virtual void AddCaptureStreamOutput(const int16_t* const data,
+                                      int num_channels,
+                                      int samples_per_channel) = 0;
   virtual void AddAudioProcessingState(const AudioProcessingState& state) = 0;
   virtual void WriteCaptureStreamMessage() = 0;
 
   // Logs Event::Type REVERSE_STREAM message.
-  virtual void WriteRenderStreamMessage(const AudioFrame& frame) = 0;
+  virtual void WriteRenderStreamMessage(const int16_t* const data,
+                                        int num_channels,
+                                        int samples_per_channel) = 0;
   virtual void WriteRenderStreamMessage(
       const AudioFrameView<const float>& src) = 0;

View File

@@ -372,6 +372,8 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface {
     kStereoAndKeyboard
   };
 
+  enum class VoiceDetectionResult { kNotAvailable, kDetected, kNotDetected };
+
   // Specifies the properties of a setting to be passed to AudioProcessing at
   // runtime.
   class RuntimeSetting {
@@ -538,6 +540,15 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface {
   // method, it will trigger an initialization.
   virtual int ProcessStream(AudioFrame* frame) = 0;
 
+  // Accepts and produces a 10 ms frame of interleaved 16 bit integer audio as
+  // specified in |input_config| and |output_config|. |src| and |dest| may use
+  // the same memory, if desired.
+  virtual int ProcessStream(const int16_t* const src,
+                            const StreamConfig& input_config,
+                            const StreamConfig& output_config,
+                            int16_t* const dest,
+                            VoiceDetectionResult* vad_result) = 0;
+
   // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
   // |src| points to a channel buffer, arranged according to |input_stream|. At
   // output, the channels will be arranged according to |output_stream| in
@@ -564,6 +575,14 @@ class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface {
   // members of |frame| must be valid.
   virtual int ProcessReverseStream(AudioFrame* frame) = 0;
 
+  // Accepts and produces a 10 ms frame of interleaved 16 bit integer audio for
+  // the reverse direction audio stream as specified in |input_config| and
+  // |output_config|. |src| and |dest| may use the same memory, if desired.
+  virtual int ProcessReverseStream(const int16_t* const src,
+                                   const StreamConfig& input_config,
+                                   const StreamConfig& output_config,
+                                   int16_t* const dest) = 0;
+
   // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
   // |data| points to a channel buffer, arranged according to |reverse_config|.
   virtual int ProcessReverseStream(const float* const* src,

View File

@@ -82,6 +82,12 @@ class MockAudioProcessing : public ::testing::NiceMock<AudioProcessing> {
   MOCK_METHOD1(set_output_will_be_muted, void(bool muted));
   MOCK_METHOD1(SetRuntimeSetting, void(RuntimeSetting setting));
   MOCK_METHOD1(ProcessStream, int(AudioFrame* frame));
+  MOCK_METHOD5(ProcessStream,
+               int(const int16_t* const src,
+                   const StreamConfig& input_config,
+                   const StreamConfig& output_config,
+                   int16_t* const dest,
+                   VoiceDetectionResult* const vad_result));
   MOCK_METHOD7(ProcessStream,
                int(const float* const* src,
                    size_t samples_per_channel,
@@ -96,6 +102,11 @@ class MockAudioProcessing : public ::testing::NiceMock<AudioProcessing> {
                    const StreamConfig& output_config,
                    float* const* dest));
   MOCK_METHOD1(ProcessReverseStream, int(AudioFrame* frame));
+  MOCK_METHOD4(ProcessReverseStream,
+               int(const int16_t* const src,
+                   const StreamConfig& input_config,
+                   const StreamConfig& output_config,
+                   int16_t* const dest));
   MOCK_METHOD4(AnalyzeReverseStream,
                int(const float* const* data,
                    size_t samples_per_channel,