Adds stereo support to FineAudioBuffer for mobile platforms.
...continuation of review in https://webrtc-review.googlesource.com/c/src/+/70781

This CL ensures that the FineAudioBuffer can support stereo and also adapts
all classes that use the FineAudioBuffer. Note that this CL does NOT enable
stereo on mobile platforms by default; all it does is ensure that we *can*.
As is, the only functional change is that all clients now use a
FineAudioBuffer implementation which supports stereo (see separate unittest).

The FineAudioBuffer constructor has been modified since it is better to
utilize the information provided by the injected AudioDeviceBuffer pointer
instead of forcing the user to supply redundant parameters. The capacity
parameter was also removed since it adds no value now that the more flexible
rtc::BufferT is used.

I have also made local changes (not included in this CL) that switch all
affected audio backends to stereo and verified that it works in real time
on all affected platforms (Android:OpenSL ES, Android:AAudio and iOS).

Also note that the changes in:

  sdk/android/src/jni/audio_device/aaudio_player.cc
  sdk/android/src/jni/audio_device/aaudio_recorder.cc
  sdk/android/src/jni/audio_device/opensles_player.cc
  sdk/android/src/jni/audio_device/opensles_recorder.cc

are simply copies of the changes made under modules/audio_device/android,
since we currently have two versions of the ADM for Android.

Bug: webrtc:9172
Change-Id: I1ed3798bd1925381d68f0f9492af921f515b9053
Reviewed-on: https://webrtc-review.googlesource.com/71201
Commit-Queue: Henrik Andreasson <henrika@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22998}
parent 47d7fbd8fe
commit 29e865a5d8
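The heart of the CL is the constructor change; every backend below reduces
its FineAudioBuffer setup to the same one-liner. A before/after sketch
(names as they appear in the Android backends):

    // Before: the caller had to pass redundant rate/capacity parameters.
    //   fine_audio_buffer_.reset(new FineAudioBuffer(
    //       audio_device_buffer_, audio_parameters.sample_rate(), capacity));
    // After: sample rates and channel counts are read from the injected
    // AudioDeviceBuffer, and rtc::BufferT grows on demand.
    fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);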
modules/audio_device/android/aaudio_player.cc:

@@ -15,6 +15,7 @@
 #include "modules/audio_device/fine_audio_buffer.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
+#include "rtc_base/ptr_util.h"
 
 namespace webrtc {
 
@@ -39,7 +40,9 @@ AAudioPlayer::~AAudioPlayer() {
 int AAudioPlayer::Init() {
   RTC_LOG(INFO) << "Init";
   RTC_DCHECK_RUN_ON(&main_thread_checker_);
-  RTC_DCHECK_EQ(aaudio_.audio_parameters().channels(), 1u);
+  if (aaudio_.audio_parameters().channels() == 2) {
+    RTC_DLOG(LS_WARNING) << "Stereo mode is enabled";
+  }
   return 0;
 }
 
@@ -119,12 +122,8 @@ void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   RTC_CHECK(audio_device_buffer_);
   // Create a modified audio buffer class which allows us to ask for any number
   // of samples (and not only multiple of 10ms) to match the optimal buffer
-  // size per callback used by AAudio. Use an initial capacity of 50ms to ensure
-  // that the buffer can cache old data and at the same time be prepared for
-  // increased burst size in AAudio if buffer underruns are detected.
-  const size_t capacity = 5 * audio_parameters.frames_per_10ms_buffer();
-  fine_audio_buffer_.reset(new FineAudioBuffer(
-      audio_device_buffer_, audio_parameters.sample_rate(), capacity));
+  // size per callback used by AAudio.
+  fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);
 }
 
 int AAudioPlayer::SpeakerVolumeIsAvailable(bool& available) {
@@ -193,7 +192,8 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
     memset(audio_data, 0, num_bytes);
   } else {
     fine_audio_buffer_->GetPlayoutData(
-        rtc::MakeArrayView(static_cast<int16_t*>(audio_data), num_frames),
+        rtc::MakeArrayView(static_cast<int16_t*>(audio_data),
+                           aaudio_.samples_per_frame() * num_frames),
         static_cast<int>(latency_millis_ + 0.5));
   }
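The new view length relies on AAudio's definition of a frame: one sample per
channel, so samples_per_frame() equals the channel count. An illustration
only (the constants are hypothetical, not from the CL):

    // Interleaved stereo layout assumed by FineAudioBuffer: L0 R0 L1 R1 ...
    const int num_frames = 192;        // hypothetical AAudio burst size
    const int samples_per_frame = 2;   // stereo => two samples per frame
    const int num_elements = samples_per_frame * num_frames;  // 384 int16_t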
modules/audio_device/android/aaudio_recorder.cc:

@@ -15,6 +15,7 @@
 #include "modules/audio_device/fine_audio_buffer.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
+#include "rtc_base/ptr_util.h"
 #include "rtc_base/timeutils.h"
 #include "system_wrappers/include/sleep.h"
 
@@ -41,7 +42,9 @@ AAudioRecorder::~AAudioRecorder() {
 int AAudioRecorder::Init() {
   RTC_LOG(INFO) << "Init";
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  RTC_DCHECK_EQ(aaudio_.audio_parameters().channels(), 1u);
+  if (aaudio_.audio_parameters().channels() == 2) {
+    RTC_DLOG(LS_WARNING) << "Stereo mode is enabled";
+  }
   return 0;
 }
 
@@ -107,9 +110,7 @@ void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   // Create a modified audio buffer class which allows us to deliver any number
   // of samples (and not only multiples of 10ms which WebRTC uses) to match the
   // native AAudio buffer size.
-  const size_t capacity = 5 * audio_parameters.GetBytesPer10msBuffer();
-  fine_audio_buffer_.reset(new FineAudioBuffer(
-      audio_device_buffer_, audio_parameters.sample_rate(), capacity));
+  fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);
 }
 
 int AAudioRecorder::EnableBuiltInAEC(bool enable) {
@@ -181,7 +182,8 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
   // Copy recorded audio in |audio_data| to the WebRTC sink using the
   // FineAudioBuffer object.
   fine_audio_buffer_->DeliverRecordedData(
-      rtc::MakeArrayView(static_cast<const int16_t*>(audio_data), num_frames),
+      rtc::MakeArrayView(static_cast<const int16_t*>(audio_data),
+                         aaudio_.samples_per_frame() * num_frames),
       static_cast<int>(latency_millis_ + 0.5));
 
   return AAUDIO_CALLBACK_RESULT_CONTINUE;
modules/audio_device/android/opensles_player.cc:

@@ -20,6 +20,7 @@
 #include "rtc_base/checks.h"
 #include "rtc_base/format_macros.h"
 #include "rtc_base/platform_thread.h"
+#include "rtc_base/ptr_util.h"
 #include "rtc_base/timeutils.h"
 
 #define TAG "OpenSLESPlayer"
@@ -81,9 +82,7 @@ int OpenSLESPlayer::Init() {
   ALOGD("Init[tid=%d]", rtc::CurrentThreadId());
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (audio_parameters_.channels() == 2) {
-    // TODO(henrika): FineAudioBuffer needs more work to support stereo.
-    ALOGE("OpenSLESPlayer does not support stereo");
-    return -1;
+    ALOGW("Stereo mode is enabled");
   }
   return 0;
 }
@@ -212,13 +211,12 @@ void OpenSLESPlayer::AllocateDataBuffers() {
   // recommended to construct audio buffers so that they contain an exact
   // multiple of this number. If so, callbacks will occur at regular intervals,
   // which reduces jitter.
-  const size_t buffer_size_in_samples = audio_parameters_.frames_per_buffer();
+  const size_t buffer_size_in_samples =
+      audio_parameters_.frames_per_buffer() * audio_parameters_.channels();
   ALOGD("native buffer size: %" PRIuS, buffer_size_in_samples);
   ALOGD("native buffer size in ms: %.2f",
         audio_parameters_.GetBufferSizeInMilliseconds());
-  fine_audio_buffer_.reset(
-      new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
-                          2 * audio_parameters_.frames_per_buffer()));
+  fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);
   // Allocated memory for audio buffers.
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
     audio_buffers_[i].reset(new SLint16[buffer_size_in_samples]);
@@ -397,7 +395,7 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
       reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get());
   if (silence) {
     RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    // Avoid aquiring real audio data from WebRTC and fill the buffer with
+    // Avoid acquiring real audio data from WebRTC and fill the buffer with
     // zeros instead. Used to prime the buffer with silence and to avoid asking
     // for audio data from two different threads.
     memset(audio_ptr8, 0, audio_parameters_.GetBytesPerBuffer());
@@ -409,7 +407,8 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
     // delay estimation.
     fine_audio_buffer_->GetPlayoutData(
         rtc::ArrayView<int16_t>(audio_buffers_[buffer_index_].get(),
-                                audio_parameters_.frames_per_buffer()),
+                                audio_parameters_.frames_per_buffer() *
+                                    audio_parameters_.channels()),
         25);
   }
   // Enqueue the decoded audio buffer for playback.
modules/audio_device/android/opensles_recorder.cc:

@@ -20,6 +20,7 @@
 #include "rtc_base/checks.h"
 #include "rtc_base/format_macros.h"
 #include "rtc_base/platform_thread.h"
+#include "rtc_base/ptr_util.h"
 #include "rtc_base/timeutils.h"
 
 #define TAG "OpenSLESRecorder"
@@ -78,9 +79,7 @@ int OpenSLESRecorder::Init() {
   ALOGD("Init[tid=%d]", rtc::CurrentThreadId());
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (audio_parameters_.channels() == 2) {
-    // TODO(henrika): FineAudioBuffer needs more work to support stereo.
-    ALOGE("OpenSLESRecorder does not support stereo");
-    return -1;
+    ALOGD("Stereo mode is enabled");
   }
   return 0;
 }
@@ -342,14 +341,13 @@ void OpenSLESRecorder::AllocateDataBuffers() {
         audio_parameters_.GetBytesPerBuffer());
   ALOGD("native sample rate: %d", audio_parameters_.sample_rate());
   RTC_DCHECK(audio_device_buffer_);
-  fine_audio_buffer_.reset(
-      new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
-                          2 * audio_parameters_.frames_per_buffer()));
+  fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);
   // Allocate queue of audio buffers that stores recorded audio samples.
-  const int data_size_samples = audio_parameters_.frames_per_buffer();
+  const int buffer_size_samples =
+      audio_parameters_.frames_per_buffer() * audio_parameters_.channels();
   audio_buffers_.reset(new std::unique_ptr<SLint16[]>[kNumOfOpenSLESBuffers]);
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
-    audio_buffers_[i].reset(new SLint16[data_size_samples]);
+    audio_buffers_[i].reset(new SLint16[buffer_size_samples]);
   }
 }
 
@@ -374,11 +372,10 @@ void OpenSLESRecorder::ReadBufferQueue() {
   // since there is no support to turn off built-in EC in combination with
   // OpenSL ES anyhow. Hence, as is, the WebRTC based AEC (which would use
   // these estimates) will never be active.
-  const size_t size_in_samples =
-      static_cast<size_t>(audio_parameters_.frames_per_buffer());
   fine_audio_buffer_->DeliverRecordedData(
-      rtc::ArrayView<const int16_t>(audio_buffers_[buffer_index_].get(),
-                                    size_in_samples),
+      rtc::ArrayView<const int16_t>(
+          audio_buffers_[buffer_index_].get(),
+          audio_parameters_.frames_per_buffer() * audio_parameters_.channels()),
       25);
   // Enqueue the utilized audio buffer and use if for recording again.
   EnqueueAudioBuffer();
modules/audio_device/audio_device_buffer.cc:

@@ -356,9 +356,17 @@ int32_t AudioDeviceBuffer::GetPlayoutData(void* audio_buffer) {
   const double phase_increment =
       k2Pi * 440.0 / static_cast<double>(play_sample_rate_);
   int16_t* destination_r = reinterpret_cast<int16_t*>(audio_buffer);
-  for (size_t i = 0; i < play_buffer_.size(); ++i) {
-    destination_r[i] = static_cast<int16_t>((sin(phase_) * (1 << 14)));
-    phase_ += phase_increment;
+  if (play_channels_ == 1) {
+    for (size_t i = 0; i < play_buffer_.size(); ++i) {
+      destination_r[i] = static_cast<int16_t>((sin(phase_) * (1 << 14)));
+      phase_ += phase_increment;
+    }
+  } else if (play_channels_ == 2) {
+    for (size_t i = 0; i < play_buffer_.size() / 2; ++i) {
+      destination_r[2 * i] = destination_r[2 * i + 1] =
+          static_cast<int16_t>((sin(phase_) * (1 << 14)));
+      phase_ += phase_increment;
+    }
   }
 #else
   memcpy(audio_buffer, play_buffer_.data(),
modules/audio_device/fine_audio_buffer.cc:

@@ -10,25 +10,32 @@
 
 #include "modules/audio_device/fine_audio_buffer.h"
 
-#include <memory.h>
-#include <stdio.h>
-#include <algorithm>
-
 #include "modules/audio_device/audio_device_buffer.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
 
 namespace webrtc {
 
-FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* device_buffer,
-                                 int sample_rate,
-                                 size_t capacity)
-    : device_buffer_(device_buffer),
-      sample_rate_(sample_rate),
-      samples_per_10_ms_(static_cast<size_t>(sample_rate_ * 10 / 1000)),
-      playout_buffer_(0, capacity),
-      record_buffer_(0, capacity) {
-  RTC_LOG(INFO) << "samples_per_10_ms_: " << samples_per_10_ms_;
+FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* audio_device_buffer)
+    : audio_device_buffer_(audio_device_buffer),
+      playout_samples_per_channel_10ms_(rtc::dchecked_cast<size_t>(
+          audio_device_buffer->PlayoutSampleRate() * 10 / 1000)),
+      record_samples_per_channel_10ms_(rtc::dchecked_cast<size_t>(
+          audio_device_buffer->RecordingSampleRate() * 10 / 1000)),
+      playout_channels_(audio_device_buffer->PlayoutChannels()),
+      record_channels_(audio_device_buffer->RecordingChannels()) {
+  RTC_DCHECK(audio_device_buffer_);
+  if (IsReadyForPlayout()) {
+    RTC_DLOG(INFO) << "playout_samples_per_channel_10ms: "
+                   << playout_samples_per_channel_10ms_;
+    RTC_DLOG(INFO) << "playout_channels: " << playout_channels_;
+  }
+  if (IsReadyForRecord()) {
+    RTC_DLOG(INFO) << "record_samples_per_channel_10ms: "
+                   << record_samples_per_channel_10ms_;
+    RTC_DLOG(INFO) << "record_channels: " << record_channels_;
+  }
 }
 
 FineAudioBuffer::~FineAudioBuffer() {}
@@ -41,28 +48,39 @@ void FineAudioBuffer::ResetRecord() {
   record_buffer_.Clear();
 }
 
+bool FineAudioBuffer::IsReadyForPlayout() const {
+  return playout_samples_per_channel_10ms_ > 0 && playout_channels_ > 0;
+}
+
+bool FineAudioBuffer::IsReadyForRecord() const {
+  return record_samples_per_channel_10ms_ > 0 && record_channels_ > 0;
+}
+
 void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
                                      int playout_delay_ms) {
+  RTC_DCHECK(IsReadyForPlayout());
   // Ask WebRTC for new data in chunks of 10ms until we have enough to
   // fulfill the request. It is possible that the buffer already contains
   // enough samples from the last round.
   while (playout_buffer_.size() < audio_buffer.size()) {
-    // Get 10ms decoded audio from WebRTC.
-    device_buffer_->RequestPlayoutData(samples_per_10_ms_);
-    // Append |bytes_per_10_ms_| elements to the end of the buffer.
-    const size_t samples_written = playout_buffer_.AppendData(
-        samples_per_10_ms_, [&](rtc::ArrayView<int16_t> buf) {
-          const size_t samples_per_channel =
-              device_buffer_->GetPlayoutData(buf.data());
-          // TODO(henrika): this class is only used on mobile devices and is
-          // currently limited to mono. Modifications are needed for stereo.
-          return samples_per_channel;
+    // Get 10ms decoded audio from WebRTC. The ADB knows about number of
+    // channels; hence we can ask for number of samples per channel here.
+    audio_device_buffer_->RequestPlayoutData(playout_samples_per_channel_10ms_);
+    // Append 10ms to the end of the local buffer taking number of channels
+    // into account.
+    const size_t num_elements_10ms =
+        playout_channels_ * playout_samples_per_channel_10ms_;
+    const size_t written_elements = playout_buffer_.AppendData(
+        num_elements_10ms, [&](rtc::ArrayView<int16_t> buf) {
+          const size_t samples_per_channel_10ms =
+              audio_device_buffer_->GetPlayoutData(buf.data());
+          return playout_channels_ * samples_per_channel_10ms;
         });
-    RTC_DCHECK_EQ(samples_per_10_ms_, samples_written);
+    RTC_DCHECK_EQ(num_elements_10ms, written_elements);
   }
 
-  const size_t num_bytes = audio_buffer.size() * sizeof(int16_t);
+  // Provide the requested number of bytes to the consumer.
+  const size_t num_bytes = audio_buffer.size() * sizeof(int16_t);
   memcpy(audio_buffer.data(), playout_buffer_.data(), num_bytes);
   // Move remaining samples to start of buffer to prepare for next round.
   memmove(playout_buffer_.data(), playout_buffer_.data() + audio_buffer.size(),
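To make the accumulate-and-drain arithmetic concrete, a worked example using
the unittest parameters (44100 Hz, stereo); the 480-frame callback size is
hypothetical, for illustration only:

    const size_t samples_per_channel_10ms = 44100 * 10 / 1000;      // 441
    const size_t num_elements_10ms = 2 * samples_per_channel_10ms;  // 882
    // A consumer asks for 480 stereo frames = 960 int16_t elements.
    // Starting empty, the while-loop pulls two 10ms chunks (882, then
    // 1764 elements cached), memcpy hands out 960, and memmove keeps the
    // remaining 804 elements buffered for the next callback.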
@@ -75,19 +93,22 @@ void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
 void FineAudioBuffer::DeliverRecordedData(
     rtc::ArrayView<const int16_t> audio_buffer,
     int record_delay_ms) {
-  // Always append new data and grow the buffer if needed.
+  RTC_DCHECK(IsReadyForRecord());
+  // Always append new data and grow the buffer when needed.
   record_buffer_.AppendData(audio_buffer.data(), audio_buffer.size());
   // Consume samples from buffer in chunks of 10ms until there is not
   // enough data left. The number of remaining samples in the cache is given by
-  // the new size of the buffer.
-  while (record_buffer_.size() >= samples_per_10_ms_) {
-    device_buffer_->SetRecordedBuffer(record_buffer_.data(),
-                                      samples_per_10_ms_);
-    device_buffer_->SetVQEData(playout_delay_ms_, record_delay_ms);
-    device_buffer_->DeliverRecordedData();
-    memmove(record_buffer_.data(), record_buffer_.data() + samples_per_10_ms_,
-            (record_buffer_.size() - samples_per_10_ms_) * sizeof(int16_t));
-    record_buffer_.SetSize(record_buffer_.size() - samples_per_10_ms_);
+  // the new size of the internal |record_buffer_|.
+  const size_t num_elements_10ms =
+      record_channels_ * record_samples_per_channel_10ms_;
+  while (record_buffer_.size() >= num_elements_10ms) {
+    audio_device_buffer_->SetRecordedBuffer(record_buffer_.data(),
+                                            record_samples_per_channel_10ms_);
+    audio_device_buffer_->SetVQEData(playout_delay_ms_, record_delay_ms);
+    audio_device_buffer_->DeliverRecordedData();
+    memmove(record_buffer_.data(), record_buffer_.data() + num_elements_10ms,
+            (record_buffer_.size() - num_elements_10ms) * sizeof(int16_t));
+    record_buffer_.SetSize(record_buffer_.size() - num_elements_10ms);
   }
 }
 
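The record path mirrors the playout path: new data always grows the buffer,
and full 10ms chunks are drained to the ADB. For example, with 48000 Hz
stereo and a native 5 ms callback (240 frames = 480 elements) nothing is
delivered on the first call and one 10 ms chunk on the second, matching the
class comment in the header below. Illustration only, assuming those rates:

    const size_t num_elements_10ms = 2 * (48000 * 10 / 1000);  // 960
    size_t buffered = 0;
    buffered += 480;  // 1st callback: 480 < 960 -> no delivery yet
    buffered += 480;  // 2nd callback: 960 >= 960 -> one 10ms chunk delivered
    buffered -= num_elements_10ms;  // 0 elements remain cached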
modules/audio_device/fine_audio_buffer.h:

@@ -11,11 +11,8 @@
 #ifndef MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
 #define MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
-
-#include <memory>
-
 #include "api/array_view.h"
 #include "rtc_base/buffer.h"
 #include "typedefs.h"  // NOLINT(build/include)
 
 namespace webrtc {
 
@@ -30,28 +27,26 @@ class AudioDeviceBuffer;
 // buffers differs from 10ms.
 // As an example: calling DeliverRecordedData() with 5ms buffers will deliver
 // accumulated 10ms worth of data to the ADB every second call.
-// TODO(henrika): add support for stereo when mobile platforms need it.
 class FineAudioBuffer {
  public:
-  // |device_buffer| is a buffer that provides 10ms of audio data.
-  // |sample_rate| is the sample rate of the audio data. This is needed because
-  // |device_buffer| delivers 10ms of data. Given the sample rate the number
-  // of samples can be calculated. The |capacity| ensures that the buffer size
-  // can be increased to at least capacity without further reallocation.
-  FineAudioBuffer(AudioDeviceBuffer* device_buffer,
-                  int sample_rate,
-                  size_t capacity);
+  FineAudioBuffer(AudioDeviceBuffer* audio_device_buffer);
   ~FineAudioBuffer();
 
   // Clears buffers and counters dealing with playout and/or recording.
   void ResetPlayout();
   void ResetRecord();
 
+  // Utility methods which returns true if valid parameters are acquired at
+  // constructions.
+  bool IsReadyForPlayout() const;
+  bool IsReadyForRecord() const;
+
   // Copies audio samples into |audio_buffer| where number of requested
   // elements is specified by |audio_buffer.size()|. The producer will always
   // fill up the audio buffer and if no audio exists, the buffer will contain
   // silence instead. The provided delay estimate in |playout_delay_ms| should
-  // contain an estime of the latency between when an audio frame is read from
+  // contain an estimate of the latency between when an audio frame is read from
   // WebRTC and when it is played out on the speaker.
   void GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
                       int playout_delay_ms);
@@ -72,14 +67,18 @@ class FineAudioBuffer {
   // Device buffer that works with 10ms chunks of data both for playout and
   // for recording. I.e., the WebRTC side will always be asked for audio to be
   // played out in 10ms chunks and recorded audio will be sent to WebRTC in
-  // 10ms chunks as well. This pointer is owned by the constructor of this
+  // 10ms chunks as well. This raw pointer is owned by the constructor of this
   // class and the owner must ensure that the pointer is valid during the life-
   // time of this object.
-  AudioDeviceBuffer* const device_buffer_;
-  // Sample rate in Hertz.
-  const int sample_rate_;
-  // Number of audio samples per 10ms.
-  const size_t samples_per_10_ms_;
+  AudioDeviceBuffer* const audio_device_buffer_;
+  // Number of audio samples per channel per 10ms. Set once at construction
+  // based on parameters in |audio_device_buffer|.
+  const size_t playout_samples_per_channel_10ms_;
+  const size_t record_samples_per_channel_10ms_;
+  // Number of audio channels. Set once at construction based on parameters in
+  // |audio_device_buffer|.
+  const size_t playout_channels_;
+  const size_t record_channels_;
   // Storage for output samples from which a consumer can read audio buffers
   // in any size using GetPlayoutData().
   rtc::BufferT<int16_t> playout_buffer_;
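Taken together, the new interface is used as in this minimal sketch
(illustration only; |adb| is assumed to be an AudioDeviceBuffer already
configured for 48000 Hz stereo playout):

    FineAudioBuffer fine_buffer(&adb);
    RTC_DCHECK(fine_buffer.IsReadyForPlayout());
    // Any callback size works, not only multiples of 10ms; here 192 frames.
    int16_t scratch[2 * 192];  // interleaved stereo
    fine_buffer.GetPlayoutData(rtc::ArrayView<int16_t>(scratch, 2 * 192),
                               /*playout_delay_ms=*/25);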
modules/audio_device/fine_audio_buffer_unittest.cc:

@@ -26,6 +26,7 @@ using ::testing::Return;
 namespace webrtc {
 
 const int kSampleRate = 44100;
+const int kChannels = 2;
 const int kSamplesPer10Ms = kSampleRate * 10 / 1000;
 
 // The fake audio data is 0,1,..SCHAR_MAX-1,0,1,... This is to make it easy
@@ -57,7 +58,8 @@ ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) {
   for (int i = 0; i < samples_per_10_ms; ++i) {
     buffer[i] = (i + start_value) % SCHAR_MAX;
   }
-  return samples_per_10_ms;
+  // Should return samples per channel.
+  return samples_per_10_ms / kChannels;
 }
 
 // Writes a periodic ramp pattern to the supplied |buffer|. See UpdateBuffer()
@@ -90,13 +92,18 @@ void RunFineBufferTest(int frame_size_in_samples) {
       1 + ((kNumberOfFrames * frame_size_in_samples - 1) / kSamplesPer10Ms);
 
   MockAudioDeviceBuffer audio_device_buffer;
+  audio_device_buffer.SetPlayoutSampleRate(kSampleRate);
+  audio_device_buffer.SetPlayoutChannels(kChannels);
+  audio_device_buffer.SetRecordingSampleRate(kSampleRate);
+  audio_device_buffer.SetRecordingChannels(kChannels);
+
   EXPECT_CALL(audio_device_buffer, RequestPlayoutData(_))
       .WillRepeatedly(Return(kSamplesPer10Ms));
   {
     InSequence s;
     for (int i = 0; i < kNumberOfUpdateBufferCalls; ++i) {
       EXPECT_CALL(audio_device_buffer, GetPlayoutData(_))
-          .WillOnce(UpdateBuffer(i, kSamplesPer10Ms))
+          .WillOnce(UpdateBuffer(i, kChannels * kSamplesPer10Ms))
           .RetiresOnSaturation();
     }
   }
@@ -104,7 +111,7 @@ void RunFineBufferTest(int frame_size_in_samples) {
     InSequence s;
     for (int j = 0; j < kNumberOfUpdateBufferCalls - 1; ++j) {
       EXPECT_CALL(audio_device_buffer, SetRecordedBuffer(_, kSamplesPer10Ms))
-          .WillOnce(VerifyInputBuffer(j, kSamplesPer10Ms))
+          .WillOnce(VerifyInputBuffer(j, kChannels * kSamplesPer10Ms))
          .RetiresOnSaturation();
     }
   }
@@ -112,20 +119,26 @@ void RunFineBufferTest(int frame_size_in_samples) {
       .Times(kNumberOfUpdateBufferCalls - 1);
   EXPECT_CALL(audio_device_buffer, DeliverRecordedData())
       .Times(kNumberOfUpdateBufferCalls - 1)
-      .WillRepeatedly(Return(kSamplesPer10Ms));
+      .WillRepeatedly(Return(0));
 
-  FineAudioBuffer fine_buffer(&audio_device_buffer, kSampleRate,
-                              kFrameSizeSamples);
-  std::unique_ptr<int16_t[]> out_buffer(new int16_t[kFrameSizeSamples]);
-  std::unique_ptr<int16_t[]> in_buffer(new int16_t[kFrameSizeSamples]);
+  FineAudioBuffer fine_buffer(&audio_device_buffer);
+  std::unique_ptr<int16_t[]> out_buffer(
+      new int16_t[kChannels * kFrameSizeSamples]);
+  std::unique_ptr<int16_t[]> in_buffer(
+      new int16_t[kChannels * kFrameSizeSamples]);
 
   for (int i = 0; i < kNumberOfFrames; ++i) {
     fine_buffer.GetPlayoutData(
-        rtc::ArrayView<int16_t>(out_buffer.get(), kFrameSizeSamples), 0);
-    EXPECT_TRUE(VerifyBuffer(out_buffer.get(), i, kFrameSizeSamples));
-    UpdateInputBuffer(in_buffer.get(), i, kFrameSizeSamples);
+        rtc::ArrayView<int16_t>(out_buffer.get(),
+                                kChannels * kFrameSizeSamples),
+        0);
+    EXPECT_TRUE(
+        VerifyBuffer(out_buffer.get(), i, kChannels * kFrameSizeSamples));
+    UpdateInputBuffer(in_buffer.get(), i, kChannels * kFrameSizeSamples);
     fine_buffer.DeliverRecordedData(
-        rtc::ArrayView<const int16_t>(in_buffer.get(), kFrameSizeSamples), 0);
+        rtc::ArrayView<const int16_t>(in_buffer.get(),
+                                      kChannels * kFrameSizeSamples),
+        0);
   }
 }
audio_device_ios.mm:

@@ -699,12 +699,9 @@ void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
 
   // Create a modified audio buffer class which allows us to ask for,
   // or deliver, any number of samples (and not only multiple of 10ms) to match
-  // the native audio unit buffer size. Use a reasonable capacity to avoid
-  // reallocations while audio is played to reduce risk of glitches.
+  // the native audio unit buffer size.
   RTC_DCHECK(audio_device_buffer_);
-  const size_t capacity_in_samples = 2 * playout_parameters_.frames_per_buffer();
-  fine_audio_buffer_.reset(new FineAudioBuffer(
-      audio_device_buffer_, playout_parameters_.sample_rate(), capacity_in_samples));
+  fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_));
 }
 
 bool AudioDeviceIOS::CreateAudioUnit() {
sdk/android/src/jni/audio_device/aaudio_player.cc:

@@ -14,6 +14,7 @@
 #include "modules/audio_device/fine_audio_buffer.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
+#include "rtc_base/ptr_util.h"
 
 namespace webrtc {
 
@@ -40,7 +41,9 @@ AAudioPlayer::~AAudioPlayer() {
 int AAudioPlayer::Init() {
   RTC_LOG(INFO) << "Init";
   RTC_DCHECK_RUN_ON(&main_thread_checker_);
-  RTC_DCHECK_EQ(aaudio_.audio_parameters().channels(), 1u);
+  if (aaudio_.audio_parameters().channels() == 2) {
+    RTC_DLOG(LS_WARNING) << "Stereo mode is enabled";
+  }
   return 0;
 }
 
@@ -120,12 +123,8 @@ void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   RTC_CHECK(audio_device_buffer_);
   // Create a modified audio buffer class which allows us to ask for any number
   // of samples (and not only multiple of 10ms) to match the optimal buffer
-  // size per callback used by AAudio. Use an initial capacity of 50ms to ensure
-  // that the buffer can cache old data and at the same time be prepared for
-  // increased burst size in AAudio if buffer underruns are detected.
-  const size_t capacity = 5 * audio_parameters.frames_per_10ms_buffer();
-  fine_audio_buffer_.reset(new FineAudioBuffer(
-      audio_device_buffer_, audio_parameters.sample_rate(), capacity));
+  // size per callback used by AAudio.
+  fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);
 }
 
 bool AAudioPlayer::SpeakerVolumeIsAvailable() {
@@ -209,7 +208,8 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
     memset(audio_data, 0, num_bytes);
   } else {
     fine_audio_buffer_->GetPlayoutData(
-        rtc::MakeArrayView(static_cast<int16_t*>(audio_data), num_frames),
+        rtc::MakeArrayView(static_cast<int16_t*>(audio_data),
+                           aaudio_.samples_per_frame() * num_frames),
         static_cast<int>(latency_millis_ + 0.5));
   }
sdk/android/src/jni/audio_device/aaudio_recorder.cc:

@@ -14,6 +14,7 @@
 #include "modules/audio_device/fine_audio_buffer.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
+#include "rtc_base/ptr_util.h"
 #include "rtc_base/timeutils.h"
 
 #include "system_wrappers/include/sleep.h"
@@ -43,7 +44,9 @@ AAudioRecorder::~AAudioRecorder() {
 int AAudioRecorder::Init() {
   RTC_LOG(INFO) << "Init";
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  RTC_DCHECK_EQ(aaudio_.audio_parameters().channels(), 1u);
+  if (aaudio_.audio_parameters().channels() == 2) {
+    RTC_DLOG(LS_WARNING) << "Stereo mode is enabled";
+  }
   return 0;
 }
 
@@ -117,9 +120,7 @@ void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   // Create a modified audio buffer class which allows us to deliver any number
   // of samples (and not only multiples of 10ms which WebRTC uses) to match the
   // native AAudio buffer size.
-  const size_t capacity = 5 * audio_parameters.GetBytesPer10msBuffer();
-  fine_audio_buffer_.reset(new FineAudioBuffer(
-      audio_device_buffer_, audio_parameters.sample_rate(), capacity));
+  fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);
 }
 
 bool AAudioRecorder::IsAcousticEchoCancelerSupported() const {
@@ -193,7 +194,8 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
   // Copy recorded audio in |audio_data| to the WebRTC sink using the
   // FineAudioBuffer object.
   fine_audio_buffer_->DeliverRecordedData(
-      rtc::MakeArrayView(static_cast<const int16_t*>(audio_data), num_frames),
+      rtc::MakeArrayView(static_cast<const int16_t*>(audio_data),
+                         aaudio_.samples_per_frame() * num_frames),
      static_cast<int>(latency_millis_ + 0.5));
 
   return AAUDIO_CALLBACK_RESULT_CONTINUE;
sdk/android/src/jni/audio_device/opensles_player.cc:

@@ -18,6 +18,7 @@
 #include "rtc_base/checks.h"
 #include "rtc_base/format_macros.h"
 #include "rtc_base/platform_thread.h"
+#include "rtc_base/ptr_util.h"
 #include "rtc_base/timeutils.h"
 #include "sdk/android/src/jni/audio_device/audio_common.h"
 
@@ -84,9 +85,7 @@ int OpenSLESPlayer::Init() {
   ALOGD("Init[tid=%d]", rtc::CurrentThreadId());
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (audio_parameters_.channels() == 2) {
-    // TODO(henrika): FineAudioBuffer needs more work to support stereo.
-    ALOGE("OpenSLESPlayer does not support stereo");
-    return -1;
+    ALOGW("Stereo mode is enabled");
   }
   return 0;
 }
@@ -222,13 +221,12 @@ void OpenSLESPlayer::AllocateDataBuffers() {
   // recommended to construct audio buffers so that they contain an exact
   // multiple of this number. If so, callbacks will occur at regular intervals,
   // which reduces jitter.
-  const size_t buffer_size_in_samples = audio_parameters_.frames_per_buffer();
+  const size_t buffer_size_in_samples =
+      audio_parameters_.frames_per_buffer() * audio_parameters_.channels();
   ALOGD("native buffer size: %" PRIuS, buffer_size_in_samples);
   ALOGD("native buffer size in ms: %.2f",
         audio_parameters_.GetBufferSizeInMilliseconds());
-  fine_audio_buffer_.reset(
-      new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
-                          2 * audio_parameters_.frames_per_buffer()));
+  fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);
   // Allocated memory for audio buffers.
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
     audio_buffers_[i].reset(new SLint16[buffer_size_in_samples]);
@@ -407,7 +405,7 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
       reinterpret_cast<SLint8*>(audio_buffers_[buffer_index_].get());
   if (silence) {
     RTC_DCHECK(thread_checker_.CalledOnValidThread());
-    // Avoid aquiring real audio data from WebRTC and fill the buffer with
+    // Avoid acquiring real audio data from WebRTC and fill the buffer with
     // zeros instead. Used to prime the buffer with silence and to avoid asking
     // for audio data from two different threads.
     memset(audio_ptr8, 0, audio_parameters_.GetBytesPerBuffer());
@@ -419,7 +417,8 @@ void OpenSLESPlayer::EnqueuePlayoutData(bool silence) {
     // delay estimation.
     fine_audio_buffer_->GetPlayoutData(
         rtc::ArrayView<int16_t>(audio_buffers_[buffer_index_].get(),
-                                audio_parameters_.frames_per_buffer()),
+                                audio_parameters_.frames_per_buffer() *
+                                    audio_parameters_.channels()),
         25);
   }
   // Enqueue the decoded audio buffer for playback.
sdk/android/src/jni/audio_device/opensles_recorder.cc:

@@ -18,6 +18,7 @@
 #include "rtc_base/checks.h"
 #include "rtc_base/format_macros.h"
 #include "rtc_base/platform_thread.h"
+#include "rtc_base/ptr_util.h"
 #include "rtc_base/timeutils.h"
 #include "sdk/android/src/jni/audio_device/audio_common.h"
 
@@ -80,9 +81,7 @@ int OpenSLESRecorder::Init() {
   ALOGD("Init[tid=%d]", rtc::CurrentThreadId());
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (audio_parameters_.channels() == 2) {
-    // TODO(henrika): FineAudioBuffer needs more work to support stereo.
-    ALOGE("OpenSLESRecorder does not support stereo");
-    return -1;
+    ALOGD("Stereo mode is enabled");
   }
   return 0;
 }
@@ -353,14 +352,13 @@ void OpenSLESRecorder::AllocateDataBuffers() {
         audio_parameters_.GetBytesPerBuffer());
   ALOGD("native sample rate: %d", audio_parameters_.sample_rate());
   RTC_DCHECK(audio_device_buffer_);
-  fine_audio_buffer_.reset(
-      new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(),
-                          2 * audio_parameters_.frames_per_buffer()));
+  fine_audio_buffer_ = rtc::MakeUnique<FineAudioBuffer>(audio_device_buffer_);
   // Allocate queue of audio buffers that stores recorded audio samples.
-  const int data_size_samples = audio_parameters_.frames_per_buffer();
+  const int buffer_size_samples =
+      audio_parameters_.frames_per_buffer() * audio_parameters_.channels();
   audio_buffers_.reset(new std::unique_ptr<SLint16[]>[kNumOfOpenSLESBuffers]);
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
-    audio_buffers_[i].reset(new SLint16[data_size_samples]);
+    audio_buffers_[i].reset(new SLint16[buffer_size_samples]);
   }
 }
 
@@ -385,11 +383,10 @@ void OpenSLESRecorder::ReadBufferQueue() {
   // since there is no support to turn off built-in EC in combination with
   // OpenSL ES anyhow. Hence, as is, the WebRTC based AEC (which would use
   // these estimates) will never be active.
-  const size_t size_in_samples =
-      static_cast<size_t>(audio_parameters_.frames_per_buffer());
   fine_audio_buffer_->DeliverRecordedData(
-      rtc::ArrayView<const int16_t>(audio_buffers_[buffer_index_].get(),
-                                    size_in_samples),
+      rtc::ArrayView<const int16_t>(
+          audio_buffers_[buffer_index_].get(),
+          audio_parameters_.frames_per_buffer() * audio_parameters_.channels()),
       25);
   // Enqueue the utilized audio buffer and use if for recording again.
   EnqueueAudioBuffer();