Adds detection of audio glitches for playout on iOS.

Bug: b/38018041
Change-Id: If6b53d3909a52333543c8aade500fd4c26b47255
Reviewed-on: https://chromium-review.googlesource.com/522563
Commit-Queue: Henrik Andreasson <henrika@webrtc.org>
Reviewed-by: Minyue Li <minyue@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#18570}
henrika 2017-06-13 16:11:08 +02:00 committed by Commit Bot
parent dea075c7a6
commit 33e4e65706
7 changed files with 102 additions and 10 deletions


@ -92,6 +92,9 @@ rtc_static_library("audio_device") {
}
defines = []
cflags = []
if (rtc_audio_device_plays_sinus_tone) {
defines += [ "AUDIO_DEVICE_PLAYS_SINUS_TONE" ]
}
if (rtc_include_internal_audio_device) {
sources += [
"audio_device_data_observer.cc",


@ -9,6 +9,7 @@
*/
#include <algorithm>
#include <cmath>
#include "webrtc/modules/audio_device/audio_device_buffer.h"
@ -36,6 +37,9 @@ static const size_t kTimerIntervalInMilliseconds =
static const size_t kMinValidCallTimeTimeInSeconds = 10;
static const size_t kMinValidCallTimeTimeInMilliseconds =
kMinValidCallTimeTimeInSeconds * rtc::kNumMillisecsPerSec;
#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
static const double k2Pi = 6.28318530717959;
#endif
AudioDeviceBuffer::AudioDeviceBuffer()
: task_queue_(kTimerQueueName),
@ -60,6 +64,10 @@ AudioDeviceBuffer::AudioDeviceBuffer()
only_silence_recorded_(true),
log_stats_(false) {
LOG(INFO) << "AudioDeviceBuffer::ctor";
#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
phase_ = 0.0;
LOG(WARNING) << "AUDIO_DEVICE_PLAYS_SINUS_TONE is defined!";
#endif
playout_thread_checker_.DetachFromThread();
recording_thread_checker_.DetachFromThread();
}
@ -391,9 +399,18 @@ int32_t AudioDeviceBuffer::RequestPlayoutData(size_t samples_per_channel) {
int32_t AudioDeviceBuffer::GetPlayoutData(void* audio_buffer) {
RTC_DCHECK_RUN_ON(&playout_thread_checker_);
RTC_DCHECK_GT(play_buffer_.size(), 0);
const size_t bytes_per_sample = sizeof(int16_t);
#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
const double phase_increment =
k2Pi * 440.0 / static_cast<double>(play_sample_rate_);
int16_t* destination_r = reinterpret_cast<int16_t*>(audio_buffer);
for (size_t i = 0; i < play_buffer_.size(); ++i) {
destination_r[i] = static_cast<int16_t>((sin(phase_) * (1 << 14)));
phase_ += phase_increment;
}
#else
memcpy(audio_buffer, play_buffer_.data(),
play_buffer_.size() * bytes_per_sample);
play_buffer_.size() * sizeof(int16_t));
#endif
// Return samples per channel or number of frames.
return static_cast<int32_t>(play_buffer_.size() / play_channels_);
}
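For reference, the AUDIO_DEVICE_PLAYS_SINUS_TONE branch above advances the phase by 2*pi*440/fs per sample and scales the sine by 2^14, i.e. roughly half of the int16_t full-scale range. A minimal standalone sketch of the same technique, using a hypothetical helper name that is not part of this patch:

#include <cmath>
#include <cstddef>
#include <cstdint>

// Illustrative helper (not in the patch): fills |buffer| with a 440 Hz tone
// at |sample_rate|. |phase| carries the oscillator state between calls, like
// the phase_ member does in AudioDeviceBuffer.
void FillWithSinusTone(int16_t* buffer, size_t num_samples, int sample_rate,
                       double* phase) {
  const double k2Pi = 6.28318530717959;
  const double phase_increment = k2Pi * 440.0 / sample_rate;
  for (size_t i = 0; i < num_samples; ++i) {
    // Scale by 2^14 to stay at about half of the int16_t range.
    buffer[i] = static_cast<int16_t>(std::sin(*phase) * (1 << 14));
    *phase += phase_increment;
  }
}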


@ -21,6 +21,7 @@
#include "webrtc/typedefs.h"
namespace webrtc {
// Delta times between two successive playout callbacks are limited to this
// value before added to an internal array.
const size_t kMaxDeltaTimeInMs = 500;
@ -251,6 +252,12 @@ class AudioDeviceBuffer {
// Setting this member to false prevents (possibly invalid) log messages from
// being printed in the LogStats() task.
bool log_stats_ ACCESS_ON(task_queue_);
// Should *never* be defined in production builds. Only used for testing.
// When defined, the output signal will be replaced by a sinus tone at 440Hz.
#ifdef AUDIO_DEVICE_PLAYS_SINUS_TONE
double phase_ ACCESS_ON(playout_thread_checker_);
#endif
};
} // namespace webrtc


@ -189,6 +189,7 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
void HandleValidRouteChange();
void HandleCanPlayOrRecordChange(bool can_play_or_record);
void HandleSampleRateChange(float sample_rate);
void HandlePlayoutGlitchDetected();
// Uses current |playout_parameters_| and |record_parameters_| to inform the
// audio device buffer (ADB) about our internal audio parameters.
@ -290,6 +291,13 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
// Set to true if we've activated the audio session.
bool has_configured_session_;
// Counts number of detected audio glitches on the playout side.
int64_t num_detected_playout_glitches_;
int64_t last_playout_time_;
// Counts number of playout callbacks per call.
int64_t num_playout_callbacks_;
// Exposes private members for testing purposes only.
FRIEND_TEST_ALL_PREFIXES(AudioDeviceTest, testInterruptedAudioSession);
};


@ -13,8 +13,6 @@
#include "webrtc/modules/audio_device/ios/audio_device_ios.h"
#include <cmath>
#include "webrtc/base/array_view.h"
#include "webrtc/base/atomicops.h"
#include "webrtc/base/bind.h"
@ -23,8 +21,10 @@
#include "webrtc/base/logging.h"
#include "webrtc/base/thread.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/base/timeutils.h"
#include "webrtc/modules/audio_device/fine_audio_buffer.h"
#include "webrtc/sdk/objc/Framework/Classes/Common/helpers.h"
#include "webrtc/system_wrappers/include/metrics.h"
#import "WebRTC/RTCLogging.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionDelegateAdapter.h"
@ -66,6 +66,7 @@ enum AudioDeviceMessageType : uint32_t {
kMessageTypeInterruptionEnd,
kMessageTypeValidRouteChange,
kMessageTypeCanPlayOrRecordChange,
kMessageTypePlayoutGlitchDetected,
};
using ios::CheckAndLogError;
@ -109,7 +110,10 @@ AudioDeviceIOS::AudioDeviceIOS()
initialized_(false),
audio_is_initialized_(false),
is_interrupted_(false),
has_configured_session_(false) {
has_configured_session_(false),
num_detected_playout_glitches_(0),
last_playout_time_(0),
num_playout_callbacks_(0) {
LOGI() << "ctor" << ios::GetCurrentThreadDescription();
thread_ = rtc::Thread::Current();
audio_session_observer_ =
@ -220,6 +224,7 @@ int32_t AudioDeviceIOS::StartPlayout() {
LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
}
rtc::AtomicOps::ReleaseStore(&playing_, 1);
num_playout_callbacks_ = 0;
return 0;
}
@ -234,6 +239,19 @@ int32_t AudioDeviceIOS::StopPlayout() {
audio_is_initialized_ = false;
}
rtc::AtomicOps::ReleaseStore(&playing_, 0);
// Derive average number of calls to OnGetPlayoutData() between detected
// audio glitches and add the result to a histogram.
int average_number_of_playout_callbacks_between_glitches = 100000;
if (num_detected_playout_glitches_ > 0) {
average_number_of_playout_callbacks_between_glitches =
num_playout_callbacks_ / num_detected_playout_glitches_;
}
RTC_HISTOGRAM_COUNTS_100000(
"WebRTC.Audio.AveragePlayoutCallbacksBetweenGlitches",
average_number_of_playout_callbacks_between_glitches);
RTCLog(@"Average number of playout callbacks between glitches: %d",
average_number_of_playout_callbacks_between_glitches);
return 0;
}
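To make the logged value concrete: with roughly 10 ms native buffers there are about 100 playout callbacks per second, so a five-minute call (around 30000 callbacks) with three detected glitches would report 30000 / 3 = 10000, while a glitch-free call reports the initial value of 100000, which is also the upper bound of the RTC_HISTOGRAM_COUNTS_100000 range.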
@ -419,6 +437,7 @@ OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
RTC_DCHECK_EQ(1, io_data->mNumberBuffers);
AudioBuffer* audio_buffer = &io_data->mBuffers[0];
RTC_DCHECK_EQ(1, audio_buffer->mNumberChannels);
// Get pointer to internal audio buffer to which new audio data shall be
// written.
const size_t size_in_bytes = audio_buffer->mDataByteSize;
@ -433,10 +452,30 @@ OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
return noErr;
}
// Measure time since last call to OnGetPlayoutData() and see if it is larger
// than a well defined threshold. If so, we have a clear indication of a
// glitch in the output audio since the core audio layer will most likely run
// dry in this state.
++num_playout_callbacks_;
const int64_t now_time = rtc::TimeMillis();
if (time_stamp->mSampleTime != num_frames) {
const int64_t delta_time = now_time - last_playout_time_;
const int glitch_threshold =
1.5 * playout_parameters_.GetBufferSizeInMilliseconds() - 1;
if (delta_time > glitch_threshold) {
RTCLogWarning(@"Playout audio glitch detected.\n"
" Time since last OnGetPlayoutData was %lld ms.",
delta_time);
thread_->Post(RTC_FROM_HERE, this, kMessageTypePlayoutGlitchDetected);
}
}
last_playout_time_ = now_time;
// Read decoded 16-bit PCM samples from WebRTC (using a size that matches
// the native I/O audio unit) and copy the result to the audio buffer in the
// |io_data| destination.
fine_audio_buffer_->GetPlayoutData(rtc::ArrayView<int8_t>(destination, size_in_bytes));
fine_audio_buffer_->GetPlayoutData(
rtc::ArrayView<int8_t>(destination, size_in_bytes));
return noErr;
}
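The detection itself is a simple timing check: with a native buffer of roughly 10 ms, the threshold evaluates to 1.5 * 10 - 1 = 14 ms, so any gap between two successive render callbacks larger than that is counted as a glitch. Because OnGetPlayoutData() runs on the real-time audio I/O thread, the glitch is only flagged there; the counting and logging happen later in HandlePlayoutGlitchDetected() once the kMessageTypePlayoutGlitchDetected message has been posted to the thread on which the ADM was created.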
@ -458,6 +497,9 @@ void AudioDeviceIOS::OnMessage(rtc::Message *msg) {
delete data;
break;
}
case kMessageTypePlayoutGlitchDetected:
HandlePlayoutGlitchDetected();
break;
}
}
@ -530,8 +572,10 @@ void AudioDeviceIOS::HandleSampleRateChange(float sample_rate) {
" Session sample rate: %f frames_per_buffer: %lu\n"
" ADM sample rate: %f frames_per_buffer: %lu",
sample_rate,
session_sample_rate, (unsigned long)session_frames_per_buffer,
current_sample_rate, (unsigned long)current_frames_per_buffer);;
session_sample_rate,
(unsigned long)session_frames_per_buffer,
current_sample_rate,
(unsigned long)current_frames_per_buffer);
// Sample rate and buffer size are the same, no work to do.
if (std::abs(current_sample_rate - session_sample_rate) <= DBL_EPSILON &&
@ -572,6 +616,13 @@ void AudioDeviceIOS::HandleSampleRateChange(float sample_rate) {
RTCLog(@"Successfully handled sample rate change.");
}
void AudioDeviceIOS::HandlePlayoutGlitchDetected() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
num_detected_playout_glitches_++;
RTCLog(@"Number of detected playout glitches: %lld",
num_detected_playout_glitches_);
}
void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
LOGI() << "UpdateAudioDevicebuffer";
// AttachAudioBuffer() is called at construction by the main class but check


@ -41,8 +41,8 @@ const double kRTCAudioSessionLowComplexitySampleRate = 16000.0;
// ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will
// take care of any buffering required to convert between native buffers and
// buffers used by WebRTC. It is beneficial for the performance if the native
// size is as close to 10ms as possible since it results in "clean" callback
// sequence without bursts of callbacks back to back.
// size is an even multiple of 10ms since it results in a "clean" callback
// sequence without bursts of callbacks back to back.
const double kRTCAudioSessionHighPerformanceIOBufferDuration = 0.01;
// Use a larger buffer size on devices with only one core (e.g. iPhone 4).
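As a concrete example of the sizes involved: requesting kRTCAudioSessionHighPerformanceIOBufferDuration = 0.01 s at a 48 kHz session rate corresponds to 480 frames, but the hardware typically rounds this to 512 frames, i.e. 512 / 48000 ≈ 10.67 ms, which is the "~10.6667ms" figure in the comment above; the FineAudioBuffer then bridges the mismatch between that native size and WebRTC's 10 ms (480-frame) processing chunks.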


@ -147,6 +147,12 @@ declare_args() {
# use file-based audio playout and record.
rtc_use_dummy_audio_file_devices = false
# When set to true, replace the audio output with a sinus tone at 440Hz.
# The ADM will ask for audio data from WebRTC but instead of reading real
# audio samples from NetEQ, a sinus tone will be generated and replace the
# real audio samples.
rtc_audio_device_plays_sinus_tone = false
# When set to true, test targets will declare the files needed to run memcheck
# as data dependencies. This is to enable memcheck execution on swarming bots.
rtc_use_memcheck = false
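Presumably the new flag is enabled the same way as the other args in this file, e.g. gn gen out/Debug --args='rtc_audio_device_plays_sinus_tone=true' (the output directory and any additional args are illustrative); per the BUILD.gn hunk at the top, this adds the AUDIO_DEVICE_PLAYS_SINUS_TONE define that switches AudioDeviceBuffer::GetPlayoutData() over to the generated tone.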