Refactor AVAudioSession initialization code.

BUG=

Review URL: https://codereview.webrtc.org/1778793005

Cr-Commit-Position: refs/heads/master@{#11972}
This commit is contained in:
tkchin 2016-03-12 20:06:28 -08:00 committed by Commit bot
parent 0ce3bf9cc4
commit 9f987d3200
9 changed files with 456 additions and 208 deletions

View File

@ -135,9 +135,12 @@ source_set("audio_device") {
"ios/audio_device_ios.h",
"ios/audio_device_ios.mm",
"ios/audio_device_not_implemented_ios.mm",
"ios/objc/RTCAudioSession+Configuration.mm",
"ios/objc/RTCAudioSession+Private.h",
"ios/objc/RTCAudioSession.h",
"ios/objc/RTCAudioSession.mm",
"ios/objc/RTCAudioSessionConfiguration.h",
"ios/objc/RTCAudioSessionConfiguration.m",
]
cflags += [ "-fobjc-arc" ] # CLANG_ENABLE_OBJC_ARC = YES.
libs = [

View File

@ -173,9 +173,12 @@
'ios/audio_device_ios.h',
'ios/audio_device_ios.mm',
'ios/audio_device_not_implemented_ios.mm',
'ios/objc/RTCAudioSession+Configuration.mm',
'ios/objc/RTCAudioSession+Private.h',
'ios/objc/RTCAudioSession.h',
'ios/objc/RTCAudioSession.mm',
'ios/objc/RTCAudioSessionConfiguration.h',
'ios/objc/RTCAudioSessionConfiguration.m',
],
'xcode_settings': {
'CLANG_ENABLE_OBJC_ARC': 'YES',

View File

@ -25,7 +25,9 @@
#include "webrtc/modules/audio_device/fine_audio_buffer.h"
#include "webrtc/modules/utility/include/helpers_ios.h"
#import "webrtc/base/objc/RTCLogging.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h"
namespace webrtc {
@ -48,38 +50,7 @@ namespace webrtc {
} \
} while (0)
// Preferred hardware sample rate (unit is in Hertz). The client sample rate
// will be set to this value as well to avoid resampling in the audio unit's
// format converter. Note that some devices, e.g. BT headsets, only support
// 8000Hz as native sample rate.
const double kHighPerformanceSampleRate = 48000.0;
// A lower sample rate will be used for devices with only one core
// (e.g. iPhone 4). The goal is to reduce the CPU load of the application.
const double kLowComplexitySampleRate = 16000.0;
// Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms
// size used by WebRTC. The exact actual size will differ between devices.
// Example: using 48kHz on iPhone 6 results in a native buffer size of
// ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will
// take care of any buffering required to convert between native buffers and
// buffers used by WebRTC. It is beneficial for the performance if the native
// size is as close to 10ms as possible since it results in "clean" callback
// sequence without bursts of callbacks back to back.
const double kHighPerformanceIOBufferDuration = 0.01;
// Use a larger buffer size on devices with only one core (e.g. iPhone 4).
// It will result in a lower CPU consumption at the cost of a larger latency.
// The size of 60ms is based on instrumentation that shows a significant
// reduction in CPU load compared with 10ms on low-end devices.
// TODO(henrika): monitor this size and determine if it should be modified.
const double kLowComplexityIOBufferDuration = 0.06;
// Try to use mono to save resources. Also avoids channel format conversion
// in the I/O audio unit. Initial tests have shown that it is possible to use
// mono natively for built-in microphones and for BT headsets but not for
// wired headsets. Wired headsets only support stereo as native channel format
// but it is a low cost operation to do a format conversion to mono in the
// audio unit. Hence, we will not hit a RTC_CHECK in
// VerifyAudioParametersForActiveAudioSession() for a mismatch between the
// preferred number of channels and the actual number of channels.
const int kPreferredNumberOfChannels = 1;
// Number of bytes per audio sample for 16-bit signed integer representation.
const UInt32 kBytesPerSample = 2;
// Hardcoded delay estimates based on real measurements.
@ -95,149 +66,6 @@ const int kMaxNumberOfAudioUnitInitializeAttempts = 5;
using ios::CheckAndLogError;
// Returns the sample rate to request from the audio session. Multi-core
// devices get the high-performance rate; single-core devices fall back to
// the cheaper low-complexity rate.
static double GetPreferredSampleRate() {
  if (ios::GetProcessorCount() > 1) {
    return kHighPerformanceSampleRate;
  }
  return kLowComplexitySampleRate;
}
// Returns the hardware I/O buffer duration (seconds) to request. Multi-core
// devices get the smallest (10ms-matching) duration; single-core devices get
// a larger one to reduce CPU load.
static double GetPreferredIOBufferDuration() {
  if (ios::GetProcessorCount() > 1) {
    return kHighPerformanceIOBufferDuration;
  }
  return kLowComplexityIOBufferDuration;
}
// Checks that the audio session is usable for full-duplex VoIP: an audio
// input path must be available and the PlayAndRecord category plus VoiceChat
// mode must already be in effect. Returns false (after logging) otherwise.
static bool VerifyAudioSession(RTCAudioSession* session) {
  LOG(LS_INFO) << "VerifyAudioSession";
  // The device must currently offer an audio input path.
  if (!session.inputAvailable) {
    LOG(LS_ERROR) << "No audio input path is available!";
    return false;
  }
  // The required category must already be active on the session.
  BOOL categoryOk =
      [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord];
  if (!categoryOk) {
    LOG(LS_ERROR)
        << "Failed to set category to AVAudioSessionCategoryPlayAndRecord";
    return false;
  }
  // The required mode must already be active on the session.
  BOOL modeOk = [session.mode isEqualToString:AVAudioSessionModeVoiceChat];
  if (!modeOk) {
    LOG(LS_ERROR) << "Failed to set mode to AVAudioSessionModeVoiceChat";
    return false;
  }
  return true;
}
// Activates an audio session suitable for full duplex VoIP sessions when
// |activate| is true. Also sets the preferred sample rate and IO buffer
// duration. Deactivates an active audio session if |activate| is set to false.
// Returns true on success. The session lock is taken and released internally.
static bool ActivateAudioSession(RTCAudioSession* session, bool activate) {
  LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")";
  NSError* error = nil;
  BOOL success = NO;
  [session lockForConfiguration];
  if (!activate) {
    success = [session setActive:NO
                           error:&error];
    [session unlockForConfiguration];
    return CheckAndLogError(success, error);
  }
  // Go ahead and activate our own audio session since |activate| is true.
  // Use a category which supports simultaneous recording and playback.
  // By default, using this category implies that our apps audio is
  // nonmixable, hence activating the session will interrupt any other
  // audio sessions which are also nonmixable.
  // NOTE: compare with isEqualToString: rather than pointer equality;
  // AVAudioSession does not guarantee it returns the constant instance.
  if (![session.category
          isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
    error = nil;
    success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
                       withOptions:AVAudioSessionCategoryOptionAllowBluetooth
                             error:&error];
    RTC_DCHECK(CheckAndLogError(success, error));
  }
  // Specify mode for two-way voice communication (e.g. VoIP).
  if (![session.mode isEqualToString:AVAudioSessionModeVoiceChat]) {
    error = nil;
    success = [session setMode:AVAudioSessionModeVoiceChat error:&error];
    RTC_DCHECK(CheckAndLogError(success, error));
  }
  // Set the session's sample rate or the hardware sample rate.
  // It is essential that we use the same sample rate as stream format
  // to ensure that the I/O unit does not have to do sample rate conversion.
  error = nil;
  success =
      [session setPreferredSampleRate:GetPreferredSampleRate() error:&error];
  RTC_DCHECK(CheckAndLogError(success, error));
  // Set the preferred audio I/O buffer duration, in seconds.
  error = nil;
  success = [session setPreferredIOBufferDuration:GetPreferredIOBufferDuration()
                                            error:&error];
  RTC_DCHECK(CheckAndLogError(success, error));
  // Activate the audio session. Activation can fail if another active audio
  // session (e.g. phone call) has higher priority than ours.
  error = nil;
  success = [session setActive:YES error:&error];
  if (!CheckAndLogError(success, error)) {
    [session unlockForConfiguration];
    return false;
  }
  // Ensure that the active audio session has the correct category and mode.
  if (!VerifyAudioSession(session)) {
    LOG(LS_ERROR) << "Failed to verify audio session category and mode";
    [session unlockForConfiguration];
    return false;
  }
  // Try to set the preferred number of hardware audio channels. These calls
  // must be done after setting the audio sessions category and mode and
  // activating the session.
  // We try to use mono in both directions to save resources and format
  // conversions in the audio unit. Some devices only support stereo;
  // e.g. wired headset on iPhone 6.
  // TODO(henrika): add support for stereo if needed.
  error = nil;
  success =
      [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels
                                           error:&error];
  RTC_DCHECK(CheckAndLogError(success, error));
  error = nil;
  success =
      [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels
                                            error:&error];
  RTC_DCHECK(CheckAndLogError(success, error));
  [session unlockForConfiguration];
  return true;
}
// An application can create more than one ADM and start audio streaming
// for all of them. It is essential that we only activate the app's audio
// session once (for the first one) and deactivate it once (for the last).
static bool ActivateAudioSession() {
  LOGI() << "ActivateAudioSession";
  return ActivateAudioSession([RTCAudioSession sharedInstance], true);
}
// If more than one object is using the audio session, ensure that only the
// last object deactivates. Apple recommends: "activate your audio session
// only as needed and deactivate it when you are not using audio".
static bool DeactivateAudioSession() {
  LOGI() << "DeactivateAudioSession";
  return ActivateAudioSession([RTCAudioSession sharedInstance], false);
}
#if !defined(NDEBUG)
// Helper method for printing out an AudioStreamBasicDescription structure.
static void LogABSD(AudioStreamBasicDescription absd) {
@ -313,13 +141,15 @@ int32_t AudioDeviceIOS::Init() {
LogDeviceInfo();
#endif
// Store the preferred sample rate and preferred number of channels already
// here. They have not been set and confirmed yet since ActivateAudioSession()
// here. They have not been set and confirmed yet since configureForWebRTC
// is not called until audio is about to start. However, it makes sense to
// store the parameters now and then verify at a later stage.
playout_parameters_.reset(GetPreferredSampleRate(),
kPreferredNumberOfChannels);
record_parameters_.reset(GetPreferredSampleRate(),
kPreferredNumberOfChannels);
RTCAudioSessionConfiguration* config =
[RTCAudioSessionConfiguration webRTCConfiguration];
playout_parameters_.reset(config.sampleRate,
config.outputNumberOfChannels);
record_parameters_.reset(config.sampleRate,
config.inputNumberOfChannels);
// Ensure that the audio device buffer (ADB) knows about the internal audio
// parameters. Note that, even if we are unable to get a mono audio session,
// we will always tell the I/O audio unit to do a channel format conversion
@ -673,7 +503,9 @@ void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
// hardware sample rate but continue and use the non-ideal sample rate after
// reinitializing the audio parameters. Most BT headsets only support 8kHz or
// 16kHz.
if (session.sampleRate != GetPreferredSampleRate()) {
RTCAudioSessionConfiguration* webRTCConfig =
[RTCAudioSessionConfiguration webRTCConfiguration];
if (session.sampleRate != webRTCConfig.sampleRate) {
LOG(LS_WARNING) << "Unable to set the preferred sample rate";
}
@ -791,7 +623,7 @@ bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() {
UInt32 size = sizeof(application_format);
RTC_DCHECK_EQ(playout_parameters_.sample_rate(),
record_parameters_.sample_rate());
RTC_DCHECK_EQ(1, kPreferredNumberOfChannels);
RTC_DCHECK_EQ(1, kRTCAudioSessionPreferredNumberOfChannels);
application_format.mSampleRate = playout_parameters_.sample_rate();
application_format.mFormatID = kAudioFormatLinearPCM;
application_format.mFormatFlags =
@ -799,7 +631,8 @@ bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() {
application_format.mBytesPerPacket = kBytesPerSample;
application_format.mFramesPerPacket = 1; // uncompressed
application_format.mBytesPerFrame = kBytesPerSample;
application_format.mChannelsPerFrame = kPreferredNumberOfChannels;
application_format.mChannelsPerFrame =
kRTCAudioSessionPreferredNumberOfChannels;
application_format.mBitsPerChannel = 8 * kBytesPerSample;
// Store the new format.
application_format_ = application_format;
@ -937,16 +770,16 @@ bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) {
bool AudioDeviceIOS::InitPlayOrRecord() {
LOGI() << "InitPlayOrRecord";
// Activate the audio session if not already activated.
if (!ActivateAudioSession()) {
return false;
}
// Ensure that the active audio session has the correct category and mode.
// Use the correct audio session configuration for WebRTC.
// This will attempt to activate the audio session.
RTCAudioSession* session = [RTCAudioSession sharedInstance];
if (!VerifyAudioSession(session)) {
DeactivateAudioSession();
LOG(LS_ERROR) << "Failed to verify audio session category and mode";
[session lockForConfiguration];
NSError* error = nil;
if (![session configureWebRTCSession:&error]) {
RTCLogError(@"Failed to configure WebRTC session: %@",
error.localizedDescription);
[session unlockForConfiguration];
return false;
}
@ -958,11 +791,11 @@ bool AudioDeviceIOS::InitPlayOrRecord() {
// Create, setup and initialize a new Voice-Processing I/O unit.
if (!SetupAndInitializeVoiceProcessingAudioUnit()) {
// Reduce usage count for the audio session and possibly deactivate it if
// this object is the only user.
DeactivateAudioSession();
[session setActive:NO error:nil];
[session unlockForConfiguration];
return false;
}
[session unlockForConfiguration];
return true;
}
@ -987,7 +820,10 @@ void AudioDeviceIOS::ShutdownPlayOrRecord() {
// All I/O should be stopped or paused prior to deactivating the audio
// session, hence we deactivate as last action.
DeactivateAudioSession();
RTCAudioSession* session = [RTCAudioSession sharedInstance];
[session lockForConfiguration];
[session setActive:NO error:nil];
[session unlockForConfiguration];
}
void AudioDeviceIOS::DisposeAudioUnit() {

View File

@ -0,0 +1,185 @@
/*
* Copyright 2016 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h"
#import "webrtc/base/objc/RTCLogging.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSession+Private.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h"
@implementation RTCAudioSession (Configuration)
// Applies |configuration| to the underlying AVAudioSession and then calls
// setActive: with |active|. Attempts to set every property even if an
// earlier one failed; only the last error encountered is reported through
// |outError|. |lockForConfiguration| must be called before this method.
- (BOOL)setConfiguration:(RTCAudioSessionConfiguration *)configuration
                  active:(BOOL)active
                   error:(NSError **)outError {
  if (![self checkLock:outError]) {
    return NO;
  }

  // Provide an error even if there isn't one so we can log it. We will not
  // return immediately on error in this function and instead try to set
  // everything we can.
  NSError *error = nil;

  // Compare string-valued properties with isEqualToString: rather than
  // pointer equality; AVAudioSession does not guarantee that the returned
  // string is the same instance as the AVFoundation constant.
  if (![self.category isEqualToString:configuration.category] ||
      self.categoryOptions != configuration.categoryOptions) {
    NSError *categoryError = nil;
    if (![self setCategory:configuration.category
               withOptions:configuration.categoryOptions
                     error:&categoryError]) {
      RTCLogError(@"Failed to set category: %@",
                  categoryError.localizedDescription);
      error = categoryError;
    }
  }

  if (![self.mode isEqualToString:configuration.mode]) {
    NSError *modeError = nil;
    if (![self setMode:configuration.mode error:&modeError]) {
      RTCLogError(@"Failed to set mode: %@",
                  modeError.localizedDescription);
      error = modeError;
    }
  }

  if (self.sampleRate != configuration.sampleRate) {
    NSError *sampleRateError = nil;
    if (![self setPreferredSampleRate:configuration.sampleRate
                                error:&sampleRateError]) {
      RTCLogError(@"Failed to set preferred sample rate: %@",
                  sampleRateError.localizedDescription);
      error = sampleRateError;
    }
  }

  if (self.IOBufferDuration != configuration.ioBufferDuration) {
    NSError *bufferDurationError = nil;
    if (![self setPreferredIOBufferDuration:configuration.ioBufferDuration
                                      error:&bufferDurationError]) {
      RTCLogError(@"Failed to set preferred IO buffer duration: %@",
                  bufferDurationError.localizedDescription);
      error = bufferDurationError;
    }
  }

  NSError *activeError = nil;
  if (![self setActive:active error:&activeError]) {
    RTCLogError(@"Failed to setActive to %d: %@",
                active, activeError.localizedDescription);
    error = activeError;
  }

  if (self.isActive) {
    // Try to set the preferred number of hardware audio channels. These calls
    // must be done after setting the audio sessions category and mode and
    // activating the session.
    NSInteger inputNumberOfChannels = configuration.inputNumberOfChannels;
    if (self.inputNumberOfChannels != inputNumberOfChannels) {
      NSError *inputChannelsError = nil;
      if (![self setPreferredInputNumberOfChannels:inputNumberOfChannels
                                             error:&inputChannelsError]) {
        RTCLogError(@"Failed to set preferred input number of channels: %@",
                    inputChannelsError.localizedDescription);
        error = inputChannelsError;
      }
    }
    NSInteger outputNumberOfChannels = configuration.outputNumberOfChannels;
    if (self.outputNumberOfChannels != outputNumberOfChannels) {
      NSError *outputChannelsError = nil;
      if (![self setPreferredOutputNumberOfChannels:outputNumberOfChannels
                                              error:&outputChannelsError]) {
        RTCLogError(@"Failed to set preferred output number of channels: %@",
                    outputChannelsError.localizedDescription);
        error = outputChannelsError;
      }
    }
  }

  if (outError) {
    *outError = error;
  }
  return error == nil;
}
// Applies the WebRTC audio configuration and activates the session. On any
// failure the previously captured configuration is restored (deactivated)
// and NO is returned. |lockForConfiguration| must be held by the caller.
- (BOOL)configureWebRTCSession:(NSError **)outError {
  if (![self checkLock:outError]) {
    return NO;
  }
  RTCLog(@"Configuring audio session for WebRTC.");

  NSError *error = nil;
  // Snapshot the existing configuration so it can be rolled back on failure.
  RTCAudioSessionConfiguration *previousConfig =
      [RTCAudioSessionConfiguration currentConfiguration];
  RTCAudioSessionConfiguration *desiredConfig =
      [RTCAudioSessionConfiguration webRTCConfiguration];

  BOOL succeeded =
      [self setConfiguration:desiredConfig active:YES error:&error];
  if (!succeeded) {
    RTCLogError(@"Failed to set WebRTC audio configuration: %@",
                error.localizedDescription);
    // Attempt to restore previous state.
    [self setConfiguration:previousConfig active:NO error:nil];
  } else if (![self isConfiguredForWebRTC]) {
    // Ensure that the active audio session has the correct category and mode.
    // This should never happen - this means that we succeeded earlier but
    // somehow the settings didn't apply.
    RTCLogError(@"Failed to configure audio session.");
    // Attempt to restore previous state.
    [self setConfiguration:previousConfig active:NO error:nil];
    error = [[NSError alloc] initWithDomain:kRTCAudioSessionErrorDomain
                                       code:kRTCAudioSessionErrorConfiguration
                                   userInfo:nil];
    succeeded = NO;
  }

  if (outError) {
    *outError = error;
  }
  return succeeded;
}
#pragma mark - Private
// Returns YES when the session currently satisfies the minimal WebRTC
// requirements: audio input is available and the active category and mode
// match the WebRTC configuration. Mismatches are logged.
- (BOOL)isConfiguredForWebRTC {
  // Ensure that the device currently supports audio input.
  if (!self.inputAvailable) {
    RTCLogError(@"No audio input path is available!");
    return NO;
  }

  // Only check a minimal list of requirements for whether we have
  // what we want.
  RTCAudioSessionConfiguration *activeConfig =
      [RTCAudioSessionConfiguration currentConfiguration];
  RTCAudioSessionConfiguration *desiredConfig =
      [RTCAudioSessionConfiguration webRTCConfiguration];

  BOOL categoryMatches =
      [activeConfig.category isEqualToString:desiredConfig.category];
  if (!categoryMatches) {
    RTCLog(@"Current category %@ does not match %@",
           activeConfig.category,
           desiredConfig.category);
    return NO;
  }

  BOOL modeMatches = [activeConfig.mode isEqualToString:desiredConfig.mode];
  if (!modeMatches) {
    RTCLog(@"Current mode %@ does not match %@",
           activeConfig.mode,
           desiredConfig.mode);
    return NO;
  }
  return YES;
}
@end

View File

@ -22,6 +22,8 @@ NS_ASSUME_NONNULL_BEGIN
*/
@property(nonatomic, readonly) NSInteger activationCount;
- (BOOL)checkLock:(NSError **)outError;
@end
NS_ASSUME_NONNULL_END

View File

@ -14,9 +14,13 @@
NS_ASSUME_NONNULL_BEGIN
extern NSString * const kRTCAudioSessionErrorDomain;
/** Method that requires lock was called without lock. */
extern NSInteger const kRTCAudioSessionErrorLockRequired;
/** Unknown configuration error occurred. */
extern NSInteger const kRTCAudioSessionErrorConfiguration;
@class RTCAudioSession;
@class RTCAudioSessionConfiguration;
// Surfaces AVAudioSession events. WebRTC will listen directly for notifications
// from AVAudioSession and handle them before calling these delegate methods,
@ -41,6 +45,18 @@ extern NSInteger const kRTCAudioSessionErrorLockRequired;
/** Called when AVAudioSession media server restarts. */
- (void)audioSessionMediaServicesWereReset:(RTCAudioSession *)session;
/** Called when WebRTC needs to take over audio. Applications should call
* -[RTCAudioSession configure] to allow WebRTC to play and record audio.
* TODO(tkchin): Implement this behavior in RTCAudioSession.
*/
- (void)audioSessionShouldConfigure:(RTCAudioSession *)session;
/** Called when WebRTC no longer requires audio. Applications should restore
* their audio state at this point.
* TODO(tkchin): Implement this behavior in RTCAudioSession.
*/
- (void)audioSessionShouldUnconfigure:(RTCAudioSession *)session;
// TODO(tkchin): Maybe handle SilenceSecondaryAudioHintNotification.
@end
@ -68,6 +84,14 @@ extern NSInteger const kRTCAudioSessionErrorLockRequired;
/** Whether RTCAudioSession is currently locked for configuration. */
@property(nonatomic, readonly) BOOL isLocked;
/** If YES, WebRTC will not initialize the audio unit automatically when an
* audio track is ready for playout or recording. Instead, applications should
* listen to the delegate method |audioSessionShouldConfigure| and configure
* the session manually. This should be set before making WebRTC media calls.
* TODO(tkchin): Implement behavior. Currently this just stores a BOOL.
*/
@property(nonatomic, assign) BOOL shouldDelayAudioConfiguration;
// Proxy properties.
@property(readonly) NSString *category;
@property(readonly) AVAudioSessionCategoryOptions categoryOptions;
@ -150,4 +174,23 @@ extern NSInteger const kRTCAudioSessionErrorLockRequired;
@end
@interface RTCAudioSession (Configuration)
/** Applies the configuration to the current session. Attempts to set all
* properties even if previous ones fail. Only the last error will be
* returned. Also calls setActive with |active|.
* |lockForConfiguration| must be called first.
*/
- (BOOL)setConfiguration:(RTCAudioSessionConfiguration *)configuration
active:(BOOL)active
error:(NSError **)outError;
/** Configure the audio session for WebRTC. On failure, we will attempt to
* restore the previously used audio session configuration.
* |lockForConfiguration| must be called first.
*/
- (BOOL)configureWebRTCSession:(NSError **)outError;
@end
NS_ASSUME_NONNULL_END

View File

@ -18,6 +18,7 @@
NSString * const kRTCAudioSessionErrorDomain = @"org.webrtc.RTCAudioSession";
NSInteger const kRTCAudioSessionErrorLockRequired = -1;
NSInteger const kRTCAudioSessionErrorConfiguration = -2;
// This class needs to be thread-safe because it is accessed from many threads.
// TODO(tkchin): Consider more granular locking. We're not expecting a lot of
@ -29,6 +30,7 @@ NSInteger const kRTCAudioSessionErrorLockRequired = -1;
NSInteger _activationCount;
NSInteger _lockRecursionCount;
BOOL _isActive;
BOOL _shouldDelayAudioConfiguration;
}
@synthesize session = _session;
@ -91,6 +93,21 @@ NSInteger const kRTCAudioSessionErrorLockRequired = -1;
}
}
// Setter for shouldDelayAudioConfiguration. Synchronized because the shared
// session singleton is accessed from many threads.
- (void)setShouldDelayAudioConfiguration:(BOOL)shouldDelayAudioConfiguration {
  @synchronized(self) {
    // Only write when the value actually changes.
    if (_shouldDelayAudioConfiguration != shouldDelayAudioConfiguration) {
      _shouldDelayAudioConfiguration = shouldDelayAudioConfiguration;
    }
  }
}
// Thread-safe getter for the delayed-configuration flag.
- (BOOL)shouldDelayAudioConfiguration {
  @synchronized(self) {
    return _shouldDelayAudioConfiguration;
  }
}
- (void)addDelegate:(id<RTCAudioSessionDelegate>)delegate {
@synchronized(self) {
[_delegates addObject:delegate];
@ -250,7 +267,8 @@ NSInteger const kRTCAudioSessionErrorLockRequired = -1;
[self incrementActivationCount];
}
} else {
RTCLogError(@"Failed to setActive:%d. Error: %@", active, error);
RTCLogError(@"Failed to setActive:%d. Error: %@",
active, error.localizedDescription);
}
// Decrement activation count on deactivation whether or not it succeeded.
if (!active) {
@ -441,18 +459,6 @@ NSInteger const kRTCAudioSessionErrorLockRequired = -1;
return error;
}
- (BOOL)checkLock:(NSError **)outError {
// Check ivar instead of trying to acquire lock so that we won't accidentally
// acquire lock if it hasn't already been called.
if (!self.isLocked) {
if (outError) {
*outError = [RTCAudioSession lockError];
}
return NO;
}
return YES;
}
- (NSSet *)delegates {
@synchronized(self) {
return _delegates.setRepresentation;
@ -479,6 +485,18 @@ NSInteger const kRTCAudioSessionErrorLockRequired = -1;
}
}
// Verifies that lockForConfiguration was called before a mutating method.
// Check ivar-backed state instead of trying to acquire the lock so that we
// won't accidentally acquire it if it hasn't already been taken.
// On failure, fills |outError| with a lock-required error and returns NO.
- (BOOL)checkLock:(NSError **)outError {
  if (self.isLocked) {
    return YES;
  }
  if (outError) {
    *outError = [RTCAudioSession lockError];
  }
  return NO;
}
- (void)updateAudioSessionAfterEvent {
BOOL shouldActivate = self.activationCount > 0;
AVAudioSessionSetActiveOptions options = shouldActivate ?

View File

@ -0,0 +1,43 @@
/*
* Copyright 2016 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#import <AVFoundation/AVFoundation.h>
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
extern const int kRTCAudioSessionPreferredNumberOfChannels;
extern const double kRTCAudioSessionHighPerformanceSampleRate;
extern const double kRTCAudioSessionLowComplexitySampleRate;
extern const double kRTCAudioSessionHighPerformanceIOBufferDuration;
extern const double kRTCAudioSessionLowComplexityIOBufferDuration;
// Struct to hold configuration values.
@interface RTCAudioSessionConfiguration : NSObject
@property(nonatomic, strong) NSString *category;
@property(nonatomic, assign) AVAudioSessionCategoryOptions categoryOptions;
@property(nonatomic, strong) NSString *mode;
@property(nonatomic, assign) double sampleRate;
@property(nonatomic, assign) NSTimeInterval ioBufferDuration;
@property(nonatomic, assign) NSInteger inputNumberOfChannels;
@property(nonatomic, assign) NSInteger outputNumberOfChannels;
/** Initializes configuration to defaults. */
- (instancetype)init NS_DESIGNATED_INITIALIZER;
/** Returns the current configuration of the audio session. */
+ (instancetype)currentConfiguration;
/** Returns the configuration that WebRTC needs. */
+ (instancetype)webRTCConfiguration;
@end
NS_ASSUME_NONNULL_END

View File

@ -0,0 +1,115 @@
/*
* Copyright 2016 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h"
// Try to use mono to save resources. Also avoids channel format conversion
// in the I/O audio unit. Initial tests have shown that it is possible to use
// mono natively for built-in microphones and for BT headsets but not for
// wired headsets. Wired headsets only support stereo as native channel format
// but it is a low cost operation to do a format conversion to mono in the
// audio unit. Hence, we will not hit a RTC_CHECK in
// VerifyAudioParametersForActiveAudioSession() for a mismatch between the
// preferred number of channels and the actual number of channels.
const int kRTCAudioSessionPreferredNumberOfChannels = 1;
// Preferred hardware sample rate (unit is in Hertz). The client sample rate
// will be set to this value as well to avoid resampling in the audio unit's
// format converter. Note that some devices, e.g. BT headsets, only support
// 8000Hz as native sample rate.
const double kRTCAudioSessionHighPerformanceSampleRate = 48000.0;
// A lower sample rate will be used for devices with only one core
// (e.g. iPhone 4). The goal is to reduce the CPU load of the application.
const double kRTCAudioSessionLowComplexitySampleRate = 16000.0;
// Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms
// size used by WebRTC. The exact actual size will differ between devices.
// Example: using 48kHz on iPhone 6 results in a native buffer size of
// ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will
// take care of any buffering required to convert between native buffers and
// buffers used by WebRTC. It is beneficial for the performance if the native
// size is as close to 10ms as possible since it results in "clean" callback
// sequence without bursts of callbacks back to back.
const double kRTCAudioSessionHighPerformanceIOBufferDuration = 0.01;
// Use a larger buffer size on devices with only one core (e.g. iPhone 4).
// It will result in a lower CPU consumption at the cost of a larger latency.
// The size of 60ms is based on instrumentation that shows a significant
// reduction in CPU load compared with 10ms on low-end devices.
// TODO(henrika): monitor this size and determine if it should be modified.
const double kRTCAudioSessionLowComplexityIOBufferDuration = 0.06;
@implementation RTCAudioSessionConfiguration
@synthesize category = _category;
@synthesize categoryOptions = _categoryOptions;
@synthesize mode = _mode;
@synthesize sampleRate = _sampleRate;
@synthesize ioBufferDuration = _ioBufferDuration;
@synthesize inputNumberOfChannels = _inputNumberOfChannels;
@synthesize outputNumberOfChannels = _outputNumberOfChannels;
// Designated initializer. Populates the configuration with WebRTC's default
// audio session settings, scaled to the device's CPU core count.
- (instancetype)init {
  self = [super init];
  if (self) {
    // Use a category which supports simultaneous recording and playback.
    // By default, using this category implies that our apps audio is
    // nonmixable, hence activating the session will interrupt any other
    // audio sessions which are also nonmixable.
    _category = AVAudioSessionCategoryPlayAndRecord;
    _categoryOptions = AVAudioSessionCategoryOptionAllowBluetooth;

    // Specify mode for two-way voice communication (e.g. VoIP).
    _mode = AVAudioSessionModeVoiceChat;

    // Pick the sample rate and preferred I/O buffer duration based on core
    // count: multi-core devices get the high-performance values, single-core
    // devices (e.g. iPhone 4) get cheaper ones. Using the same sample rate
    // as the stream format avoids sample rate conversion in the I/O unit.
    BOOL isMultiCore = [NSProcessInfo processInfo].processorCount > 1;
    _sampleRate = isMultiCore ? kRTCAudioSessionHighPerformanceSampleRate
                              : kRTCAudioSessionLowComplexitySampleRate;
    _ioBufferDuration =
        isMultiCore ? kRTCAudioSessionHighPerformanceIOBufferDuration
                    : kRTCAudioSessionLowComplexityIOBufferDuration;

    // We try to use mono in both directions to save resources and format
    // conversions in the audio unit. Some devices only support stereo;
    // e.g. wired headset on iPhone 6.
    // TODO(henrika): add support for stereo if needed.
    _inputNumberOfChannels = kRTCAudioSessionPreferredNumberOfChannels;
    _outputNumberOfChannels = kRTCAudioSessionPreferredNumberOfChannels;
  }
  return self;
}
// Returns a configuration mirroring the audio session's current state. This
// is used to snapshot the session before WebRTC reconfigures it so that the
// previous state can be restored on failure.
+ (instancetype)currentConfiguration {
  RTCAudioSession *session = [RTCAudioSession sharedInstance];
  RTCAudioSessionConfiguration *config =
      [[RTCAudioSessionConfiguration alloc] init];
  // Copy every session property, including category and categoryOptions;
  // omitting them would make a later "restore previous state" silently
  // reapply the WebRTC default category instead of the caller's previous one.
  config.category = session.category;
  config.categoryOptions = session.categoryOptions;
  config.mode = session.mode;
  config.sampleRate = session.sampleRate;
  config.ioBufferDuration = session.IOBufferDuration;
  config.inputNumberOfChannels = session.inputNumberOfChannels;
  config.outputNumberOfChannels = session.outputNumberOfChannels;
  return config;
}
// Returns the configuration WebRTC wants to apply. -init already produces
// the WebRTC defaults, so this is simply a fresh instance.
+ (instancetype)webRTCConfiguration {
  return [[self alloc] init];
}
@end