Support delayed AudioUnit initialization.

Applications can now choose when to give up control of the
AVAudioSession to WebRTC. Otherwise, behavior should be
unchanged.

Adds a toggle to AppRTCDemo so developers can see the different
paths.

BUG=
R=haysc@webrtc.org

Review URL: https://codereview.webrtc.org/1822543002 .

Cr-Commit-Position: refs/heads/master@{#12080}
This commit is contained in:
Tze Kwang Chin 2016-03-21 13:57:40 -07:00
parent bc73fe1aad
commit 307a0922c5
16 changed files with 644 additions and 179 deletions

View File

@ -43,12 +43,6 @@
return YES;
}
// UIApplicationDelegate: forward resign-active events to the root view
// controller so it can tear down any in-progress call UI.
- (void)applicationWillResignActive:(UIApplication *)application {
  ARDMainViewController *mainController =
      (ARDMainViewController *)_window.rootViewController;
  [mainController applicationWillResignActive:application];
}
- (void)applicationWillTerminate:(UIApplication *)application {
RTCShutdownInternalTracer();
RTCCleanupSSL();

View File

@ -15,9 +15,12 @@
@protocol ARDMainViewDelegate <NSObject>
- (void)mainView:(ARDMainView *)mainView
didInputRoom:(NSString *)room
isLoopback:(BOOL)isLoopback
isAudioOnly:(BOOL)isAudioOnly;
didInputRoom:(NSString *)room
isLoopback:(BOOL)isLoopback
isAudioOnly:(BOOL)isAudioOnly
shouldDelayAudioConfig:(BOOL)shouldDelayAudioConfig;
- (void)mainViewDidToggleAudioLoop:(ARDMainView *)mainView;
@end
@ -26,5 +29,7 @@
@interface ARDMainView : UIView
@property(nonatomic, weak) id<ARDMainViewDelegate> delegate;
// Updates the audio loop button as needed.
@property(nonatomic, assign) BOOL isAudioLoopPlaying;
@end

View File

@ -10,8 +10,6 @@
#import "ARDMainView.h"
#import <AVFoundation/AVFoundation.h>
#import "UIImage+ARDUtilities.h"
// TODO(tkchin): retrieve status bar height dynamically.
@ -124,24 +122,17 @@ static CGFloat const kAppLabelHeight = 20;
UILabel *_audioOnlyLabel;
UISwitch *_loopbackSwitch;
UILabel *_loopbackLabel;
UISwitch *_audioConfigDelaySwitch;
UILabel *_audioConfigDelayLabel;
UIButton *_startCallButton;
UIButton *_audioLoopButton;
AVAudioPlayer *_audioPlayer;
}
@synthesize delegate = _delegate;
@synthesize isAudioLoopPlaying = _isAudioLoopPlaying;
- (instancetype)initWithFrame:(CGRect)frame {
if (self = [super initWithFrame:frame]) {
NSString *audioFilePath =
[[NSBundle mainBundle] pathForResource:@"mozart" ofType:@"mp3"];
NSURL *audioFileURL = [NSURL URLWithString:audioFilePath];
_audioPlayer = [[AVAudioPlayer alloc] initWithContentsOfURL:audioFileURL
error:nil];
_audioPlayer.numberOfLoops = -1;
_audioPlayer.volume = 1.0;
[_audioPlayer prepareToPlay];
_appLabel = [[UILabel alloc] initWithFrame:CGRectZero];
_appLabel.text = @"AppRTCDemo";
_appLabel.font = [UIFont fontWithName:@"Roboto" size:34];
@ -184,6 +175,18 @@ static CGFloat const kAppLabelHeight = 20;
[_loopbackLabel sizeToFit];
[self addSubview:_loopbackLabel];
_audioConfigDelaySwitch = [[UISwitch alloc] initWithFrame:CGRectZero];
[_audioConfigDelaySwitch sizeToFit];
_audioConfigDelaySwitch.on = YES;
[self addSubview:_audioConfigDelaySwitch];
_audioConfigDelayLabel = [[UILabel alloc] initWithFrame:CGRectZero];
_audioConfigDelayLabel.text = @"Delay audio config";
_audioConfigDelayLabel.font = controlFont;
_audioConfigDelayLabel.textColor = controlFontColor;
[_audioConfigDelayLabel sizeToFit];
[self addSubview:_audioConfigDelayLabel];
_startCallButton = [UIButton buttonWithType:UIButtonTypeSystem];
_startCallButton.backgroundColor = [UIColor blueColor];
_startCallButton.layer.cornerRadius = 10;
@ -223,6 +226,14 @@ static CGFloat const kAppLabelHeight = 20;
return self;
}
// Custom setter for isAudioLoopPlaying. Refreshes the audio loop button only
// when the stored value actually changes.
- (void)setIsAudioLoopPlaying:(BOOL)isAudioLoopPlaying {
  if (_isAudioLoopPlaying != isAudioLoopPlaying) {
    _isAudioLoopPlaying = isAudioLoopPlaying;
    [self updateAudioLoopButton];
  }
}
- (void)layoutSubviews {
CGRect bounds = self.bounds;
CGFloat roomTextWidth = bounds.size.width - 2 * kRoomTextFieldMargin;
@ -264,8 +275,22 @@ static CGFloat const kAppLabelHeight = 20;
_loopbackLabel.center = CGPointMake(loopbackModeLabelCenterX,
CGRectGetMidY(loopbackModeRect));
CGFloat audioConfigDelayTop =
CGRectGetMaxY(_loopbackSwitch.frame) + kCallControlMargin;
CGRect audioConfigDelayRect =
CGRectMake(kCallControlMargin * 3,
audioConfigDelayTop,
_audioConfigDelaySwitch.frame.size.width,
_audioConfigDelaySwitch.frame.size.height);
_audioConfigDelaySwitch.frame = audioConfigDelayRect;
CGFloat audioConfigDelayLabelCenterX = CGRectGetMaxX(audioConfigDelayRect) +
kCallControlMargin + _audioConfigDelayLabel.frame.size.width / 2;
_audioConfigDelayLabel.center =
CGPointMake(audioConfigDelayLabelCenterX,
CGRectGetMidY(audioConfigDelayRect));
CGFloat audioLoopTop =
CGRectGetMaxY(loopbackModeRect) + kCallControlMargin * 3;
CGRectGetMaxY(audioConfigDelayRect) + kCallControlMargin * 3;
_audioLoopButton.frame = CGRectMake(kCallControlMargin,
audioLoopTop,
_audioLoopButton.frame.size.width,
@ -282,7 +307,7 @@ static CGFloat const kAppLabelHeight = 20;
#pragma mark - Private
- (void)updateAudioLoopButton {
if (_audioPlayer.playing) {
if (_isAudioLoopPlaying) {
_audioLoopButton.backgroundColor = [UIColor redColor];
[_audioLoopButton setTitle:@"Stop sound"
forState:UIControlStateNormal];
@ -296,12 +321,7 @@ static CGFloat const kAppLabelHeight = 20;
}
- (void)onToggleAudioLoop:(id)sender {
if (_audioPlayer.playing) {
[_audioPlayer stop];
} else {
[_audioPlayer play];
}
[self updateAudioLoopButton];
[_delegate mainViewDidToggleAudioLoop:self];
}
- (void)onStartCall:(id)sender {
@ -312,9 +332,10 @@ static CGFloat const kAppLabelHeight = 20;
}
room = [room stringByReplacingOccurrencesOfString:@"-" withString:@""];
[_delegate mainView:self
didInputRoom:room
isLoopback:_loopbackSwitch.isOn
isAudioOnly:_audioOnlySwitch.isOn];
didInputRoom:room
isLoopback:_loopbackSwitch.isOn
isAudioOnly:_audioOnlySwitch.isOn
shouldDelayAudioConfig:_audioConfigDelaySwitch.isOn];
}
@end

View File

@ -11,7 +11,4 @@
#import <UIKit/UIKit.h>
@interface ARDMainViewController : UIViewController
- (void)applicationWillResignActive:(UIApplication *)application;
@end

View File

@ -10,32 +10,44 @@
#import "ARDMainViewController.h"
#import <AVFoundation/AVFoundation.h>
#import "webrtc/base/objc/RTCDispatcher.h"
#import "webrtc/base/objc/RTCLogging.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h"
#import "ARDAppClient.h"
#import "ARDMainView.h"
#import "ARDVideoCallViewController.h"
@interface ARDMainViewController () <ARDMainViewDelegate>
@interface ARDMainViewController () <
ARDMainViewDelegate,
RTCAudioSessionDelegate>
@end
@implementation ARDMainViewController
- (void)loadView {
ARDMainView *mainView = [[ARDMainView alloc] initWithFrame:CGRectZero];
mainView.delegate = self;
self.view = mainView;
@implementation ARDMainViewController {
ARDMainView *_mainView;
AVAudioPlayer *_audioPlayer;
BOOL _shouldDelayAudioConfig;
}
- (void)applicationWillResignActive:(UIApplication *)application {
// Terminate any calls when we aren't active.
[self dismissViewControllerAnimated:NO completion:nil];
- (void)loadView {
_mainView = [[ARDMainView alloc] initWithFrame:CGRectZero];
_mainView.delegate = self;
self.view = _mainView;
[self setupAudioSession];
[self setupAudioPlayer];
}
#pragma mark - ARDMainViewDelegate
- (void)mainView:(ARDMainView *)mainView
didInputRoom:(NSString *)room
isLoopback:(BOOL)isLoopback
isAudioOnly:(BOOL)isAudioOnly {
didInputRoom:(NSString *)room
isLoopback:(BOOL)isLoopback
isAudioOnly:(BOOL)isAudioOnly
shouldDelayAudioConfig:(BOOL)shouldDelayAudioConfig {
if (!room.length) {
[self showAlertWithMessage:@"Missing room name."];
return;
@ -65,6 +77,10 @@
return;
}
_shouldDelayAudioConfig = shouldDelayAudioConfig;
RTCAudioSession *session = [RTCAudioSession sharedInstance];
session.shouldDelayAudioConfiguration = _shouldDelayAudioConfig;
// Kick off the video call.
ARDVideoCallViewController *videoCallViewController =
[[ARDVideoCallViewController alloc] initForRoom:trimmedRoom
@ -77,8 +93,82 @@
completion:nil];
}
// ARDMainViewDelegate: toggles the looping audio file, then reflects the
// player's actual state back into the view (play may fail silently).
- (void)mainViewDidToggleAudioLoop:(ARDMainView *)mainView {
  BOOL wasPlaying = mainView.isAudioLoopPlaying;
  if (wasPlaying) {
    [_audioPlayer stop];
  } else {
    [_audioPlayer play];
  }
  mainView.isAudioLoopPlaying = _audioPlayer.playing;
}
#pragma mark - RTCAudioSessionDelegate
// RTCAudioSessionDelegate. Only called when shouldDelayAudioConfiguration is
// YES. Stops local playback on the main queue, then configures the audio
// session for WebRTC.
- (void)audioSessionShouldConfigure:(RTCAudioSession *)session {
  // Stop playback on main queue and then configure WebRTC.
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeMain
                               block:^{
    if (_mainView.isAudioLoopPlaying) {
      RTCLog(@"Stopping audio loop due to WebRTC start.");
      [_audioPlayer stop];
    }
    // TODO(tkchin): Shouldn't lock on main queue. Figure out better way to
    // check audio loop state.
    [session lockForConfiguration];
    // Previously the error was discarded (nil passed); capture and log it so
    // configuration failures are visible.
    NSError *error = nil;
    if (![session configureWebRTCSession:&error]) {
      RTCLogError(@"Failed to configure WebRTC session: %@",
                  error.localizedDescription);
    }
    [session unlockForConfiguration];
  }];
}
// RTCAudioSessionDelegate. Only called when shouldDelayAudioConfiguration is
// YES. Restores the application's saved audio session configuration.
- (void)audioSessionShouldUnconfigure:(RTCAudioSession *)session {
  [session lockForConfiguration];
  // Previously the error was discarded (nil passed); capture and log it so
  // unconfiguration failures are visible.
  NSError *error = nil;
  if (![session unconfigureWebRTCSession:&error]) {
    RTCLogError(@"Failed to unconfigure WebRTC session: %@",
                error.localizedDescription);
  }
  [session unlockForConfiguration];
}
// RTCAudioSessionDelegate. WebRTC has released the audio session; resume the
// audio loop on the main queue if the user had it playing.
- (void)audioSessionDidUnconfigure:(RTCAudioSession *)session {
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeMain
                               block:^{
    if (!_mainView.isAudioLoopPlaying) {
      return;
    }
    RTCLog(@"Starting audio loop due to WebRTC end.");
    [_audioPlayer play];
  }];
}
#pragma mark - Private
// Configures the shared RTCAudioSession for ambient, ducking playback and
// registers self as a delegate to receive WebRTC configuration callbacks.
- (void)setupAudioSession {
  RTCAudioSessionConfiguration *config =
      [[RTCAudioSessionConfiguration alloc] init];
  config.category = AVAudioSessionCategoryAmbient;
  config.categoryOptions = AVAudioSessionCategoryOptionDuckOthers;
  config.mode = AVAudioSessionModeDefault;

  RTCAudioSession *session = [RTCAudioSession sharedInstance];
  [session addDelegate:self];
  [session lockForConfiguration];
  NSError *error = nil;
  BOOL succeeded = [session setConfiguration:config active:YES error:&error];
  if (!succeeded) {
    RTCLogError(@"Error setting configuration: %@", error.localizedDescription);
  }
  [session unlockForConfiguration];
}
// Creates the looping background audio player for the bundled mozart.mp3
// test file.
- (void)setupAudioPlayer {
  NSString *audioFilePath =
      [[NSBundle mainBundle] pathForResource:@"mozart" ofType:@"mp3"];
  if (!audioFilePath) {
    RTCLogError(@"Missing audio file resource mozart.mp3.");
    return;
  }
  // Use fileURLWithPath: here. URLWithString: expects a URL string with a
  // scheme and produces an invalid URL for a plain filesystem path.
  NSURL *audioFileURL = [NSURL fileURLWithPath:audioFilePath];
  NSError *error = nil;
  _audioPlayer = [[AVAudioPlayer alloc] initWithContentsOfURL:audioFileURL
                                                        error:&error];
  if (!_audioPlayer) {
    RTCLogError(@"Failed to create audio player: %@",
                error.localizedDescription);
    return;
  }
  _audioPlayer.numberOfLoops = -1;
  _audioPlayer.volume = 1.0;
  [_audioPlayer prepareToPlay];
}
- (void)showAlertWithMessage:(NSString*)message {
UIAlertView* alertView = [[UIAlertView alloc] initWithTitle:nil
message:message

View File

@ -162,6 +162,7 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
void OnInterruptionBegin() override;
void OnInterruptionEnd() override;
void OnValidRouteChange() override;
void OnConfiguredForWebRTC() override;
// VoiceProcessingAudioUnitObserver methods.
OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
@ -180,6 +181,7 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
void HandleInterruptionBegin();
void HandleInterruptionEnd();
void HandleValidRouteChange();
void HandleConfiguredForWebRTC();
// Uses current |playout_parameters_| and |record_parameters_| to inform the
// audio device buffer (ADB) about our internal audio parameters.
@ -282,9 +284,6 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
// Audio interruption observer instance.
RTCAudioSessionDelegateAdapter* audio_session_observer_;
// Contains the audio data format specification for a stream of audio.
AudioStreamBasicDescription application_format_;
};
} // namespace webrtc

View File

@ -191,8 +191,11 @@ int32_t AudioDeviceIOS::StartPlayout() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTC_DCHECK(play_is_initialized_);
RTC_DCHECK(!playing_);
fine_audio_buffer_->ResetPlayout();
if (!recording_) {
if (fine_audio_buffer_) {
fine_audio_buffer_->ResetPlayout();
}
if (!recording_ &&
audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
if (!audio_unit_->Start()) {
RTCLogError(@"StartPlayout failed to start audio unit.");
return -1;
@ -222,8 +225,11 @@ int32_t AudioDeviceIOS::StartRecording() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTC_DCHECK(rec_is_initialized_);
RTC_DCHECK(!recording_);
fine_audio_buffer_->ResetRecord();
if (!playing_) {
if (fine_audio_buffer_) {
fine_audio_buffer_->ResetRecord();
}
if (!playing_ &&
audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
if (!audio_unit_->Start()) {
RTCLogError(@"StartRecording failed to start audio unit.");
return -1;
@ -347,6 +353,18 @@ void AudioDeviceIOS::OnValidRouteChange() {
rtc::Bind(&webrtc::AudioDeviceIOS::HandleValidRouteChange, this));
}
void AudioDeviceIOS::OnConfiguredForWebRTC() {
RTC_DCHECK(async_invoker_);
RTC_DCHECK(thread_);
if (thread_->IsCurrent()) {
HandleValidRouteChange();
return;
}
async_invoker_->AsyncInvoke<void>(
thread_,
rtc::Bind(&webrtc::AudioDeviceIOS::HandleConfiguredForWebRTC, this));
}
OSStatus AudioDeviceIOS::OnDeliverRecordedData(
AudioUnitRenderActionFlags* flags,
const AudioTimeStamp* time_stamp,
@ -431,6 +449,7 @@ OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
void AudioDeviceIOS::HandleInterruptionBegin() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTCLog(@"Stopping the audio unit due to interruption begin.");
if (!audio_unit_->Stop()) {
RTCLogError(@"Failed to stop the audio unit.");
@ -440,6 +459,7 @@ void AudioDeviceIOS::HandleInterruptionBegin() {
void AudioDeviceIOS::HandleInterruptionEnd() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTCLog(@"Starting the audio unit due to interruption end.");
if (!audio_unit_->Start()) {
RTCLogError(@"Failed to start the audio unit.");
@ -469,6 +489,39 @@ void AudioDeviceIOS::HandleValidRouteChange() {
}
}
void AudioDeviceIOS::HandleConfiguredForWebRTC() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
// If we're not initialized we don't need to do anything. Audio unit will
// be initialized on initialization.
if (!rec_is_initialized_ && !play_is_initialized_)
return;
// If we're initialized, we must have an audio unit.
RTC_DCHECK(audio_unit_);
// Use configured audio session's settings to set up audio device buffer.
// TODO(tkchin): Use RTCAudioSessionConfiguration to pick up settings and
// pass it along.
SetupAudioBuffersForActiveAudioSession();
// Initialize the audio unit. This will affect any existing audio playback.
if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) {
RTCLogError(@"Failed to initialize audio unit after configuration.");
return;
}
// If we haven't started playing or recording there's nothing more to do.
if (!playing_ && !recording_)
return;
// We are in a play or record state, start the audio unit.
if (!audio_unit_->Start()) {
RTCLogError(@"Failed to start audio unit after configuration.");
return;
}
}
void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
LOGI() << "UpdateAudioDevicebuffer";
// AttachAudioBuffer() is called at construction by the main class but check
@ -603,32 +656,35 @@ bool AudioDeviceIOS::RestartAudioUnit(float sample_rate) {
bool AudioDeviceIOS::InitPlayOrRecord() {
LOGI() << "InitPlayOrRecord";
// Use the correct audio session configuration for WebRTC.
// This will attempt to activate the audio session.
RTCAudioSession* session = [RTCAudioSession sharedInstance];
[session lockForConfiguration];
NSError* error = nil;
if (![session configureWebRTCSession:&error]) {
RTCLogError(@"Failed to configure WebRTC session: %@",
error.localizedDescription);
[session unlockForConfiguration];
if (!CreateAudioUnit()) {
return false;
}
// Start observing audio session interruptions and route changes.
RTCAudioSession* session = [RTCAudioSession sharedInstance];
// Subscribe to audio session events.
[session pushDelegate:audio_session_observer_];
// Ensure that we got what we asked for in our active audio session.
SetupAudioBuffersForActiveAudioSession();
// Create, setup and initialize a new Voice-Processing I/O unit.
// TODO(tkchin): Delay the initialization when needed.
if (!CreateAudioUnit() ||
!audio_unit_->Initialize(playout_parameters_.sample_rate())) {
[session setActive:NO error:nil];
// Lock the session to make configuration changes.
[session lockForConfiguration];
NSError* error = nil;
if (![session beginWebRTCSession:&error]) {
[session unlockForConfiguration];
RTCLogError(@"Failed to begin WebRTC session: %@",
error.localizedDescription);
return false;
}
// If we are already configured properly, we can initialize the audio unit.
if (session.isConfiguredForWebRTC) {
[session unlockForConfiguration];
SetupAudioBuffersForActiveAudioSession();
// Audio session has been marked ready for WebRTC so we can initialize the
// audio unit now.
audio_unit_->Initialize(playout_parameters_.sample_rate());
return true;
}
// Release the lock.
[session unlockForConfiguration];
return true;
@ -639,8 +695,6 @@ void AudioDeviceIOS::ShutdownPlayOrRecord() {
// Close and delete the voice-processing I/O unit.
if (audio_unit_) {
audio_unit_->Stop();
audio_unit_->Uninitialize();
audio_unit_.reset();
}
@ -651,7 +705,7 @@ void AudioDeviceIOS::ShutdownPlayOrRecord() {
// All I/O should be stopped or paused prior to deactivating the audio
// session, hence we deactivate as last action.
[session lockForConfiguration];
[session setActive:NO error:nil];
[session endWebRTCSession:nil];
[session unlockForConfiguration];
}

View File

@ -28,6 +28,9 @@ class AudioSessionObserver {
// Called when audio route changes.
virtual void OnValidRouteChange() = 0;
// Called when audio session has been configured for WebRTC.
virtual void OnConfiguredForWebRTC() = 0;
protected:
virtual ~AudioSessionObserver() {}
};

View File

@ -16,9 +16,17 @@
@implementation RTCAudioSession (Configuration)
// The session counts as configured for WebRTC while a pre-WebRTC
// configuration snapshot is held for later restoration.
- (BOOL)isConfiguredForWebRTC {
  return (self.savedConfiguration != nil);
}
- (BOOL)setConfiguration:(RTCAudioSessionConfiguration *)configuration
active:(BOOL)active
error:(NSError **)outError {
NSParameterAssert(configuration);
if (outError) {
*outError = nil;
}
if (![self checkLock:outError]) {
return NO;
}
@ -37,6 +45,8 @@
RTCLogError(@"Failed to set category: %@",
categoryError.localizedDescription);
error = categoryError;
} else {
RTCLog(@"Set category to: %@", configuration.category);
}
}
@ -46,6 +56,8 @@
RTCLogError(@"Failed to set mode: %@",
modeError.localizedDescription);
error = modeError;
} else {
RTCLog(@"Set mode to: %@", configuration.mode);
}
}
@ -57,6 +69,9 @@
RTCLogError(@"Failed to set preferred sample rate: %@",
sampleRateError.localizedDescription);
error = sampleRateError;
} else {
RTCLog(@"Set preferred sample rate to: %.2f",
configuration.sampleRate);
}
}
@ -69,6 +84,9 @@
RTCLogError(@"Failed to set preferred IO buffer duration: %@",
bufferDurationError.localizedDescription);
error = bufferDurationError;
} else {
RTCLog(@"Set preferred IO buffer duration to: %f",
configuration.ioBufferDuration);
}
}
@ -79,7 +97,9 @@
error = activeError;
}
if (self.isActive) {
if (self.isActive &&
// TODO(tkchin): Figure out which category/mode numChannels is valid for.
[self.mode isEqualToString:AVAudioSessionModeVoiceChat]) {
// Try to set the preferred number of hardware audio channels. These calls
// must be done after setting the audio sessions category and mode and
// activating the session.
@ -91,6 +111,9 @@
RTCLogError(@"Failed to set preferred input number of channels: %@",
inputChannelsError.localizedDescription);
error = inputChannelsError;
} else {
RTCLog(@"Set input number of channels to: %ld",
(long)inputNumberOfChannels);
}
}
NSInteger outputNumberOfChannels = configuration.outputNumberOfChannels;
@ -101,6 +124,9 @@
RTCLogError(@"Failed to set preferred output number of channels: %@",
outputChannelsError.localizedDescription);
error = outputChannelsError;
} else {
RTCLog(@"Set output number of channels to: %ld",
(long)outputNumberOfChannels);
}
}
}
@ -113,74 +139,81 @@
}
- (BOOL)configureWebRTCSession:(NSError **)outError {
if (outError) {
*outError = nil;
}
if (![self checkLock:outError]) {
return NO;
}
RTCLog(@"Configuring audio session for WebRTC.");
if (self.isConfiguredForWebRTC) {
RTCLogError(@"Already configured.");
if (outError) {
*outError =
[self configurationErrorWithDescription:@"Already configured."];
}
return NO;
}
// Configure the AVAudioSession and activate it.
// Provide an error even if there isn't one so we can log it.
BOOL hasSucceeded = YES;
NSError *error = nil;
RTCAudioSessionConfiguration *currentConfig =
[RTCAudioSessionConfiguration currentConfiguration];
RTCAudioSessionConfiguration *webRTCConfig =
[RTCAudioSessionConfiguration webRTCConfiguration];
self.savedConfiguration = currentConfig;
if (![self setConfiguration:webRTCConfig active:YES error:&error]) {
RTCLogError(@"Failed to set WebRTC audio configuration: %@",
error.localizedDescription);
// Attempt to restore previous state.
[self setConfiguration:currentConfig active:NO error:nil];
hasSucceeded = NO;
} else if (![self isConfiguredForWebRTC]) {
// Ensure that the active audio session has the correct category and mode.
// This should never happen - this means that we succeeded earlier but
// somehow the settings didn't apply.
RTCLogError(@"Failed to configure audio session.");
// Attempt to restore previous state.
[self setConfiguration:currentConfig active:NO error:nil];
error =
[[NSError alloc] initWithDomain:kRTCAudioSessionErrorDomain
code:kRTCAudioSessionErrorConfiguration
userInfo:nil];
hasSucceeded = NO;
[self unconfigureWebRTCSession:nil];
if (outError) {
*outError = error;
}
return NO;
}
if (outError) {
*outError = error;
}
return hasSucceeded;
}
#pragma mark - Private
- (BOOL)isConfiguredForWebRTC {
// Ensure that the device currently supports audio input.
// TODO(tkchin): Figure out if this is really necessary.
if (!self.inputAvailable) {
RTCLogError(@"No audio input path is available!");
[self unconfigureWebRTCSession:nil];
if (outError) {
*outError = [self configurationErrorWithDescription:@"No input path."];
}
return NO;
}
// Only check a minimal list of requirements for whether we have
// what we want.
RTCAudioSessionConfiguration *currentConfig =
[RTCAudioSessionConfiguration currentConfiguration];
RTCAudioSessionConfiguration *webRTCConfig =
[RTCAudioSessionConfiguration webRTCConfiguration];
// Give delegates a chance to process the event. In particular, the audio
// devices listening to this event will initialize their audio units.
[self notifyDidConfigure];
if (![currentConfig.category isEqualToString:webRTCConfig.category]) {
RTCLog(@"Current category %@ does not match %@",
currentConfig.category,
webRTCConfig.category);
return YES;
}
- (BOOL)unconfigureWebRTCSession:(NSError **)outError {
if (outError) {
*outError = nil;
}
if (![self checkLock:outError]) {
return NO;
}
RTCLog(@"Unconfiguring audio session for WebRTC.");
if (!self.isConfiguredForWebRTC) {
RTCLogError(@"Already unconfigured.");
if (outError) {
*outError =
[self configurationErrorWithDescription:@"Already unconfigured."];
}
return NO;
}
if (![currentConfig.mode isEqualToString:webRTCConfig.mode]) {
RTCLog(@"Current mode %@ does not match %@",
currentConfig.mode,
webRTCConfig.mode);
return NO;
}
[self setConfiguration:self.savedConfiguration active:NO error:outError];
self.savedConfiguration = nil;
[self notifyDidUnconfigure];
return YES;
}

View File

@ -14,12 +14,25 @@
NS_ASSUME_NONNULL_BEGIN
@class RTCAudioSessionConfiguration;
@interface RTCAudioSession ()
/** Number of times setActive:YES has succeeded without a balanced call to
* setActive:NO.
*/
@property(nonatomic, readonly) NSInteger activationCount;
@property(nonatomic, readonly) int activationCount;
/** The number of times |beginWebRTCSession| was called without a balanced call
* to |endWebRTCSession|.
*/
@property(nonatomic, readonly) int webRTCSessionCount;
/** The configuration of the audio session before configureWebRTCSession
* was first called.
*/
@property(nonatomic, strong, nullable)
RTCAudioSessionConfiguration *savedConfiguration;
- (BOOL)checkLock:(NSError **)outError;
@ -29,9 +42,26 @@ NS_ASSUME_NONNULL_BEGIN
*/
- (void)pushDelegate:(id<RTCAudioSessionDelegate>)delegate;
/** Signals RTCAudioSession that a WebRTC session is about to begin and
* audio configuration is needed. Will configure the audio session for WebRTC
* if not already configured and if configuration is not delayed.
* Successful calls must be balanced by a call to endWebRTCSession.
*/
- (BOOL)beginWebRTCSession:(NSError **)outError;
/** Signals RTCAudioSession that a WebRTC session is about to end and audio
* unconfiguration is needed. Will unconfigure the audio session for WebRTC
* if this is the last unmatched call and if configuration is not delayed.
*/
- (BOOL)endWebRTCSession:(NSError **)outError;
/** Returns a configuration error with the given description. */
- (NSError *)configurationErrorWithDescription:(NSString *)description;
// Properties and methods for tests.
@property(nonatomic, readonly)
std::vector<__weak id<RTCAudioSessionDelegate> > delegates;
- (void)notifyDidBeginInterruption;
- (void)notifyDidEndInterruptionWithShouldResumeSession:
(BOOL)shouldResumeSession;
@ -39,6 +69,10 @@ NS_ASSUME_NONNULL_BEGIN
previousRoute:(AVAudioSessionRouteDescription *)previousRoute;
- (void)notifyMediaServicesWereLost;
- (void)notifyMediaServicesWereReset;
- (void)notifyShouldConfigure;
- (void)notifyShouldUnconfigure;
- (void)notifyDidConfigure;
- (void)notifyDidUnconfigure;
@end

View File

@ -27,44 +27,66 @@ extern NSInteger const kRTCAudioSessionErrorConfiguration;
// at which point applications can perform additional processing if required.
@protocol RTCAudioSessionDelegate <NSObject>
/** Called when AVAudioSession starts an interruption event. */
@optional
/** Called on a system notification thread when AVAudioSession starts an
* interruption event.
*/
- (void)audioSessionDidBeginInterruption:(RTCAudioSession *)session;
/** Called when AVAudioSession ends an interruption event. */
/** Called on a system notification thread when AVAudioSession ends an
* interruption event.
*/
- (void)audioSessionDidEndInterruption:(RTCAudioSession *)session
shouldResumeSession:(BOOL)shouldResumeSession;
/** Called when AVAudioSession changes the route. */
/** Called on a system notification thread when AVAudioSession changes the
* route.
*/
- (void)audioSessionDidChangeRoute:(RTCAudioSession *)session
reason:(AVAudioSessionRouteChangeReason)reason
previousRoute:(AVAudioSessionRouteDescription *)previousRoute;
/** Called when AVAudioSession media server terminates. */
/** Called on a system notification thread when AVAudioSession media server
* terminates.
*/
- (void)audioSessionMediaServicesWereLost:(RTCAudioSession *)session;
/** Called when AVAudioSession media server restarts. */
/** Called on a system notification thread when AVAudioSession media server
* restarts.
*/
- (void)audioSessionMediaServicesWereReset:(RTCAudioSession *)session;
/** Called when WebRTC needs to take over audio. Applications should call
* -[RTCAudioSession configure] to allow WebRTC to play and record audio.
* TODO(tkchin): Implement this behavior in RTCAudioSession.
// TODO(tkchin): Maybe handle SilenceSecondaryAudioHintNotification.
/** Called on a WebRTC thread when WebRTC needs to take over audio. Applications
* should call -[RTCAudioSession configureWebRTCSession] to allow WebRTC to
* play and record audio. Will only occur if shouldDelayAudioConfiguration is
* set to YES.
*/
- (void)audioSessionShouldConfigure:(RTCAudioSession *)session;
/** Called when WebRTC no longer requires audio. Applications should restore
* their audio state at this point.
* TODO(tkchin): Implement this behavior in RTCAudioSession.
/** Called on a WebRTC thread when WebRTC no longer requires audio. Applications
* should call -[RTCAudioSession unconfigureWebRTCSession] to restore their
* audio session settings. Will only occur if shouldDelayAudioConfiguration is
* set to YES.
*/
- (void)audioSessionShouldUnconfigure:(RTCAudioSession *)session;
// TODO(tkchin): Maybe handle SilenceSecondaryAudioHintNotification.
/** Called on a WebRTC thread when WebRTC has configured the audio session for
* WebRTC audio.
*/
- (void)audioSessionDidConfigure:(RTCAudioSession *)session;
/** Called on a WebRTC thread when WebRTC has unconfigured the audio session for
* WebRTC audio.
*/
- (void)audioSessionDidUnconfigure:(RTCAudioSession *)session;
@end
/** Proxy class for AVAudioSession that adds a locking mechanism similar to
* AVCaptureDevice. This is used so that interleaving configurations between
* WebRTC and the application layer are avoided. Only setter methods are
* currently proxied. Getters can be accessed directly off AVAudioSession.
* WebRTC and the application layer are avoided.
*
* RTCAudioSession also coordinates activation so that the audio session is
* activated only once. See |setActive:error:|.
@ -87,8 +109,8 @@ extern NSInteger const kRTCAudioSessionErrorConfiguration;
/** If YES, WebRTC will not initialize the audio unit automatically when an
* audio track is ready for playout or recording. Instead, applications should
* listen to the delegate method |audioSessionShouldConfigure| and configure
* the session manually. This should be set before making WebRTC media calls.
* TODO(tkchin): Implement behavior. Currently this just stores a BOOL.
* the session manually. This should be set before making WebRTC media calls
* and should not be changed while a call is active.
*/
@property(nonatomic, assign) BOOL shouldDelayAudioConfiguration;
@ -119,8 +141,9 @@ extern NSInteger const kRTCAudioSessionErrorConfiguration;
@property(readonly) NSTimeInterval outputLatency;
@property(readonly) NSTimeInterval IOBufferDuration;
/** Default constructor. Do not call init. */
/** Default constructor. */
+ (instancetype)sharedInstance;
- (instancetype)init NS_UNAVAILABLE;
/** Adds a delegate, which is held weakly. */
- (void)addDelegate:(id<RTCAudioSessionDelegate>)delegate;
@ -173,6 +196,12 @@ extern NSInteger const kRTCAudioSessionErrorConfiguration;
@interface RTCAudioSession (Configuration)
/** Whether or not |configureWebRTCSession| has been called without a balanced
* call to |unconfigureWebRTCSession|. This is not an indication of whether the
* audio session has the right settings.
*/
@property(readonly) BOOL isConfiguredForWebRTC;
/** Applies the configuration to the current session. Attempts to set all
* properties even if previous ones fail. Only the last error will be
* returned. Also calls setActive with |active|.
@ -182,12 +211,22 @@ extern NSInteger const kRTCAudioSessionErrorConfiguration;
active:(BOOL)active
error:(NSError **)outError;
/** Configure the audio session for WebRTC. On failure, we will attempt to
* restore the previously used audio session configuration.
/** Configure the audio session for WebRTC. This call will fail if the session
* is already configured. On other failures, we will attempt to restore the
* previously used audio session configuration.
* |lockForConfiguration| must be called first.
* Successful calls to configureWebRTCSession must be matched by calls to
* |unconfigureWebRTCSession|.
*/
- (BOOL)configureWebRTCSession:(NSError **)outError;
/** Unconfigures the session for WebRTC. This will attempt to restore the
* audio session to the settings used before |configureWebRTCSession| was
* called.
* |lockForConfiguration| must be called first.
*/
- (BOOL)unconfigureWebRTCSession:(NSError **)outError;
@end
NS_ASSUME_NONNULL_END

View File

@ -10,6 +10,7 @@
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h"
#include "webrtc/base/atomicops.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/modules/audio_device/ios/audio_device_ios.h"
@ -27,20 +28,22 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
@implementation RTCAudioSession {
rtc::CriticalSection _crit;
AVAudioSession *_session;
NSInteger _activationCount;
NSInteger _lockRecursionCount;
volatile int _activationCount;
volatile int _lockRecursionCount;
volatile int _webRTCSessionCount;
BOOL _isActive;
BOOL _shouldDelayAudioConfiguration;
}
@synthesize session = _session;
@synthesize delegates = _delegates;
@synthesize savedConfiguration = _savedConfiguration;
+ (instancetype)sharedInstance {
static dispatch_once_t onceToken;
static RTCAudioSession *sharedInstance = nil;
dispatch_once(&onceToken, ^{
sharedInstance = [[RTCAudioSession alloc] init];
sharedInstance = [[self alloc] init];
});
return sharedInstance;
}
@ -106,13 +109,13 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
}
- (BOOL)isLocked {
@synchronized(self) {
return _lockRecursionCount > 0;
}
return _lockRecursionCount > 0;
}
- (void)setShouldDelayAudioConfiguration:(BOOL)shouldDelayAudioConfiguration {
@synchronized(self) {
// No one should be changing this while an audio device is active.
RTC_DCHECK(!self.isConfiguredForWebRTC);
if (_shouldDelayAudioConfiguration == shouldDelayAudioConfiguration) {
return;
}
@ -126,6 +129,7 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
}
}
// TODO(tkchin): Check for duplicates.
- (void)addDelegate:(id<RTCAudioSessionDelegate>)delegate {
if (!delegate) {
return;
@ -150,18 +154,14 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
- (void)lockForConfiguration {
_crit.Enter();
@synchronized(self) {
++_lockRecursionCount;
}
rtc::AtomicOps::Increment(&_lockRecursionCount);
}
- (void)unlockForConfiguration {
// Don't let threads other than the one that called lockForConfiguration
// unlock.
if (_crit.TryEnter()) {
@synchronized(self) {
--_lockRecursionCount;
}
rtc::AtomicOps::Decrement(&_lockRecursionCount);
// One unlock for the tryLock, and another one to actually unlock. If this
// was called without anyone calling lock, we will hit an assertion.
_crit.Leave();
@ -262,7 +262,7 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
if (![self checkLock:outError]) {
return NO;
}
NSInteger activationCount = self.activationCount;
int activationCount = _activationCount;
if (!active && activationCount == 0) {
RTCLogWarning(@"Attempting to deactivate without prior activation.");
}
@ -304,7 +304,7 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
if (!active) {
[self decrementActivationCount];
}
RTCLog(@"Number of current activations: %ld", (long)self.activationCount);
RTCLog(@"Number of current activations: %d", _activationCount);
return success;
}
@ -496,6 +496,22 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
}
}
- (void)setSavedConfiguration:(RTCAudioSessionConfiguration *)configuration {
@synchronized(self) {
if (_savedConfiguration == configuration) {
return;
}
_savedConfiguration = configuration;
}
}
- (RTCAudioSessionConfiguration *)savedConfiguration {
@synchronized(self) {
return _savedConfiguration;
}
}
// TODO(tkchin): check for duplicates.
- (void)pushDelegate:(id<RTCAudioSessionDelegate>)delegate {
@synchronized(self) {
_delegates.insert(_delegates.begin(), delegate);
@ -512,24 +528,22 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
}
}
- (NSInteger)activationCount {
@synchronized(self) {
return _activationCount;
}
- (int)activationCount {
return _activationCount;
}
- (NSInteger)incrementActivationCount {
- (int)incrementActivationCount {
RTCLog(@"Incrementing activation count.");
@synchronized(self) {
return ++_activationCount;
}
return rtc::AtomicOps::Increment(&_activationCount);
}
- (NSInteger)decrementActivationCount {
RTCLog(@"Decrementing activation count.");
@synchronized(self) {
return --_activationCount;
}
return rtc::AtomicOps::Decrement(&_activationCount);
}
- (int)webRTCSessionCount {
return _webRTCSessionCount;
}
- (BOOL)checkLock:(NSError **)outError {
@ -544,6 +558,99 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
return YES;
}
- (BOOL)beginWebRTCSession:(NSError **)outError {
if (outError) {
*outError = nil;
}
if (![self checkLock:outError]) {
return NO;
}
NSInteger sessionCount = rtc::AtomicOps::Increment(&_webRTCSessionCount);
if (sessionCount > 1) {
// Should already be configured.
RTC_DCHECK(self.isConfiguredForWebRTC);
return YES;
}
// Only perform configuration steps once. Application might have already
// configured the session.
if (self.isConfiguredForWebRTC) {
// Nothing more to do, already configured.
return YES;
}
// If application has prevented automatic configuration, return here and wait
// for application to call configureWebRTCSession.
if (self.shouldDelayAudioConfiguration) {
[self notifyShouldConfigure];
return YES;
}
// Configure audio session.
NSError *error = nil;
if (![self configureWebRTCSession:&error]) {
RTCLogError(@"Error configuring audio session: %@",
error.localizedDescription);
if (outError) {
*outError = error;
}
return NO;
}
return YES;
}
- (BOOL)endWebRTCSession:(NSError **)outError {
if (outError) {
*outError = nil;
}
if (![self checkLock:outError]) {
return NO;
}
int sessionCount = rtc::AtomicOps::Decrement(&_webRTCSessionCount);
RTC_DCHECK_GE(sessionCount, 0);
if (sessionCount != 0) {
// Should still be configured.
RTC_DCHECK(self.isConfiguredForWebRTC);
return YES;
}
// Only unconfigure if application has not done it.
if (!self.isConfiguredForWebRTC) {
// Nothing more to do, already unconfigured.
return YES;
}
// If application has prevented automatic configuration, return here and wait
// for application to call unconfigureWebRTCSession.
if (self.shouldDelayAudioConfiguration) {
[self notifyShouldUnconfigure];
return YES;
}
// Unconfigure audio session.
NSError *error = nil;
if (![self unconfigureWebRTCSession:&error]) {
RTCLogError(@"Error unconfiguring audio session: %@",
error.localizedDescription);
if (outError) {
*outError = error;
}
return NO;
}
return YES;
}
- (NSError *)configurationErrorWithDescription:(NSString *)description {
NSDictionary* userInfo = @{
NSLocalizedDescriptionKey: description,
};
return [[NSError alloc] initWithDomain:kRTCAudioSessionErrorDomain
code:kRTCAudioSessionErrorConfiguration
userInfo:userInfo];
}
- (void)updateAudioSessionAfterEvent {
BOOL shouldActivate = self.activationCount > 0;
AVAudioSessionSetActiveOptions options = shouldActivate ?
@ -561,37 +668,87 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
- (void)notifyDidBeginInterruption {
for (auto delegate : self.delegates) {
[delegate audioSessionDidBeginInterruption:self];
SEL sel = @selector(audioSessionDidBeginInterruption:);
if ([delegate respondsToSelector:sel]) {
[delegate audioSessionDidBeginInterruption:self];
}
}
}
- (void)notifyDidEndInterruptionWithShouldResumeSession:
(BOOL)shouldResumeSession {
for (auto delegate : self.delegates) {
[delegate audioSessionDidEndInterruption:self
shouldResumeSession:shouldResumeSession];
SEL sel = @selector(audioSessionDidEndInterruption:shouldResumeSession:);
if ([delegate respondsToSelector:sel]) {
[delegate audioSessionDidEndInterruption:self
shouldResumeSession:shouldResumeSession];
}
}
}
- (void)notifyDidChangeRouteWithReason:(AVAudioSessionRouteChangeReason)reason
previousRoute:(AVAudioSessionRouteDescription *)previousRoute {
for (auto delegate : self.delegates) {
[delegate audioSessionDidChangeRoute:self
reason:reason
previousRoute:previousRoute];
SEL sel = @selector(audioSessionDidChangeRoute:reason:previousRoute:);
if ([delegate respondsToSelector:sel]) {
[delegate audioSessionDidChangeRoute:self
reason:reason
previousRoute:previousRoute];
}
}
}
- (void)notifyMediaServicesWereLost {
for (auto delegate : self.delegates) {
[delegate audioSessionMediaServicesWereLost:self];
SEL sel = @selector(audioSessionMediaServicesWereLost:);
if ([delegate respondsToSelector:sel]) {
[delegate audioSessionMediaServicesWereLost:self];
}
}
}
- (void)notifyMediaServicesWereReset {
for (auto delegate : self.delegates) {
[delegate audioSessionMediaServicesWereReset:self];
SEL sel = @selector(audioSessionMediaServicesWereReset:);
if ([delegate respondsToSelector:sel]) {
[delegate audioSessionMediaServicesWereReset:self];
}
}
}
- (void)notifyShouldConfigure {
for (auto delegate : self.delegates) {
SEL sel = @selector(audioSessionShouldConfigure:);
if ([delegate respondsToSelector:sel]) {
[delegate audioSessionShouldConfigure:self];
}
}
}
- (void)notifyShouldUnconfigure {
for (auto delegate : self.delegates) {
SEL sel = @selector(audioSessionShouldUnconfigure:);
if ([delegate respondsToSelector:sel]) {
[delegate audioSessionShouldUnconfigure:self];
}
}
}
- (void)notifyDidConfigure {
for (auto delegate : self.delegates) {
SEL sel = @selector(audioSessionDidConfigure:);
if ([delegate respondsToSelector:sel]) {
[delegate audioSessionDidConfigure:self];
}
}
}
- (void)notifyDidUnconfigure {
for (auto delegate : self.delegates) {
SEL sel = @selector(audioSessionDidUnconfigure:);
if ([delegate respondsToSelector:sel]) {
[delegate audioSessionDidUnconfigure:self];
}
}
}

View File

@ -100,6 +100,8 @@ const double kRTCAudioSessionLowComplexityIOBufferDuration = 0.06;
RTCAudioSession *session = [RTCAudioSession sharedInstance];
RTCAudioSessionConfiguration *config =
[[RTCAudioSessionConfiguration alloc] init];
config.category = session.category;
config.categoryOptions = session.categoryOptions;
config.mode = session.mode;
config.sampleRate = session.sampleRate;
config.ioBufferDuration = session.IOBufferDuration;

View File

@ -76,4 +76,8 @@
- (void)audioSessionShouldUnconfigure:(RTCAudioSession *)session {
}
- (void)audioSessionDidConfigure:(RTCAudioSession *)session {
_observer->OnConfiguredForWebRTC();
}
@end

View File

@ -49,6 +49,16 @@ class VoiceProcessingAudioUnit {
~VoiceProcessingAudioUnit();
// TODO(tkchin): enum for state and state checking.
enum State : int32_t {
// Init() should be called.
kInitRequired,
// Audio unit created but not initialized.
kUninitialized,
// Initialized but not started. Equivalent to stopped.
kInitialized,
// Initialized and started.
kStarted,
};
// Number of bytes per audio sample for 16-bit signed integer representation.
static const UInt32 kBytesPerSample;
@ -60,6 +70,8 @@ class VoiceProcessingAudioUnit {
// Does not intialize the audio unit.
bool Init();
VoiceProcessingAudioUnit::State GetState() const;
// Initializes the underlying audio unit with the given sample rate.
bool Initialize(Float64 sample_rate);
@ -118,6 +130,7 @@ class VoiceProcessingAudioUnit {
VoiceProcessingAudioUnitObserver* observer_;
AudioUnit vpio_unit_;
VoiceProcessingAudioUnit::State state_;
};
} // namespace webrtc

View File

@ -56,7 +56,7 @@ static const AudioUnitElement kOutputBus = 0;
VoiceProcessingAudioUnit::VoiceProcessingAudioUnit(
VoiceProcessingAudioUnitObserver* observer)
: observer_(observer), vpio_unit_(nullptr) {
: observer_(observer), vpio_unit_(nullptr), state_(kInitRequired) {
RTC_DCHECK(observer);
}
@ -67,7 +67,7 @@ VoiceProcessingAudioUnit::~VoiceProcessingAudioUnit() {
const UInt32 VoiceProcessingAudioUnit::kBytesPerSample = 2;
bool VoiceProcessingAudioUnit::Init() {
RTC_DCHECK(!vpio_unit_) << "Already called Init().";
RTC_DCHECK_EQ(state_, kInitRequired);
// Create an audio component description to identify the Voice Processing
// I/O audio unit.
@ -165,11 +165,16 @@ bool VoiceProcessingAudioUnit::Init() {
return false;
}
state_ = kUninitialized;
return true;
}
VoiceProcessingAudioUnit::State VoiceProcessingAudioUnit::GetState() const {
return state_;
}
bool VoiceProcessingAudioUnit::Initialize(Float64 sample_rate) {
RTC_DCHECK(vpio_unit_) << "Init() not called.";
RTC_DCHECK_GE(state_, kUninitialized);
RTCLog(@"Initializing audio unit.");
OSStatus result = noErr;
@ -224,11 +229,12 @@ bool VoiceProcessingAudioUnit::Initialize(Float64 sample_rate) {
result = AudioUnitInitialize(vpio_unit_);
}
RTCLog(@"Voice Processing I/O unit is now initialized.");
state_ = kInitialized;
return true;
}
bool VoiceProcessingAudioUnit::Start() {
RTC_DCHECK(vpio_unit_) << "Init() not called.";
RTC_DCHECK_GE(state_, kUninitialized);
RTCLog(@"Starting audio unit.");
OSStatus result = AudioOutputUnitStart(vpio_unit_);
@ -236,11 +242,12 @@ bool VoiceProcessingAudioUnit::Start() {
RTCLogError(@"Failed to start audio unit. Error=%ld", (long)result);
return false;
}
state_ = kStarted;
return true;
}
bool VoiceProcessingAudioUnit::Stop() {
RTC_DCHECK(vpio_unit_) << "Init() not called.";
RTC_DCHECK_GE(state_, kUninitialized);
RTCLog(@"Stopping audio unit.");
OSStatus result = AudioOutputUnitStop(vpio_unit_);
@ -248,11 +255,12 @@ bool VoiceProcessingAudioUnit::Stop() {
RTCLogError(@"Failed to stop audio unit. Error=%ld", (long)result);
return false;
}
state_ = kInitialized;
return true;
}
bool VoiceProcessingAudioUnit::Uninitialize() {
RTC_DCHECK(vpio_unit_) << "Init() not called.";
RTC_DCHECK_GE(state_, kUninitialized);
RTCLog(@"Unintializing audio unit.");
OSStatus result = AudioUnitUninitialize(vpio_unit_);
@ -347,6 +355,18 @@ AudioStreamBasicDescription VoiceProcessingAudioUnit::GetFormat(
void VoiceProcessingAudioUnit::DisposeAudioUnit() {
if (vpio_unit_) {
switch (state_) {
case kStarted:
Stop();
// Fall through.
case kInitialized:
Uninitialize();
break;
case kUninitialized:
case kInitRequired:
break;
}
OSStatus result = AudioComponentInstanceDispose(vpio_unit_);
if (result != noErr) {
RTCLogError(@"AudioComponentInstanceDispose failed. Error=%ld.",