Provide an isAudioEnabled flag on RTCAudioSession to control the audio unit.

- Also removes rtc::AsyncInvoker usage in favor of posting messages to the worker thread.

BUG=

Review-Url: https://codereview.webrtc.org/1945563003
Cr-Commit-Position: refs/heads/master@{#12651}
tkchin 2016-05-06 18:54:15 -07:00 committed by Commit bot
parent 8f65cdf22b
commit d251196d37
16 changed files with 595 additions and 440 deletions
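
For reviewers, a minimal sketch of the application-side flow this change enables (names match the AppRTCDemo and RTCAudioSession diffs below; exactly when to flip isAudioEnabled is an application decision):

RTCAudioSession *session = [RTCAudioSession sharedInstance];
// Opt out of automatic audio unit management before the call starts.
session.useManualAudio = YES;
// Keep the VoIP audio unit stopped and uninitialized for now.
session.isAudioEnabled = NO;
// ... later, once app-owned audio (e.g. an AVPlayer) has finished:
session.isAudioEnabled = YES;  // WebRTC may now initialize/start the unit.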

View File

@ -15,10 +15,10 @@
@protocol ARDMainViewDelegate <NSObject>
- (void)mainView:(ARDMainView *)mainView
didInputRoom:(NSString *)room
isLoopback:(BOOL)isLoopback
isAudioOnly:(BOOL)isAudioOnly
shouldDelayAudioConfig:(BOOL)shouldDelayAudioConfig;
didInputRoom:(NSString *)room
isLoopback:(BOOL)isLoopback
isAudioOnly:(BOOL)isAudioOnly
useManualAudio:(BOOL)useManualAudio;
- (void)mainViewDidToggleAudioLoop:(ARDMainView *)mainView;

View File

@ -122,8 +122,8 @@ static CGFloat const kAppLabelHeight = 20;
UILabel *_audioOnlyLabel;
UISwitch *_loopbackSwitch;
UILabel *_loopbackLabel;
UISwitch *_audioConfigDelaySwitch;
UILabel *_audioConfigDelayLabel;
UISwitch *_useManualAudioSwitch;
UILabel *_useManualAudioLabel;
UIButton *_startCallButton;
UIButton *_audioLoopButton;
}
@ -175,17 +175,17 @@ static CGFloat const kAppLabelHeight = 20;
[_loopbackLabel sizeToFit];
[self addSubview:_loopbackLabel];
_audioConfigDelaySwitch = [[UISwitch alloc] initWithFrame:CGRectZero];
[_audioConfigDelaySwitch sizeToFit];
_audioConfigDelaySwitch.on = YES;
[self addSubview:_audioConfigDelaySwitch];
_useManualAudioSwitch = [[UISwitch alloc] initWithFrame:CGRectZero];
[_useManualAudioSwitch sizeToFit];
_useManualAudioSwitch.on = YES;
[self addSubview:_useManualAudioSwitch];
_audioConfigDelayLabel = [[UILabel alloc] initWithFrame:CGRectZero];
_audioConfigDelayLabel.text = @"Delay audio config";
_audioConfigDelayLabel.font = controlFont;
_audioConfigDelayLabel.textColor = controlFontColor;
[_audioConfigDelayLabel sizeToFit];
[self addSubview:_audioConfigDelayLabel];
_useManualAudioLabel = [[UILabel alloc] initWithFrame:CGRectZero];
_useManualAudioLabel.text = @"Use manual audio config";
_useManualAudioLabel.font = controlFont;
_useManualAudioLabel.textColor = controlFontColor;
[_useManualAudioLabel sizeToFit];
[self addSubview:_useManualAudioLabel];
_startCallButton = [UIButton buttonWithType:UIButtonTypeSystem];
_startCallButton.backgroundColor = [UIColor blueColor];
@ -275,22 +275,22 @@ static CGFloat const kAppLabelHeight = 20;
_loopbackLabel.center = CGPointMake(loopbackModeLabelCenterX,
CGRectGetMidY(loopbackModeRect));
CGFloat audioConfigDelayTop =
CGFloat useManualAudioTop =
CGRectGetMaxY(_loopbackSwitch.frame) + kCallControlMargin;
CGRect audioConfigDelayRect =
CGRect useManualAudioRect =
CGRectMake(kCallControlMargin * 3,
audioConfigDelayTop,
_audioConfigDelaySwitch.frame.size.width,
_audioConfigDelaySwitch.frame.size.height);
_audioConfigDelaySwitch.frame = audioConfigDelayRect;
CGFloat audioConfigDelayLabelCenterX = CGRectGetMaxX(audioConfigDelayRect) +
kCallControlMargin + _audioConfigDelayLabel.frame.size.width / 2;
_audioConfigDelayLabel.center =
CGPointMake(audioConfigDelayLabelCenterX,
CGRectGetMidY(audioConfigDelayRect));
useManualAudioTop,
_useManualAudioSwitch.frame.size.width,
_useManualAudioSwitch.frame.size.height);
_useManualAudioSwitch.frame = useManualAudioRect;
CGFloat useManualAudioLabelCenterX = CGRectGetMaxX(useManualAudioRect) +
kCallControlMargin + _useManualAudioLabel.frame.size.width / 2;
_useManualAudioLabel.center =
CGPointMake(useManualAudioLabelCenterX,
CGRectGetMidY(useManualAudioRect));
CGFloat audioLoopTop =
CGRectGetMaxY(audioConfigDelayRect) + kCallControlMargin * 3;
CGRectGetMaxY(useManualAudioRect) + kCallControlMargin * 3;
_audioLoopButton.frame = CGRectMake(kCallControlMargin,
audioLoopTop,
_audioLoopButton.frame.size.width,
@ -335,7 +335,7 @@ static CGFloat const kAppLabelHeight = 20;
didInputRoom:room
isLoopback:_loopbackSwitch.isOn
isAudioOnly:_audioOnlySwitch.isOn
shouldDelayAudioConfig:_audioConfigDelaySwitch.isOn];
useManualAudio:_useManualAudioSwitch.isOn];
}
@end

View File

@ -23,13 +23,14 @@
@interface ARDMainViewController () <
ARDMainViewDelegate,
ARDVideoCallViewControllerDelegate,
RTCAudioSessionDelegate>
@end
@implementation ARDMainViewController {
ARDMainView *_mainView;
AVAudioPlayer *_audioPlayer;
BOOL _shouldDelayAudioConfig;
BOOL _useManualAudio;
}
- (void)loadView {
@ -37,17 +38,26 @@
_mainView.delegate = self;
self.view = _mainView;
[self setupAudioSession];
RTCAudioSessionConfiguration *webRTCConfig =
[RTCAudioSessionConfiguration webRTCConfiguration];
webRTCConfig.categoryOptions = webRTCConfig.categoryOptions |
AVAudioSessionCategoryOptionDefaultToSpeaker;
[RTCAudioSessionConfiguration setWebRTCConfiguration:webRTCConfig];
RTCAudioSession *session = [RTCAudioSession sharedInstance];
[session addDelegate:self];
[self configureAudioSession];
[self setupAudioPlayer];
}
#pragma mark - ARDMainViewDelegate
- (void)mainView:(ARDMainView *)mainView
didInputRoom:(NSString *)room
isLoopback:(BOOL)isLoopback
isAudioOnly:(BOOL)isAudioOnly
shouldDelayAudioConfig:(BOOL)shouldDelayAudioConfig {
didInputRoom:(NSString *)room
isLoopback:(BOOL)isLoopback
isAudioOnly:(BOOL)isAudioOnly
useManualAudio:(BOOL)useManualAudio {
if (!room.length) {
[self showAlertWithMessage:@"Missing room name."];
return;
@ -77,15 +87,16 @@
return;
}
_shouldDelayAudioConfig = shouldDelayAudioConfig;
RTCAudioSession *session = [RTCAudioSession sharedInstance];
session.shouldDelayAudioConfiguration = _shouldDelayAudioConfig;
session.useManualAudio = useManualAudio;
session.isAudioEnabled = NO;
// Kick off the video call.
ARDVideoCallViewController *videoCallViewController =
[[ARDVideoCallViewController alloc] initForRoom:trimmedRoom
isLoopback:isLoopback
isAudioOnly:isAudioOnly];
isAudioOnly:isAudioOnly
delegate:self];
videoCallViewController.modalTransitionStyle =
UIModalTransitionStyleCrossDissolve;
[self presentViewController:videoCallViewController
@ -102,10 +113,22 @@
mainView.isAudioLoopPlaying = _audioPlayer.playing;
}
#pragma mark - ARDVideoCallViewControllerDelegate
- (void)viewControllerDidFinish:(ARDVideoCallViewController *)viewController {
if (![viewController isBeingDismissed]) {
RTCLog(@"Dismissing VC");
[self dismissViewControllerAnimated:YES completion:^{
[self restartAudioPlayerIfNeeded];
}];
}
RTCAudioSession *session = [RTCAudioSession sharedInstance];
session.isAudioEnabled = NO;
}
#pragma mark - RTCAudioSessionDelegate
- (void)audioSessionShouldConfigure:(RTCAudioSession *)session {
// Won't get called unless audio config is delayed.
- (void)audioSessionDidStartPlayOrRecord:(RTCAudioSession *)session {
// Stop playback on main queue and then configure WebRTC.
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeMain
block:^{
@ -113,35 +136,23 @@
RTCLog(@"Stopping audio loop due to WebRTC start.");
[_audioPlayer stop];
}
// TODO(tkchin): Shouldn't lock on main queue. Figure out better way to
// check audio loop state.
[session lockForConfiguration];
[session configureWebRTCSession:nil];
[session unlockForConfiguration];
RTCLog(@"Setting isAudioEnabled to YES.");
session.isAudioEnabled = YES;
}];
}
- (void)audioSessionShouldUnconfigure:(RTCAudioSession *)session {
// Won't get called unless audio config is delayed.
[session lockForConfiguration];
[session unconfigureWebRTCSession:nil];
[session unlockForConfiguration];
}
- (void)audioSessionDidUnconfigure:(RTCAudioSession *)session {
- (void)audioSessionDidStopPlayOrRecord:(RTCAudioSession *)session {
// WebRTC is done with the audio session. Restart playback.
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeMain
block:^{
if (_mainView.isAudioLoopPlaying) {
RTCLog(@"Starting audio loop due to WebRTC end.");
[_audioPlayer play];
}
RTCLog(@"audioSessionDidStopPlayOrRecord");
[self restartAudioPlayerIfNeeded];
}];
}
#pragma mark - Private
- (void)setupAudioSession {
- (void)configureAudioSession {
RTCAudioSessionConfiguration *configuration =
[[RTCAudioSessionConfiguration alloc] init];
configuration.category = AVAudioSessionCategoryAmbient;
@ -149,10 +160,17 @@
configuration.mode = AVAudioSessionModeDefault;
RTCAudioSession *session = [RTCAudioSession sharedInstance];
[session addDelegate:self];
[session lockForConfiguration];
BOOL hasSucceeded = NO;
NSError *error = nil;
if (![session setConfiguration:configuration active:YES error:&error]) {
if (session.isActive) {
hasSucceeded = [session setConfiguration:configuration error:&error];
} else {
hasSucceeded = [session setConfiguration:configuration
active:YES
error:&error];
}
if (!hasSucceeded) {
RTCLogError(@"Error setting configuration: %@", error.localizedDescription);
}
[session unlockForConfiguration];
@ -169,6 +187,14 @@
[_audioPlayer prepareToPlay];
}
- (void)restartAudioPlayerIfNeeded {
if (_mainView.isAudioLoopPlaying && !self.presentedViewController) {
RTCLog(@"Starting audio loop due to WebRTC end.");
[self configureAudioSession];
[_audioPlayer play];
}
}
- (void)showAlertWithMessage:(NSString*)message {
UIAlertView* alertView = [[UIAlertView alloc] initWithTitle:nil
message:message

View File

@ -10,10 +10,20 @@
#import <UIKit/UIKit.h>
@class ARDVideoCallViewController;
@protocol ARDVideoCallViewControllerDelegate <NSObject>
- (void)viewControllerDidFinish:(ARDVideoCallViewController *)viewController;
@end
@interface ARDVideoCallViewController : UIViewController
@property(nonatomic, weak) id<ARDVideoCallViewControllerDelegate> delegate;
- (instancetype)initForRoom:(NSString *)room
isLoopback:(BOOL)isLoopback
isAudioOnly:(BOOL)isAudioOnly;
isAudioOnly:(BOOL)isAudioOnly
delegate:(id<ARDVideoCallViewControllerDelegate>)delegate;
@end

View File

@ -34,11 +34,14 @@
}
@synthesize videoCallView = _videoCallView;
@synthesize delegate = _delegate;
- (instancetype)initForRoom:(NSString *)room
isLoopback:(BOOL)isLoopback
isAudioOnly:(BOOL)isAudioOnly {
isAudioOnly:(BOOL)isAudioOnly
delegate:(id<ARDVideoCallViewControllerDelegate>)delegate {
if (self = [super init]) {
_delegate = delegate;
_client = [[ARDAppClient alloc] initWithDelegate:self];
[_client connectToRoomWithId:room
isLoopback:isLoopback
@ -177,10 +180,7 @@
self.remoteVideoTrack = nil;
self.localVideoTrack = nil;
[_client disconnect];
if (![self isBeingDismissed]) {
[self.presentingViewController dismissViewControllerAnimated:YES
completion:nil];
}
[_delegate viewControllerDidFinish:self];
}
- (void)switchCamera {

View File

@ -14,7 +14,6 @@
#include <memory>
#include "WebRTC/RTCMacros.h"
#include "webrtc/base/asyncinvoker.h"
#include "webrtc/base/thread.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
@ -42,7 +41,8 @@ class FineAudioBuffer;
// same thread.
class AudioDeviceIOS : public AudioDeviceGeneric,
public AudioSessionObserver,
public VoiceProcessingAudioUnitObserver {
public VoiceProcessingAudioUnitObserver,
public rtc::MessageHandler {
public:
AudioDeviceIOS();
~AudioDeviceIOS();
@ -162,7 +162,7 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
void OnInterruptionBegin() override;
void OnInterruptionEnd() override;
void OnValidRouteChange() override;
void OnConfiguredForWebRTC() override;
void OnCanPlayOrRecordChange(bool can_play_or_record) override;
// VoiceProcessingAudioUnitObserver methods.
OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
@ -176,12 +176,16 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
UInt32 num_frames,
AudioBufferList* io_data) override;
// Handles messages from posts.
void OnMessage(rtc::Message *msg) override;
private:
// Called by the relevant AudioSessionObserver methods on |thread_|.
void HandleInterruptionBegin();
void HandleInterruptionEnd();
void HandleValidRouteChange();
void HandleConfiguredForWebRTC();
void HandleCanPlayOrRecordChange(bool can_play_or_record);
void HandleSampleRateChange(float sample_rate);
// Uses current |playout_parameters_| and |record_parameters_| to inform the
// audio device buffer (ADB) about our internal audio parameters.
@ -197,9 +201,13 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
// Creates the audio unit.
bool CreateAudioUnit();
// Restarts active audio streams using a new sample rate. Required when e.g.
// a BT headset is enabled or disabled.
bool RestartAudioUnit(float sample_rate);
// Updates the audio unit state based on current state.
void UpdateAudioUnit(bool can_play_or_record);
// Configures the audio session for WebRTC.
void ConfigureAudioSession();
// Unconfigures the audio session.
void UnconfigureAudioSession();
// Activates our audio session, creates and initializes the voice-processing
// audio unit and verifies that we got the preferred native audio parameters.
@ -213,8 +221,6 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
rtc::ThreadChecker thread_checker_;
// Thread that this object is created on.
rtc::Thread* thread_;
// Invoker used to execute methods on thread_.
std::unique_ptr<rtc::AsyncInvoker> async_invoker_;
// Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
// AudioDeviceModuleImpl class and called by AudioDeviceModule::Create().
@ -284,6 +290,9 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
// Audio interruption observer instance.
RTCAudioSessionDelegateAdapter* audio_session_observer_;
// Set to true if we've configured the audio session for WebRTC.
bool has_configured_session_;
};
} // namespace webrtc

View File

@ -61,6 +61,13 @@ namespace webrtc {
const UInt16 kFixedPlayoutDelayEstimate = 30;
const UInt16 kFixedRecordDelayEstimate = 30;
enum AudioDeviceMessageType : uint32_t {
kMessageTypeInterruptionBegin,
kMessageTypeInterruptionEnd,
kMessageTypeValidRouteChange,
kMessageTypeCanPlayOrRecordChange,
};
using ios::CheckAndLogError;
#if !defined(NDEBUG)
@ -85,15 +92,15 @@ static void LogDeviceInfo() {
#endif // !defined(NDEBUG)
AudioDeviceIOS::AudioDeviceIOS()
: async_invoker_(new rtc::AsyncInvoker()),
audio_device_buffer_(nullptr),
: audio_device_buffer_(nullptr),
audio_unit_(nullptr),
recording_(0),
playing_(0),
initialized_(false),
rec_is_initialized_(false),
play_is_initialized_(false),
is_interrupted_(false) {
is_interrupted_(false),
has_configured_session_(false) {
LOGI() << "ctor" << ios::GetCurrentThreadDescription();
thread_ = rtc::Thread::Current();
audio_session_observer_ =
@ -191,6 +198,7 @@ int32_t AudioDeviceIOS::StartPlayout() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTC_DCHECK(play_is_initialized_);
RTC_DCHECK(!playing_);
RTC_DCHECK(audio_unit_);
if (fine_audio_buffer_) {
fine_audio_buffer_->ResetPlayout();
}
@ -209,7 +217,11 @@ int32_t AudioDeviceIOS::StartPlayout() {
int32_t AudioDeviceIOS::StopPlayout() {
LOGI() << "StopPlayout";
RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!play_is_initialized_ || !playing_) {
if (!play_is_initialized_) {
return 0;
}
if (!playing_) {
play_is_initialized_ = false;
return 0;
}
if (!recording_) {
@ -225,6 +237,7 @@ int32_t AudioDeviceIOS::StartRecording() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTC_DCHECK(rec_is_initialized_);
RTC_DCHECK(!recording_);
RTC_DCHECK(audio_unit_);
if (fine_audio_buffer_) {
fine_audio_buffer_->ResetRecord();
}
@ -243,7 +256,11 @@ int32_t AudioDeviceIOS::StartRecording() {
int32_t AudioDeviceIOS::StopRecording() {
LOGI() << "StopRecording";
RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!rec_is_initialized_ || !recording_) {
if (!rec_is_initialized_) {
return 0;
}
if (!recording_) {
rec_is_initialized_ = false;
return 0;
}
if (!playing_) {
@ -318,51 +335,24 @@ int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
}
void AudioDeviceIOS::OnInterruptionBegin() {
RTC_DCHECK(async_invoker_);
RTC_DCHECK(thread_);
if (thread_->IsCurrent()) {
HandleInterruptionBegin();
return;
}
async_invoker_->AsyncInvoke<void>(
thread_,
rtc::Bind(&webrtc::AudioDeviceIOS::HandleInterruptionBegin, this));
thread_->Post(this, kMessageTypeInterruptionBegin);
}
void AudioDeviceIOS::OnInterruptionEnd() {
RTC_DCHECK(async_invoker_);
RTC_DCHECK(thread_);
if (thread_->IsCurrent()) {
HandleInterruptionEnd();
return;
}
async_invoker_->AsyncInvoke<void>(
thread_,
rtc::Bind(&webrtc::AudioDeviceIOS::HandleInterruptionEnd, this));
thread_->Post(this, kMessageTypeInterruptionEnd);
}
void AudioDeviceIOS::OnValidRouteChange() {
RTC_DCHECK(async_invoker_);
RTC_DCHECK(thread_);
if (thread_->IsCurrent()) {
HandleValidRouteChange();
return;
}
async_invoker_->AsyncInvoke<void>(
thread_,
rtc::Bind(&webrtc::AudioDeviceIOS::HandleValidRouteChange, this));
thread_->Post(this, kMessageTypeValidRouteChange);
}
void AudioDeviceIOS::OnConfiguredForWebRTC() {
RTC_DCHECK(async_invoker_);
void AudioDeviceIOS::OnCanPlayOrRecordChange(bool can_play_or_record) {
RTC_DCHECK(thread_);
if (thread_->IsCurrent()) {
HandleValidRouteChange();
return;
}
async_invoker_->AsyncInvoke<void>(
thread_,
rtc::Bind(&webrtc::AudioDeviceIOS::HandleConfiguredForWebRTC, this));
thread_->Post(this, kMessageTypeCanPlayOrRecordChange,
new rtc::TypedMessageData<bool>(can_play_or_record));
}
OSStatus AudioDeviceIOS::OnDeliverRecordedData(
@ -385,6 +375,9 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(
RTCLogWarning(@"Expected %u frames but got %u",
static_cast<unsigned int>(frames_per_buffer),
static_cast<unsigned int>(num_frames));
RTCAudioSession *session = [RTCAudioSession sharedInstance];
RTCLogWarning(@"Session:\n %@", session);
return result;
}
@ -447,12 +440,36 @@ OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
return noErr;
}
void AudioDeviceIOS::OnMessage(rtc::Message *msg) {
switch (msg->message_id) {
case kMessageTypeInterruptionBegin:
HandleInterruptionBegin();
break;
case kMessageTypeInterruptionEnd:
HandleInterruptionEnd();
break;
case kMessageTypeValidRouteChange:
HandleValidRouteChange();
break;
case kMessageTypeCanPlayOrRecordChange: {
rtc::TypedMessageData<bool>* data =
static_cast<rtc::TypedMessageData<bool>*>(msg->pdata);
HandleCanPlayOrRecordChange(data->data());
delete data;
break;
}
}
}
void AudioDeviceIOS::HandleInterruptionBegin() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTCLog(@"Stopping the audio unit due to interruption begin.");
if (!audio_unit_->Stop()) {
RTCLogError(@"Failed to stop the audio unit.");
if (audio_unit_ &&
audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
RTCLog(@"Stopping the audio unit due to interruption begin.");
if (!audio_unit_->Stop()) {
RTCLogError(@"Failed to stop the audio unit for interruption begin.");
}
}
is_interrupted_ = true;
}
@ -460,66 +477,95 @@ void AudioDeviceIOS::HandleInterruptionBegin() {
void AudioDeviceIOS::HandleInterruptionEnd() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTCLog(@"Starting the audio unit due to interruption end.");
if (!audio_unit_->Start()) {
RTCLogError(@"Failed to start the audio unit.");
}
is_interrupted_ = false;
RTCLog(@"Interruption ended. Updating audio unit state.");
UpdateAudioUnit([RTCAudioSession sharedInstance].canPlayOrRecord);
}
void AudioDeviceIOS::HandleValidRouteChange() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
// Don't do anything if we're interrupted.
if (is_interrupted_) {
return;
}
// Only restart audio for a valid route change if the session sample rate
// has changed.
RTCAudioSession* session = [RTCAudioSession sharedInstance];
const double current_sample_rate = playout_parameters_.sample_rate();
const double session_sample_rate = session.sampleRate;
if (current_sample_rate != session_sample_rate) {
RTCLog(@"Route changed caused sample rate to change from %f to %f. "
"Restarting audio unit.", current_sample_rate, session_sample_rate);
if (!RestartAudioUnit(session_sample_rate)) {
RTCLogError(@"Audio restart failed.");
}
}
HandleSampleRateChange(session.sampleRate);
}
void AudioDeviceIOS::HandleConfiguredForWebRTC() {
void AudioDeviceIOS::HandleCanPlayOrRecordChange(bool can_play_or_record) {
RTCLog(@"Handling CanPlayOrRecord change to: %d", can_play_or_record);
UpdateAudioUnit(can_play_or_record);
}
void AudioDeviceIOS::HandleSampleRateChange(float sample_rate) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTCLog(@"Handling sample rate change to %f.", sample_rate);
// If we're not initialized we don't need to do anything. Audio unit will
// be initialized on initialization.
if (!rec_is_initialized_ && !play_is_initialized_)
// Don't do anything if we're interrupted.
if (is_interrupted_) {
RTCLog(@"Ignoring sample rate change to %f due to interruption.",
sample_rate);
return;
}
// If we're initialized, we must have an audio unit.
RTC_DCHECK(audio_unit_);
// If we don't have an audio unit yet, or the audio unit is uninitialized,
// there is no work to do.
if (!audio_unit_ ||
audio_unit_->GetState() < VoiceProcessingAudioUnit::kInitialized) {
return;
}
// Use configured audio session's settings to set up audio device buffer.
// TODO(tkchin): Use RTCAudioSessionConfiguration to pick up settings and
// pass it along.
// The audio unit is already initialized or started.
// Check to see if the sample rate or buffer size has changed.
RTCAudioSession* session = [RTCAudioSession sharedInstance];
const double session_sample_rate = session.sampleRate;
const NSTimeInterval session_buffer_duration = session.IOBufferDuration;
const size_t session_frames_per_buffer =
static_cast<size_t>(session_sample_rate * session_buffer_duration + .5);
const double current_sample_rate = playout_parameters_.sample_rate();
const size_t current_frames_per_buffer =
playout_parameters_.frames_per_buffer();
RTCLog(@"Handling playout sample rate change to: %f\n"
" Session sample rate: %f frames_per_buffer: %lu\n"
" ADM sample rate: %f frames_per_buffer: %lu",
sample_rate,
session_sample_rate, (unsigned long)session_frames_per_buffer,
current_sample_rate, (unsigned long)current_frames_per_buffer);
// Sample rate and buffer size are the same, no work to do.
if (fabs(current_sample_rate - session_sample_rate) <= DBL_EPSILON &&
current_frames_per_buffer == session_frames_per_buffer) {
return;
}
// We need to adjust our format and buffer sizes.
// The stream format is about to be changed and it requires that we first
// stop and uninitialize the audio unit to deallocate its resources.
RTCLog(@"Stopping and uninitializing audio unit to adjust buffers.");
bool restart_audio_unit = false;
if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
audio_unit_->Stop();
restart_audio_unit = true;
}
if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
audio_unit_->Uninitialize();
}
// Allocate new buffers given the new stream format.
SetupAudioBuffersForActiveAudioSession();
// Initialize the audio unit. This will affect any existing audio playback.
if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) {
RTCLogError(@"Failed to initialize audio unit after configuration.");
// Initialize the audio unit again with the new sample rate.
RTC_DCHECK_EQ(playout_parameters_.sample_rate(), session_sample_rate);
if (!audio_unit_->Initialize(session_sample_rate)) {
RTCLogError(@"Failed to initialize the audio unit with sample rate: %f",
session_sample_rate);
return;
}
// If we haven't started playing or recording there's nothing more to do.
if (!playing_ && !recording_)
return;
// We are in a play or record state, start the audio unit.
if (!audio_unit_->Start()) {
RTCLogError(@"Failed to start audio unit after configuration.");
// Restart the audio unit if it was already running.
if (restart_audio_unit && !audio_unit_->Start()) {
RTCLogError(@"Failed to start audio unit with sample rate: %f",
session_sample_rate);
return;
}
RTCLog(@"Successfully handled sample rate change.");
}
void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
@ -597,6 +643,7 @@ void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
// at each input callback when calling AudioUnitRender().
const int data_byte_size = record_parameters_.GetBytesPerBuffer();
record_audio_buffer_.reset(new SInt8[data_byte_size]);
memset(record_audio_buffer_.get(), 0, data_byte_size);
audio_record_buffer_list_.mNumberBuffers = 1;
AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0];
audio_buffer->mNumberChannels = record_parameters_.channels();
@ -616,46 +663,117 @@ bool AudioDeviceIOS::CreateAudioUnit() {
return true;
}
bool AudioDeviceIOS::RestartAudioUnit(float sample_rate) {
RTCLog(@"Restarting audio unit with new sample rate: %f", sample_rate);
void AudioDeviceIOS::UpdateAudioUnit(bool can_play_or_record) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTCLog(@"Updating audio unit state. CanPlayOrRecord=%d IsInterrupted=%d",
can_play_or_record, is_interrupted_);
// Stop the active audio unit.
if (!audio_unit_->Stop()) {
RTCLogError(@"Failed to stop the audio unit.");
return false;
if (is_interrupted_) {
RTCLog(@"Ignoring audio unit update due to interruption.");
return;
}
// The stream format is about to be changed and it requires that we first
// uninitialize it to deallocate its resources.
if (!audio_unit_->Uninitialize()) {
RTCLogError(@"Failed to uninitialize the audio unit.");
return false;
// If we're not initialized we don't need to do anything. Audio unit will
// be initialized on initialization.
if (!rec_is_initialized_ && !play_is_initialized_)
return;
// If we're initialized, we must have an audio unit.
RTC_DCHECK(audio_unit_);
bool should_initialize_audio_unit = false;
bool should_uninitialize_audio_unit = false;
bool should_start_audio_unit = false;
bool should_stop_audio_unit = false;
switch (audio_unit_->GetState()) {
case VoiceProcessingAudioUnit::kInitRequired:
RTC_NOTREACHED();
break;
case VoiceProcessingAudioUnit::kUninitialized:
should_initialize_audio_unit = can_play_or_record;
should_start_audio_unit = should_initialize_audio_unit &&
(playing_ || recording_);
break;
case VoiceProcessingAudioUnit::kInitialized:
should_start_audio_unit =
can_play_or_record && (playing_ || recording_);
should_uninitialize_audio_unit = !can_play_or_record;
break;
case VoiceProcessingAudioUnit::kStarted:
RTC_DCHECK(playing_ || recording_);
should_stop_audio_unit = !can_play_or_record;
should_uninitialize_audio_unit = should_stop_audio_unit;
break;
}
// Allocate new buffers given the new stream format.
SetupAudioBuffersForActiveAudioSession();
// Initialize the audio unit again with the new sample rate.
RTC_DCHECK_EQ(playout_parameters_.sample_rate(), sample_rate);
if (!audio_unit_->Initialize(sample_rate)) {
RTCLogError(@"Failed to initialize the audio unit with sample rate: %f",
sample_rate);
return false;
if (should_initialize_audio_unit) {
RTCLog(@"Initializing audio unit for UpdateAudioUnit");
ConfigureAudioSession();
SetupAudioBuffersForActiveAudioSession();
if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) {
RTCLogError(@"Failed to initialize audio unit.");
return;
}
}
// Restart the audio unit.
if (!audio_unit_->Start()) {
RTCLogError(@"Failed to start audio unit.");
return false;
if (should_start_audio_unit) {
RTCLog(@"Starting audio unit for UpdateAudioUnit");
if (!audio_unit_->Start()) {
RTCLogError(@"Failed to start audio unit.");
return;
}
}
RTCLog(@"Successfully restarted audio unit.");
return true;
if (should_stop_audio_unit) {
RTCLog(@"Stopping audio unit for UpdateAudioUnit");
if (!audio_unit_->Stop()) {
RTCLogError(@"Failed to stop audio unit.");
return;
}
}
if (should_uninitialize_audio_unit) {
RTCLog(@"Uninitializing audio unit for UpdateAudioUnit");
audio_unit_->Uninitialize();
UnconfigureAudioSession();
}
}
void AudioDeviceIOS::ConfigureAudioSession() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTCLog(@"Configuring audio session.");
if (has_configured_session_) {
RTCLogWarning(@"Audio session already configured.");
return;
}
RTCAudioSession* session = [RTCAudioSession sharedInstance];
[session lockForConfiguration];
[session configureWebRTCSession:nil];
[session unlockForConfiguration];
has_configured_session_ = true;
RTCLog(@"Configured audio session.");
}
void AudioDeviceIOS::UnconfigureAudioSession() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTCLog(@"Unconfiguring audio session.");
if (!has_configured_session_) {
RTCLogWarning(@"Audio session already unconfigured.");
return;
}
RTCAudioSession* session = [RTCAudioSession sharedInstance];
[session lockForConfiguration];
[session unconfigureWebRTCSession:nil];
[session unlockForConfiguration];
has_configured_session_ = false;
RTCLog(@"Unconfigured audio session.");
}
bool AudioDeviceIOS::InitPlayOrRecord() {
LOGI() << "InitPlayOrRecord";
// There should be no audio unit at this point.
if (!CreateAudioUnit()) {
return false;
}
@ -674,14 +792,11 @@ bool AudioDeviceIOS::InitPlayOrRecord() {
return false;
}
// If we are already configured properly, we can initialize the audio unit.
if (session.isConfiguredForWebRTC) {
[session unlockForConfiguration];
// If we are ready to play or record, initialize the audio unit.
if (session.canPlayOrRecord) {
ConfigureAudioSession();
SetupAudioBuffersForActiveAudioSession();
// Audio session has been marked ready for WebRTC so we can initialize the
// audio unit now.
audio_unit_->Initialize(playout_parameters_.sample_rate());
return true;
}
// Release the lock.
@ -694,9 +809,7 @@ void AudioDeviceIOS::ShutdownPlayOrRecord() {
LOGI() << "ShutdownPlayOrRecord";
// Close and delete the voice-processing I/O unit.
if (audio_unit_) {
audio_unit_.reset();
}
audio_unit_.reset();
// Remove audio session notification observers.
RTCAudioSession* session = [RTCAudioSession sharedInstance];
@ -705,6 +818,7 @@ void AudioDeviceIOS::ShutdownPlayOrRecord() {
// All I/O should be stopped or paused prior to deactivating the audio
// session, hence we deactivate as last action.
[session lockForConfiguration];
UnconfigureAudioSession();
[session endWebRTCSession:nil];
[session unlockForConfiguration];
}

View File

@ -28,8 +28,8 @@ class AudioSessionObserver {
// Called when audio route changes.
virtual void OnValidRouteChange() = 0;
// Called when audio session has been configured for WebRTC.
virtual void OnConfiguredForWebRTC() = 0;
// Called when the ability to play or record changes.
virtual void OnCanPlayOrRecordChange(bool can_play_or_record) = 0;
protected:
virtual ~AudioSessionObserver() {}

View File

@ -16,13 +16,29 @@
@implementation RTCAudioSession (Configuration)
- (BOOL)isConfiguredForWebRTC {
return self.savedConfiguration != nil;
}
- (BOOL)setConfiguration:(RTCAudioSessionConfiguration *)configuration
error:(NSError **)outError {
return [self setConfiguration:configuration
active:NO
shouldSetActive:NO
error:outError];
}
- (BOOL)setConfiguration:(RTCAudioSessionConfiguration *)configuration
active:(BOOL)active
error:(NSError **)outError {
return [self setConfiguration:configuration
active:active
shouldSetActive:YES
error:outError];
}
#pragma mark - Private
- (BOOL)setConfiguration:(RTCAudioSessionConfiguration *)configuration
active:(BOOL)active
shouldSetActive:(BOOL)shouldSetActive
error:(NSError **)outError {
NSParameterAssert(configuration);
if (outError) {
*outError = nil;
@ -61,8 +77,22 @@
}
}
// self.sampleRate is accurate only if the audio session is active.
if (!self.isActive || self.sampleRate != configuration.sampleRate) {
// Sometimes category options don't stick after setting mode.
if (self.categoryOptions != configuration.categoryOptions) {
NSError *categoryError = nil;
if (![self setCategory:configuration.category
withOptions:configuration.categoryOptions
error:&categoryError]) {
RTCLogError(@"Failed to set category options: %@",
categoryError.localizedDescription);
error = categoryError;
} else {
RTCLog(@"Set category options to: %ld",
(long)configuration.categoryOptions);
}
}
if (self.preferredSampleRate != configuration.sampleRate) {
NSError *sampleRateError = nil;
if (![self setPreferredSampleRate:configuration.sampleRate
error:&sampleRateError]) {
@ -75,9 +105,7 @@
}
}
// self.IOBufferDuration is accurate only if the audio session is active.
if (!self.isActive ||
self.IOBufferDuration != configuration.ioBufferDuration) {
if (self.preferredIOBufferDuration != configuration.ioBufferDuration) {
NSError *bufferDurationError = nil;
if (![self setPreferredIOBufferDuration:configuration.ioBufferDuration
error:&bufferDurationError]) {
@ -90,11 +118,13 @@
}
}
NSError *activeError = nil;
if (![self setActive:active error:&activeError]) {
RTCLogError(@"Failed to setActive to %d: %@",
active, activeError.localizedDescription);
error = activeError;
if (shouldSetActive) {
NSError *activeError = nil;
if (![self setActive:active error:&activeError]) {
RTCLogError(@"Failed to setActive to %d: %@",
active, activeError.localizedDescription);
error = activeError;
}
}
if (self.isActive &&
@ -138,84 +168,4 @@
return error == nil;
}
- (BOOL)configureWebRTCSession:(NSError **)outError {
if (outError) {
*outError = nil;
}
if (![self checkLock:outError]) {
return NO;
}
RTCLog(@"Configuring audio session for WebRTC.");
if (self.isConfiguredForWebRTC) {
RTCLogError(@"Already configured.");
if (outError) {
*outError =
[self configurationErrorWithDescription:@"Already configured."];
}
return NO;
}
// Configure the AVAudioSession and activate it.
// Provide an error even if there isn't one so we can log it.
NSError *error = nil;
RTCAudioSessionConfiguration *currentConfig =
[RTCAudioSessionConfiguration currentConfiguration];
RTCAudioSessionConfiguration *webRTCConfig =
[RTCAudioSessionConfiguration webRTCConfiguration];
self.savedConfiguration = currentConfig;
if (![self setConfiguration:webRTCConfig active:YES error:&error]) {
RTCLogError(@"Failed to set WebRTC audio configuration: %@",
error.localizedDescription);
[self unconfigureWebRTCSession:nil];
if (outError) {
*outError = error;
}
return NO;
}
// Ensure that the device currently supports audio input.
// TODO(tkchin): Figure out if this is really necessary.
if (!self.inputAvailable) {
RTCLogError(@"No audio input path is available!");
[self unconfigureWebRTCSession:nil];
if (outError) {
*outError = [self configurationErrorWithDescription:@"No input path."];
}
return NO;
}
// Give delegates a chance to process the event. In particular, the audio
// devices listening to this event will initialize their audio units.
[self notifyDidConfigure];
return YES;
}
- (BOOL)unconfigureWebRTCSession:(NSError **)outError {
if (outError) {
*outError = nil;
}
if (![self checkLock:outError]) {
return NO;
}
RTCLog(@"Unconfiguring audio session for WebRTC.");
if (!self.isConfiguredForWebRTC) {
RTCLogError(@"Already unconfigured.");
if (outError) {
*outError =
[self configurationErrorWithDescription:@"Already unconfigured."];
}
return NO;
}
[self setConfiguration:self.savedConfiguration active:NO error:outError];
self.savedConfiguration = nil;
[self notifyDidUnconfigure];
return YES;
}
@end

View File

@ -28,11 +28,8 @@ NS_ASSUME_NONNULL_BEGIN
*/
@property(nonatomic, readonly) int webRTCSessionCount;
/** The configuration of the audio session before configureWebRTCSession
* was first called.
*/
@property(nonatomic, strong, nullable)
RTCAudioSessionConfiguration *savedConfiguration;
/** Convenience BOOL that checks useManualAudio and isAudioEnabled. */
@property(readonly) BOOL canPlayOrRecord;
- (BOOL)checkLock:(NSError **)outError;
@ -55,6 +52,22 @@ NS_ASSUME_NONNULL_BEGIN
*/
- (BOOL)endWebRTCSession:(NSError **)outError;
/** Configure the audio session for WebRTC. This call will fail if the session
* is already configured. On other failures, we will attempt to restore the
* previously used audio session configuration.
* |lockForConfiguration| must be called first.
* Successful calls to configureWebRTCSession must be matched by calls to
* |unconfigureWebRTCSession|.
*/
- (BOOL)configureWebRTCSession:(NSError **)outError;
/** Unconfigures the session for WebRTC. This will attempt to restore the
* audio session to the settings used before |configureWebRTCSession| was
* called.
* |lockForConfiguration| must be called first.
*/
- (BOOL)unconfigureWebRTCSession:(NSError **)outError;
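The audio device code later in this diff calls these in balanced pairs under the configuration lock; a sketch of the expected call pattern:

RTCAudioSession *session = [RTCAudioSession sharedInstance];
[session lockForConfiguration];
NSError *error = nil;
// Apply the WebRTC configuration and activate the session.
if (![session configureWebRTCSession:&error]) {
  RTCLogError(@"Configure failed: %@", error.localizedDescription);
}
[session unlockForConfiguration];
// ... play or record ...
[session lockForConfiguration];
[session unconfigureWebRTCSession:nil];
[session unlockForConfiguration];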
/** Returns a configuration error with the given description. */
- (NSError *)configurationErrorWithDescription:(NSString *)description;
@ -69,10 +82,9 @@ NS_ASSUME_NONNULL_BEGIN
previousRoute:(AVAudioSessionRouteDescription *)previousRoute;
- (void)notifyMediaServicesWereLost;
- (void)notifyMediaServicesWereReset;
- (void)notifyShouldConfigure;
- (void)notifyShouldUnconfigure;
- (void)notifyDidConfigure;
- (void)notifyDidUnconfigure;
- (void)notifyDidChangeCanPlayOrRecord:(BOOL)canPlayOrRecord;
- (void)notifyDidStartPlayOrRecord;
- (void)notifyDidStopPlayOrRecord;
@end

View File

@ -58,29 +58,18 @@ extern NSInteger const kRTCAudioSessionErrorConfiguration;
// TODO(tkchin): Maybe handle SilenceSecondaryAudioHintNotification.
/** Called on a WebRTC thread when WebRTC needs to take over audio. Applications
* should call -[RTCAudioSession configureWebRTCSession] to allow WebRTC to
* play and record audio. Will only occur if shouldDelayAudioConfiguration is
* set to YES.
*/
- (void)audioSessionShouldConfigure:(RTCAudioSession *)session;
- (void)audioSession:(RTCAudioSession *)session
didChangeCanPlayOrRecord:(BOOL)canPlayOrRecord;
/** Called on a WebRTC thread when WebRTC no longer requires audio. Applications
* should call -[RTCAudioSession unconfigureWebRTCSession] to restore their
* audio session settings. Will only occur if shouldDelayAudioConfiguration is
* set to YES.
*/
/** Called on a WebRTC thread when the audio device is notified to begin
* playback or recording.
*/
- (void)audioSessionShouldUnconfigure:(RTCAudioSession *)session;
- (void)audioSessionDidStartPlayOrRecord:(RTCAudioSession *)session;
/** Called on a WebRTC thread when WebRTC has configured the audio session for
* WebRTC audio.
*/
/** Called on a WebRTC thread when the audio device is notified to stop
* playback or recording.
*/
- (void)audioSessionDidConfigure:(RTCAudioSession *)session;
/** Called on a WebRTC thread when WebRTC has unconfigured the audio session for
* WebRTC audio.
*/
- (void)audioSessionDidUnconfigure:(RTCAudioSession *)session;
- (void)audioSessionDidStopPlayOrRecord:(RTCAudioSession *)session;
@end
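
A sketch of an application-side delegate, mirroring what ARDMainViewController does earlier in this diff (the _audioPlayer ivar is illustrative):

- (void)audioSessionDidStartPlayOrRecord:(RTCAudioSession *)session {
  // WebRTC audio is starting; stop app-owned playback on the main queue.
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeMain
                               block:^{
                                 [_audioPlayer stop];
                               }];
}

- (void)audioSessionDidStopPlayOrRecord:(RTCAudioSession *)session {
  // WebRTC is done with the audio session; safe to resume app playback.
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeMain
                               block:^{
                                 [_audioPlayer play];
                               }];
}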
@ -108,11 +97,24 @@ extern NSInteger const kRTCAudioSessionErrorConfiguration;
/** If YES, WebRTC will not initialize the audio unit automatically when an
* audio track is ready for playout or recording. Instead, applications should
* listen to the delegate method |audioSessionShouldConfigure| and configure
* the session manually. This should be set before making WebRTC media calls
* and should not be changed while a call is active.
*/
@property(nonatomic, assign) BOOL shouldDelayAudioConfiguration;
/** If YES, WebRTC will not initialize the audio unit automatically when an
* audio track is ready for playout or recording. Instead, applications should
* call setIsAudioEnabled. If NO, WebRTC will initialize the audio unit
* as soon as an audio track is ready for playout or recording.
*/
@property(nonatomic, assign) BOOL useManualAudio;
/** This property is only effective if useManualAudio is YES.
* Represents permission for WebRTC to initialize the VoIP audio unit.
* When set to NO, if the VoIP audio unit used by WebRTC is active, it will be
* stopped and uninitialized. This will stop incoming and outgoing audio.
* When set to YES, WebRTC will initialize and start the audio unit when it is
* needed (e.g. due to establishing an audio connection).
* This property was introduced to work around an issue where if an AVPlayer is
* playing audio while the VoIP audio unit is initialized, its audio would be
* either cut off completely or played at a reduced volume. By preventing
* the audio unit from being initialized until after the audio has completed,
* we are able to prevent the abrupt cutoff.
*/
@property(nonatomic, assign) BOOL isAudioEnabled;
// Proxy properties.
@property(readonly) NSString *category;
@ -134,12 +136,14 @@ extern NSInteger const kRTCAudioSessionErrorConfiguration;
@property(readonly, nullable)
AVAudioSessionDataSourceDescription *outputDataSource;
@property(readonly) double sampleRate;
@property(readonly) double preferredSampleRate;
@property(readonly) NSInteger inputNumberOfChannels;
@property(readonly) NSInteger outputNumberOfChannels;
@property(readonly) float outputVolume;
@property(readonly) NSTimeInterval inputLatency;
@property(readonly) NSTimeInterval outputLatency;
@property(readonly) NSTimeInterval IOBufferDuration;
@property(readonly) NSTimeInterval preferredIOBufferDuration;
/** Default constructor. */
+ (instancetype)sharedInstance;
@ -196,37 +200,21 @@ extern NSInteger const kRTCAudioSessionErrorConfiguration;
@interface RTCAudioSession (Configuration)
/** Whether or not |configureWebRTCSession| has been called without a balanced
* call to |unconfigureWebRTCSession|. This is not an indication of whether the
* audio session has the right settings.
*/
@property(readonly) BOOL isConfiguredForWebRTC;
/** Applies the configuration to the current session. Attempts to set all
* properties even if previous ones fail. Only the last error will be
* returned. Also calls setActive with |active|.
* returned.
* |lockForConfiguration| must be called first.
*/
- (BOOL)setConfiguration:(RTCAudioSessionConfiguration *)configuration
error:(NSError **)outError;
/** Convenience method that calls both setConfiguration and setActive.
* |lockForConfiguration| must be called first.
*/
- (BOOL)setConfiguration:(RTCAudioSessionConfiguration *)configuration
active:(BOOL)active
error:(NSError **)outError;
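
Both variants require |lockForConfiguration| to be held; a sketch, assuming the stock WebRTC configuration:

RTCAudioSessionConfiguration *config =
    [RTCAudioSessionConfiguration webRTCConfiguration];
RTCAudioSession *session = [RTCAudioSession sharedInstance];
[session lockForConfiguration];
NSError *error = nil;
if (![session setConfiguration:config active:YES error:&error]) {
  RTCLogError(@"Failed to apply configuration: %@",
              error.localizedDescription);
}
[session unlockForConfiguration];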
/** Configure the audio session for WebRTC. This call will fail if the session
* is already configured. On other failures, we will attempt to restore the
* previously used audio session configuration.
* |lockForConfiguration| must be called first.
* Successful calls to configureWebRTCSession must be matched by calls to
* |unconfigureWebRTCSession|.
*/
- (BOOL)configureWebRTCSession:(NSError **)outError;
/** Unconfigures the session for WebRTC. This will attempt to restore the
* audio session to the settings used before |configureWebRTCSession| was
* called.
* |lockForConfiguration| must be called first.
*/
- (BOOL)unconfigureWebRTCSession:(NSError **)outError;
@end
NS_ASSUME_NONNULL_END

View File

@ -17,6 +17,7 @@
#import "WebRTC/RTCLogging.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSession+Private.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h"
NSString * const kRTCAudioSessionErrorDomain = @"org.webrtc.RTCAudioSession";
NSInteger const kRTCAudioSessionErrorLockRequired = -1;
@ -32,12 +33,13 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
volatile int _lockRecursionCount;
volatile int _webRTCSessionCount;
BOOL _isActive;
BOOL _shouldDelayAudioConfiguration;
BOOL _useManualAudio;
BOOL _isAudioEnabled;
BOOL _canPlayOrRecord;
}
@synthesize session = _session;
@synthesize delegates = _delegates;
@synthesize savedConfiguration = _savedConfiguration;
+ (instancetype)sharedInstance {
static dispatch_once_t onceToken;
@ -81,6 +83,9 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
- (NSString *)description {
NSString *format =
@"RTCAudioSession: {\n"
" category: %@\n"
" categoryOptions: %ld\n"
" mode: %@\n"
" isActive: %d\n"
" sampleRate: %.2f\n"
" IOBufferDuration: %f\n"
@ -90,6 +95,7 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
" inputLatency: %f\n"
"}";
NSString *description = [NSString stringWithFormat:format,
self.category, (long)self.categoryOptions, self.mode,
self.isActive, self.sampleRate, self.IOBufferDuration,
self.outputNumberOfChannels, self.inputNumberOfChannels,
self.outputLatency, self.inputLatency];
@ -112,20 +118,35 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
return _lockRecursionCount > 0;
}
- (void)setShouldDelayAudioConfiguration:(BOOL)shouldDelayAudioConfiguration {
- (void)setUseManualAudio:(BOOL)useManualAudio {
@synchronized(self) {
// No one should be changing this while an audio device is active.
RTC_DCHECK(!self.isConfiguredForWebRTC);
if (_shouldDelayAudioConfiguration == shouldDelayAudioConfiguration) {
if (_useManualAudio == useManualAudio) {
return;
}
_shouldDelayAudioConfiguration = shouldDelayAudioConfiguration;
_useManualAudio = useManualAudio;
}
[self updateCanPlayOrRecord];
}
- (BOOL)useManualAudio {
@synchronized(self) {
return _useManualAudio;
}
}
- (BOOL)shouldDelayAudioConfiguration {
- (void)setIsAudioEnabled:(BOOL)isAudioEnabled {
@synchronized(self) {
return _shouldDelayAudioConfiguration;
if (_isAudioEnabled == isAudioEnabled) {
return;
}
_isAudioEnabled = isAudioEnabled;
}
[self updateCanPlayOrRecord];
}
- (BOOL)isAudioEnabled {
@synchronized(self) {
return _isAudioEnabled;
}
}
@ -232,6 +253,10 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
return self.session.sampleRate;
}
- (double)preferredSampleRate {
return self.session.preferredSampleRate;
}
- (NSInteger)inputNumberOfChannels {
return self.session.inputNumberOfChannels;
}
@ -256,6 +281,10 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
return self.session.IOBufferDuration;
}
- (NSTimeInterval)preferredIOBufferDuration {
return self.session.preferredIOBufferDuration;
}
// TODO(tkchin): Simplify the amount of locking happening here. Likely that we
// can just do atomic increments / decrements.
- (BOOL)setActive:(BOOL)active
@ -497,21 +526,6 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
}
}
- (void)setSavedConfiguration:(RTCAudioSessionConfiguration *)configuration {
@synchronized(self) {
if (_savedConfiguration == configuration) {
return;
}
_savedConfiguration = configuration;
}
}
- (RTCAudioSessionConfiguration *)savedConfiguration {
@synchronized(self) {
return _savedConfiguration;
}
}
// TODO(tkchin): check for duplicates.
- (void)pushDelegate:(id<RTCAudioSessionDelegate>)delegate {
@synchronized(self) {
@ -547,6 +561,10 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
return _webRTCSessionCount;
}
- (BOOL)canPlayOrRecord {
return !self.useManualAudio || self.isAudioEnabled;
}
- (BOOL)checkLock:(NSError **)outError {
// Check ivar instead of trying to acquire lock so that we won't accidentally
// acquire lock if it hasn't already been called.
@ -566,38 +584,8 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
if (![self checkLock:outError]) {
return NO;
}
NSInteger sessionCount = rtc::AtomicOps::Increment(&_webRTCSessionCount);
if (sessionCount > 1) {
// Should already be configured.
RTC_DCHECK(self.isConfiguredForWebRTC);
return YES;
}
// Only perform configuration steps once. Application might have already
// configured the session.
if (self.isConfiguredForWebRTC) {
// Nothing more to do, already configured.
return YES;
}
// If application has prevented automatic configuration, return here and wait
// for application to call configureWebRTCSession.
if (self.shouldDelayAudioConfiguration) {
[self notifyShouldConfigure];
return YES;
}
// Configure audio session.
NSError *error = nil;
if (![self configureWebRTCSession:&error]) {
RTCLogError(@"Error configuring audio session: %@",
error.localizedDescription);
if (outError) {
*outError = error;
}
return NO;
}
rtc::AtomicOps::Increment(&_webRTCSessionCount);
[self notifyDidStartPlayOrRecord];
return YES;
}
@ -608,38 +596,59 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
if (![self checkLock:outError]) {
return NO;
}
int sessionCount = rtc::AtomicOps::Decrement(&_webRTCSessionCount);
RTC_DCHECK_GE(sessionCount, 0);
if (sessionCount != 0) {
// Should still be configured.
RTC_DCHECK(self.isConfiguredForWebRTC);
return YES;
}
rtc::AtomicOps::Decrement(&_webRTCSessionCount);
[self notifyDidStopPlayOrRecord];
return YES;
}
// Only unconfigure if application has not done it.
if (!self.isConfiguredForWebRTC) {
// Nothing more to do, already unconfigured.
return YES;
- (BOOL)configureWebRTCSession:(NSError **)outError {
if (outError) {
*outError = nil;
}
// If application has prevented automatic configuration, return here and wait
// for application to call unconfigureWebRTCSession.
if (self.shouldDelayAudioConfiguration) {
[self notifyShouldUnconfigure];
return YES;
if (![self checkLock:outError]) {
return NO;
}
RTCLog(@"Configuring audio session for WebRTC.");
// Unconfigure audio session.
// Configure the AVAudioSession and activate it.
// Provide an error even if there isn't one so we can log it.
NSError *error = nil;
if (![self unconfigureWebRTCSession:&error]) {
RTCLogError(@"Error unconfiguring audio session: %@",
RTCAudioSessionConfiguration *webRTCConfig =
[RTCAudioSessionConfiguration webRTCConfiguration];
if (![self setConfiguration:webRTCConfig active:YES error:&error]) {
RTCLogError(@"Failed to set WebRTC audio configuration: %@",
error.localizedDescription);
[self unconfigureWebRTCSession:nil];
if (outError) {
*outError = error;
}
return NO;
}
// Ensure that the device currently supports audio input.
// TODO(tkchin): Figure out if this is really necessary.
if (!self.inputAvailable) {
RTCLogError(@"No audio input path is available!");
[self unconfigureWebRTCSession:nil];
if (outError) {
*outError = [self configurationErrorWithDescription:@"No input path."];
}
return NO;
}
return YES;
}
- (BOOL)unconfigureWebRTCSession:(NSError **)outError {
if (outError) {
*outError = nil;
}
if (![self checkLock:outError]) {
return NO;
}
RTCLog(@"Unconfiguring audio session for WebRTC.");
[self setActive:NO error:outError];
return YES;
}
@ -667,6 +676,22 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
}
}
- (void)updateCanPlayOrRecord {
BOOL canPlayOrRecord = NO;
BOOL shouldNotify = NO;
@synchronized(self) {
canPlayOrRecord = !self.useManualAudio || self.isAudioEnabled;
if (_canPlayOrRecord == canPlayOrRecord) {
return;
}
_canPlayOrRecord = canPlayOrRecord;
shouldNotify = YES;
}
if (shouldNotify) {
[self notifyDidChangeCanPlayOrRecord:canPlayOrRecord];
}
}
- (void)notifyDidBeginInterruption {
for (auto delegate : self.delegates) {
SEL sel = @selector(audioSessionDidBeginInterruption:);
@ -717,38 +742,29 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
}
}
- (void)notifyShouldConfigure {
- (void)notifyDidChangeCanPlayOrRecord:(BOOL)canPlayOrRecord {
for (auto delegate : self.delegates) {
SEL sel = @selector(audioSessionShouldConfigure:);
SEL sel = @selector(audioSession:didChangeCanPlayOrRecord:);
if ([delegate respondsToSelector:sel]) {
[delegate audioSessionShouldConfigure:self];
[delegate audioSession:self didChangeCanPlayOrRecord:canPlayOrRecord];
}
}
}
- (void)notifyShouldUnconfigure {
- (void)notifyDidStartPlayOrRecord {
for (auto delegate : self.delegates) {
SEL sel = @selector(audioSessionShouldUnconfigure:);
SEL sel = @selector(audioSessionDidStartPlayOrRecord:);
if ([delegate respondsToSelector:sel]) {
[delegate audioSessionShouldUnconfigure:self];
[delegate audioSessionDidStartPlayOrRecord:self];
}
}
}
- (void)notifyDidConfigure {
- (void)notifyDidStopPlayOrRecord {
for (auto delegate : self.delegates) {
SEL sel = @selector(audioSessionDidConfigure:);
SEL sel = @selector(audioSessionDidStopPlayOrRecord:);
if ([delegate respondsToSelector:sel]) {
[delegate audioSessionDidConfigure:self];
}
}
}
- (void)notifyDidUnconfigure {
for (auto delegate : self.delegates) {
SEL sel = @selector(audioSessionDidUnconfigure:);
if ([delegate respondsToSelector:sel]) {
[delegate audioSessionDidUnconfigure:self];
[delegate audioSessionDidStopPlayOrRecord:self];
}
}
}

View File

@ -37,6 +37,8 @@ extern const double kRTCAudioSessionLowComplexityIOBufferDuration;
+ (instancetype)currentConfiguration;
/** Returns the configuration that WebRTC needs. */
+ (instancetype)webRTCConfiguration;
/** Provide a way to override the default configuration. */
+ (void)setWebRTCConfiguration:(RTCAudioSessionConfiguration *)configuration;
@end
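
This mirrors the AppRTCDemo change earlier in this diff, which adds the speaker option to the defaults; a sketch:

RTCAudioSessionConfiguration *webRTCConfig =
    [RTCAudioSessionConfiguration webRTCConfiguration];
webRTCConfig.categoryOptions |=
    AVAudioSessionCategoryOptionDefaultToSpeaker;
[RTCAudioSessionConfiguration setWebRTCConfiguration:webRTCConfig];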

View File

@ -10,6 +10,8 @@
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h"
#import "WebRTC/RTCDispatcher.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h"
// Try to use mono to save resources. Also avoids channel format conversion
@ -49,6 +51,8 @@ const double kRTCAudioSessionHighPerformanceIOBufferDuration = 0.01;
// TODO(henrika): monitor this size and determine if it should be modified.
const double kRTCAudioSessionLowComplexityIOBufferDuration = 0.06;
static RTCAudioSessionConfiguration *gWebRTCConfiguration = nil;
@implementation RTCAudioSessionConfiguration
@synthesize category = _category;
@ -96,6 +100,10 @@ const double kRTCAudioSessionLowComplexityIOBufferDuration = 0.06;
return self;
}
+ (void)initialize {
gWebRTCConfiguration = [[self alloc] init];
}
+ (instancetype)currentConfiguration {
RTCAudioSession *session = [RTCAudioSession sharedInstance];
RTCAudioSessionConfiguration *config =
@ -111,7 +119,15 @@ const double kRTCAudioSessionLowComplexityIOBufferDuration = 0.06;
}
+ (instancetype)webRTCConfiguration {
return [[self alloc] init];
@synchronized(self) {
return (RTCAudioSessionConfiguration *)gWebRTCConfiguration;
}
}
+ (void)setWebRTCConfiguration:(RTCAudioSessionConfiguration *)configuration {
@synchronized(self) {
gWebRTCConfiguration = configuration;
}
}
@end

View File

@ -70,14 +70,15 @@
- (void)audioSessionMediaServicesWereReset:(RTCAudioSession *)session {
}
- (void)audioSessionShouldConfigure:(RTCAudioSession *)session {
- (void)audioSession:(RTCAudioSession *)session
didChangeCanPlayOrRecord:(BOOL)canPlayOrRecord {
_observer->OnCanPlayOrRecordChange(canPlayOrRecord);
}
- (void)audioSessionShouldUnconfigure:(RTCAudioSession *)session {
- (void)audioSessionDidStartPlayOrRecord:(RTCAudioSession *)session {
}
- (void)audioSessionDidConfigure:(RTCAudioSession *)session {
_observer->OnConfiguredForWebRTC();
- (void)audioSessionDidStopPlayOrRecord:(RTCAudioSession *)session {
}
@end

View File

@ -175,7 +175,7 @@ VoiceProcessingAudioUnit::State VoiceProcessingAudioUnit::GetState() const {
bool VoiceProcessingAudioUnit::Initialize(Float64 sample_rate) {
RTC_DCHECK_GE(state_, kUninitialized);
RTCLog(@"Initializing audio unit.");
RTCLog(@"Initializing audio unit with sample rate: %f", sample_rate);
OSStatus result = noErr;
AudioStreamBasicDescription format = GetFormat(sample_rate);
@ -228,7 +228,9 @@ bool VoiceProcessingAudioUnit::Initialize(Float64 sample_rate) {
[NSThread sleepForTimeInterval:0.1f];
result = AudioUnitInitialize(vpio_unit_);
}
RTCLog(@"Voice Processing I/O unit is now initialized.");
if (result == noErr) {
RTCLog(@"Voice Processing I/O unit is now initialized.");
}
state_ = kInitialized;
return true;
}
@ -241,6 +243,8 @@ bool VoiceProcessingAudioUnit::Start() {
if (result != noErr) {
RTCLogError(@"Failed to start audio unit. Error=%ld", (long)result);
return false;
} else {
RTCLog(@"Started audio unit");
}
state_ = kStarted;
return true;
@ -254,7 +258,10 @@ bool VoiceProcessingAudioUnit::Stop() {
if (result != noErr) {
RTCLogError(@"Failed to stop audio unit. Error=%ld", (long)result);
return false;
} else {
RTCLog(@"Stopped audio unit");
}
state_ = kInitialized;
return true;
}
@ -267,7 +274,11 @@ bool VoiceProcessingAudioUnit::Uninitialize() {
if (result != noErr) {
RTCLogError(@"Failed to uninitialize audio unit. Error=%ld", (long)result);
return false;
} else {
RTCLog(@"Uninitialized audio unit.");
}
state_ = kUninitialized;
return true;
}