diff --git a/webrtc/modules/audio_device/android/audio_device_opensles_android.cc b/webrtc/modules/audio_device/android/audio_device_opensles_android.cc index 8d8e1f3fe2..e0744d6a93 100644 --- a/webrtc/modules/audio_device/android/audio_device_opensles_android.cc +++ b/webrtc/modules/audio_device/android/audio_device_opensles_android.cc @@ -8,1662 +8,1428 @@ * be found in the AUTHORS file in the root of the source tree. */ +#include "modules/audio_device/android/audio_device_opensles_android.h" + +#ifdef WEBRTC_ANDROID_DEBUG +#include +#endif #include #include #include #include -#include "audio_device_utility.h" -#include "audio_device_opensles_android.h" -#include "audio_device_config.h" - -#include "trace.h" -#include "thread_wrapper.h" -#include "event_wrapper.h" +#include "modules/audio_device/audio_device_utility.h" +#include "system_wrappers/interface/event_wrapper.h" +#include "system_wrappers/interface/thread_wrapper.h" +#include "system_wrappers/interface/trace.h" #ifdef WEBRTC_ANDROID_DEBUG -#include -#define WEBRTC_TRACE(a,b,c,...) __android_log_print( \ - ANDROID_LOG_DEBUG, "WebRTC ADM OpenSLES", __VA_ARGS__) +#define WEBRTC_OPENSL_TRACE(a, b, c, ...) 
\ + __android_log_print(ANDROID_LOG_DEBUG, "WebRTC OpenSLES", __VA_ARGS__) +#else +#define WEBRTC_OPENSL_TRACE WEBRTC_TRACE #endif namespace webrtc { -// ============================================================================ -// Construction & Destruction -// ============================================================================ - -// ---------------------------------------------------------------------------- -// AudioDeviceAndroidOpenSLES - ctor -// ---------------------------------------------------------------------------- - -AudioDeviceAndroidOpenSLES::AudioDeviceAndroidOpenSLES(const WebRtc_Word32 id) : - _ptrAudioBuffer(NULL), - _critSect(*CriticalSectionWrapper::CreateCriticalSection()), - _id(id), - _slEngineObject(NULL), - _slPlayer(NULL), - _slEngine(NULL), - _slPlayerPlay(NULL), - _slOutputMixObject(NULL), - _slSpeakerVolume(NULL), - _slRecorder(NULL), - _slRecorderRecord(NULL), - _slAudioIODeviceCapabilities(NULL), - _slRecorderSimpleBufferQueue(NULL), - _slMicVolume(NULL), - _micDeviceId(0), - _recQueueSeq(0), - _timeEventRec(*EventWrapper::Create()), - _ptrThreadRec(NULL), - _recThreadID(0), - _playQueueSeq(0), - _recordingDeviceIsSpecified(false), - _playoutDeviceIsSpecified(false), - _initialized(false), - _recording(false), - _playing(false), - _recIsInitialized(false), - _playIsInitialized(false), - _micIsInitialized(false), - _speakerIsInitialized(false), - _playWarning(0), - _playError(0), - _recWarning(0), - _recError(0), - _playoutDelay(0), - _recordingDelay(0), - _AGC(false), - _adbSampleRate(0), - _samplingRateIn(SL_SAMPLINGRATE_16), - _samplingRateOut(SL_SAMPLINGRATE_16), - _maxSpeakerVolume(0), - _minSpeakerVolume(0), - _loudSpeakerOn(false), - is_thread_priority_set_(false) { - WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", - __FUNCTION__); - memset(_playQueueBuffer, 0, sizeof(_playQueueBuffer)); +AudioDeviceAndroidOpenSLES::AudioDeviceAndroidOpenSLES(const WebRtc_Word32 id) + : voe_audio_buffer_(NULL), + 
crit_sect_(*CriticalSectionWrapper::CreateCriticalSection()), + id_(id), + sles_engine_(NULL), + sles_player_(NULL), + sles_engine_itf_(NULL), + sles_player_itf_(NULL), + sles_player_sbq_itf_(NULL), + sles_output_mixer_(NULL), + sles_speaker_volume_(NULL), + sles_recorder_(NULL), + sles_recorder_itf_(NULL), + sles_recorder_sbq_itf_(NULL), + sles_mic_volume_(NULL), + mic_dev_id_(0), + play_warning_(0), + play_error_(0), + rec_warning_(0), + rec_error_(0), + is_recording_dev_specified_(false), + is_playout_dev_specified_(false), + is_initialized_(false), + is_recording_(false), + is_playing_(false), + is_rec_initialized_(false), + is_play_initialized_(false), + is_mic_initialized_(false), + is_speaker_initialized_(false), + playout_delay_(0), + recording_delay_(0), + agc_enabled_(false), + rec_timer_(*EventWrapper::Create()), + mic_sampling_rate_(N_REC_SAMPLES_PER_SEC * 1000), + speaker_sampling_rate_(N_PLAY_SAMPLES_PER_SEC * 1000), + max_speaker_vol_(0), + min_speaker_vol_(0), + loundspeaker_on_(false) { + WEBRTC_OPENSL_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", + __FUNCTION__); + memset(rec_buf_, 0, sizeof(rec_buf_)); + memset(play_buf_, 0, sizeof(play_buf_)); } AudioDeviceAndroidOpenSLES::~AudioDeviceAndroidOpenSLES() { - WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", - __FUNCTION__); + WEBRTC_OPENSL_TRACE(kTraceMemory, kTraceAudioDevice, id_, "%s destroyed", + __FUNCTION__); - Terminate(); + Terminate(); - delete &_timeEventRec; - delete &_critSect; + delete &crit_sect_; + delete &rec_timer_; } -// ============================================================================ -// API -// ============================================================================ - void AudioDeviceAndroidOpenSLES::AttachAudioBuffer( AudioDeviceBuffer* audioBuffer) { - CriticalSectionScoped lock(&_critSect); + CriticalSectionScoped lock(&crit_sect_); - _ptrAudioBuffer = audioBuffer; + voe_audio_buffer_ = audioBuffer; - // inform the AudioBuffer 
about default settings for this implementation - _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); - _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC); - _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS); - _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS); + // Inform the AudioBuffer about default settings for this implementation. + voe_audio_buffer_->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); + voe_audio_buffer_->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC); + voe_audio_buffer_->SetRecordingChannels(N_REC_CHANNELS); + voe_audio_buffer_->SetPlayoutChannels(N_PLAY_CHANNELS); } WebRtc_Word32 AudioDeviceAndroidOpenSLES::ActiveAudioLayer( AudioDeviceModule::AudioLayer& audioLayer) const { - audioLayer = AudioDeviceModule::kPlatformDefaultAudio; + audioLayer = AudioDeviceModule::kPlatformDefaultAudio; - return 0; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::Init() { + CriticalSectionScoped lock(&crit_sect_); - CriticalSectionScoped lock(&_critSect); - - if (_initialized) { - return 0; - } - - _playWarning = 0; - _playError = 0; - _recWarning = 0; - _recError = 0; - - SLEngineOption EngineOption[] = { - { (SLuint32) SL_ENGINEOPTION_THREADSAFE, (SLuint32) SL_BOOLEAN_TRUE }, - }; - WebRtc_Word32 res = slCreateEngine(&_slEngineObject, 1, EngineOption, 0, - NULL, NULL); - //WebRtc_Word32 res = slCreateEngine( &_slEngineObject, 0, NULL, 0, NULL, - // NULL); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to create SL Engine Object"); - return -1; - } - /* Realizing the SL Engine in synchronous mode. 
*/ - if ((*_slEngineObject)->Realize(_slEngineObject, SL_BOOLEAN_FALSE) - != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to Realize SL Engine"); - return -1; - } - - if ((*_slEngineObject)->GetInterface(_slEngineObject, SL_IID_ENGINE, - (void*) &_slEngine) - != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to get SL Engine interface"); - return -1; - } - - // Check the sample rate to be used for playback and recording - if (InitSampleRate() != 0) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "%s: Failed to init samplerate", __FUNCTION__); - return -1; - } - - // Set the audio device buffer sampling rate, we assume we get the same - // for play and record - if (_ptrAudioBuffer->SetRecordingSampleRate(_adbSampleRate) < 0) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Could not set audio device buffer recording " - "sampling rate (%d)", _adbSampleRate); - } - if (_ptrAudioBuffer->SetPlayoutSampleRate(_adbSampleRate) < 0) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Could not set audio device buffer playout sampling " - "rate (%d)", _adbSampleRate); - } - - _initialized = true; - + if (is_initialized_) return 0; + + SLEngineOption EngineOption[] = { + { SL_ENGINEOPTION_THREADSAFE, static_cast(SL_BOOLEAN_TRUE) }, + }; + WebRtc_Word32 res = slCreateEngine(&sles_engine_, 1, EngineOption, 0, + NULL, NULL); + + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to create SL Engine Object"); + return -1; + } + + // Realizing the SL Engine in synchronous mode. 
+ if ((*sles_engine_)->Realize(sles_engine_, SL_BOOLEAN_FALSE) + != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to Realize SL Engine"); + return -1; + } + + if ((*sles_engine_)->GetInterface( + sles_engine_, + SL_IID_ENGINE, + &sles_engine_itf_) != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to get SL Engine interface"); + return -1; + } + + // Check the sample rate to be used for playback and recording + if (InitSampleRate() != 0) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + "%s: Failed to init samplerate", __FUNCTION__); + return -1; + } + + // Set the audio device buffer sampling rate, we assume we get the same + // for play and record. + if (voe_audio_buffer_->SetRecordingSampleRate(mic_sampling_rate_) < 0) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Could not set mic audio device buffer " + "sampling rate (%d)", mic_sampling_rate_); + } + if (voe_audio_buffer_->SetPlayoutSampleRate(speaker_sampling_rate_) < 0) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Could not set speaker audio device buffer " + "sampling rate (%d)", speaker_sampling_rate_); + } + + is_initialized_ = true; + + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::Terminate() { + CriticalSectionScoped lock(&crit_sect_); - CriticalSectionScoped lock(&_critSect); - - if (!_initialized) { - return 0; - } - - // RECORDING - StopRecording(); - - _micIsInitialized = false; - _recordingDeviceIsSpecified = false; - - // PLAYOUT - StopPlayout(); - - if (_slEngineObject != NULL) { - (*_slEngineObject)->Destroy(_slEngineObject); - _slEngineObject = NULL; - _slEngine = NULL; - } - - _initialized = false; - + if (!is_initialized_) return 0; + + // RECORDING + StopRecording(); + + is_mic_initialized_ = false; + is_recording_dev_specified_ = false; + + // PLAYOUT + StopPlayout(); + + if (sles_engine_ != NULL) { + (*sles_engine_)->Destroy(sles_engine_); + 
sles_engine_ = NULL; + sles_engine_itf_ = NULL; + } + + is_initialized_ = false; + return 0; } bool AudioDeviceAndroidOpenSLES::Initialized() const { - - return (_initialized); + return (is_initialized_); } -WebRtc_Word32 AudioDeviceAndroidOpenSLES::SpeakerIsAvailable(bool& available) { +WebRtc_Word32 AudioDeviceAndroidOpenSLES::SpeakerIsAvailable( + bool& available) { + // We always assume it's available + available = true; - // We always assume it's available - available = true; - - return 0; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::InitSpeaker() { + CriticalSectionScoped lock(&crit_sect_); - CriticalSectionScoped lock(&_critSect); + if (is_playing_) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " Playout already started"); + return -1; + } - if (_playing) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " Playout already started"); - return -1; - } + if (!is_playout_dev_specified_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Playout device is not specified"); + return -1; + } - if (!_playoutDeviceIsSpecified) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Playout device is not specified"); - return -1; - } + // Nothing needs to be done here, we use a flag to have consistent + // behavior with other platforms. + is_speaker_initialized_ = true; - // Nothing needs to be done here, we use a flag to have consistent - // behavior with other platforms - _speakerIsInitialized = true; - - return 0; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::MicrophoneIsAvailable( bool& available) { - - // We always assume it's available - available = true; - - return 0; + // We always assume it's available. 
+ available = true; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::InitMicrophone() { + CriticalSectionScoped lock(&crit_sect_); + if (is_recording_) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " Recording already started"); + return -1; + } + if (!is_recording_dev_specified_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Recording device is not specified"); + return -1; + } - CriticalSectionScoped lock(&_critSect); - - if (_recording) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " Recording already started"); - return -1; - } - - if (!_recordingDeviceIsSpecified) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Recording device is not specified"); - return -1; - } - - // Nothing needs to be done here, we use a flag to have consistent - // behavior with other platforms - _micIsInitialized = true; - - return 0; + // Nothing needs to be done here, we use a flag to have consistent + // behavior with other platforms. + is_mic_initialized_ = true; + return 0; } bool AudioDeviceAndroidOpenSLES::SpeakerIsInitialized() const { - - return _speakerIsInitialized; + return is_speaker_initialized_; } bool AudioDeviceAndroidOpenSLES::MicrophoneIsInitialized() const { - - return _micIsInitialized; + return is_mic_initialized_; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SpeakerVolumeIsAvailable( bool& available) { - - available = true; // We assume we are always be able to set/get volume - - return 0; + available = true; // We assume we are always be able to set/get volume. 
+ return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetSpeakerVolume( WebRtc_UWord32 volume) { + if (!is_speaker_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Speaker not initialized"); + return -1; + } - if (!_speakerIsInitialized) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Speaker not initialized"); - return -1; - } + if (sles_engine_ == NULL) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + "SetSpeakerVolume, SL Engine object doesnt exist"); + return -1; + } - if (_slEngineObject == NULL) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "SetSpeakerVolume, SL Engine object doesnt exist"); - return -1; + if (sles_engine_itf_ == NULL) { + if ((*sles_engine_)->GetInterface( + sles_engine_, + SL_IID_ENGINE, + &sles_engine_itf_) != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to GetInterface SL Engine Interface"); + return -1; } - - if (_slEngine == NULL) { - // Get the SL Engine Interface which is implicit - if ((*_slEngineObject)->GetInterface(_slEngineObject, SL_IID_ENGINE, - (void*) &_slEngine) - != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to GetInterface SL Engine Interface"); - return -1; - } - } - return 0; + } + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SpeakerVolume( WebRtc_UWord32& volume) const { - return 0; + return 0; } -// ---------------------------------------------------------------------------- -// SetWaveOutVolume -// ---------------------------------------------------------------------------- - WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetWaveOutVolume( - WebRtc_UWord16 /*volumeLeft*/, - WebRtc_UWord16 /*volumeRight*/) { - - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " API call not supported on this platform"); - return -1; + WebRtc_UWord16 volumeLeft, + WebRtc_UWord16 volumeRight) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " API call not 
supported on this platform"); + return -1; } -// ---------------------------------------------------------------------------- -// WaveOutVolume -// ---------------------------------------------------------------------------- - WebRtc_Word32 AudioDeviceAndroidOpenSLES::WaveOutVolume( - WebRtc_UWord16& /*volumeLeft*/, - WebRtc_UWord16& /*volumeRight*/) const { - - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " API call not supported on this platform"); - return -1; + WebRtc_UWord16& volumeLeft, + WebRtc_UWord16& volumeRight) const { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " API call not supported on this platform"); + return -1; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::MaxSpeakerVolume( WebRtc_UWord32& maxVolume) const { + if (!is_speaker_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Speaker not initialized"); + return -1; + } - if (!_speakerIsInitialized) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Speaker not initialized"); - return -1; - } + maxVolume = max_speaker_vol_; - maxVolume = _maxSpeakerVolume; - - return 0; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::MinSpeakerVolume( WebRtc_UWord32& minVolume) const { - - if (!_speakerIsInitialized) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Speaker not initialized"); - return -1;// - } - - minVolume = _minSpeakerVolume; - - return 0; + if (!is_speaker_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Speaker not initialized"); + return -1; + } + minVolume = min_speaker_vol_; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SpeakerVolumeStepSize( WebRtc_UWord16& stepSize) const { - - if (!_speakerIsInitialized) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Speaker not initialized"); - return -1; - } - stepSize = 1; - - return 0; + if (!is_speaker_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Speaker not initialized"); + return -1; + 
} + stepSize = 1; + return 0; } -// ---------------------------------------------------------------------------- -// SpeakerMuteIsAvailable -// ---------------------------------------------------------------------------- - WebRtc_Word32 AudioDeviceAndroidOpenSLES::SpeakerMuteIsAvailable( bool& available) { - - available = false; // Speaker mute not supported on Android - - return 0; + available = false; // Speaker mute not supported on Android. + return 0; } -WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetSpeakerMute(bool /*enable*/) { - - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " API call not supported on this platform"); - return -1; +WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetSpeakerMute(bool enable) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " API call not supported on this platform"); + return -1; } -WebRtc_Word32 AudioDeviceAndroidOpenSLES::SpeakerMute(bool& /*enabled*/) const { - - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " API call not supported on this platform"); - return -1; +WebRtc_Word32 AudioDeviceAndroidOpenSLES::SpeakerMute( + bool& enabled) const { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " API call not supported on this platform"); + return -1; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::MicrophoneMuteIsAvailable( bool& available) { - - available = false; // Mic mute not supported on Android - - return 0; + available = false; // Mic mute not supported on Android + return 0; } -WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetMicrophoneMute(bool /*enable*/) { - - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " API call not supported on this platform"); - return -1; +WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetMicrophoneMute(bool enable) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " API call not supported on this platform"); + return -1; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::MicrophoneMute( - bool& /*enabled*/) const { - - 
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " API call not supported on this platform"); - return -1; + bool& enabled) const { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " API call not supported on this platform"); + return -1; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::MicrophoneBoostIsAvailable( bool& available) { - - available = false; // Mic boost not supported on Android - - return 0; + available = false; // Mic boost not supported on Android. + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetMicrophoneBoost(bool enable) { - - if (!_micIsInitialized) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Microphone not initialized"); - return -1; - } - - if (enable) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Enabling not available"); - return -1; - } - - return 0; + if (!is_mic_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Microphone not initialized"); + return -1; + } + if (enable) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Enabling not available"); + return -1; + } + return 0; } -WebRtc_Word32 AudioDeviceAndroidOpenSLES::MicrophoneBoost(bool& enabled) const { - - if (!_micIsInitialized) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Microphone not initialized"); - return -1; - } - - enabled = false; - - return 0; +WebRtc_Word32 AudioDeviceAndroidOpenSLES::MicrophoneBoost( + bool& enabled) const { + if (!is_mic_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Microphone not initialized"); + return -1; + } + enabled = false; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::StereoRecordingIsAvailable( bool& available) { - - available = false; // Stereo recording not supported on Android - - return 0; + available = false; // Stereo recording not supported on Android. 
+ return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetStereoRecording(bool enable) { - - if (enable) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Enabling not available"); - return -1; - } - - return 0; + if (enable) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Enabling not available"); + return -1; + } + return 0; } -WebRtc_Word32 AudioDeviceAndroidOpenSLES::StereoRecording(bool& enabled) const { - - enabled = false; - - return 0; +WebRtc_Word32 AudioDeviceAndroidOpenSLES::StereoRecording( + bool& enabled) const { + enabled = false; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::StereoPlayoutIsAvailable( bool& available) { - - available = false; // Stereo playout not supported on Android - - return 0; + // TODO(leozwang): This api is called before initplayout, we need + // to detect audio device to find out if stereo is supported or not. + available = false; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetStereoPlayout(bool enable) { - - if (enable) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Enabling not available"); - return -1; - } - + if (enable) { return 0; + } else { + // TODO(leozwang): Enforce mono. + return 0; + } } -WebRtc_Word32 AudioDeviceAndroidOpenSLES::StereoPlayout(bool& enabled) const { - - enabled = false; - - return 0; +WebRtc_Word32 AudioDeviceAndroidOpenSLES::StereoPlayout( + bool& enabled) const { + enabled = (player_pcm_.numChannels == 2 ? 
true : false); + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetAGC(bool enable) { - - _AGC = enable; - - return 0; + agc_enabled_ = enable; + return 0; } bool AudioDeviceAndroidOpenSLES::AGC() const { - - return _AGC; + return agc_enabled_; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::MicrophoneVolumeIsAvailable( bool& available) { - - available = true; - - return 0; + available = true; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetMicrophoneVolume( WebRtc_UWord32 volume) { - - if (_slEngineObject == NULL) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "SetMicrophoneVolume, SL Engine Object doesnt exist"); - return -1; - } - - /* Get the optional DEVICE VOLUME interface from the engine */ - if (_slMicVolume == NULL) { - // Get the optional DEVICE VOLUME interface from the engine - if ((*_slEngineObject)->GetInterface(_slEngineObject, - SL_IID_DEVICEVOLUME, - (void*) &_slMicVolume) - != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to create Output Mix object"); - } - } - - if (_slMicVolume != NULL) { - WebRtc_Word32 vol(0); - vol = ((volume * (_maxSpeakerVolume - _minSpeakerVolume) + - (int) (255 / 2)) / (255)) + _minSpeakerVolume; - if ((*_slMicVolume)->SetVolume(_slMicVolume, _micDeviceId, vol) - != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to create Output Mix object"); - } - } - - return 0; + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " OpenSL doesn't support contolling Mic volume yet"); + // TODO(leozwang): Add microphone volume control when OpenSL apis + // are available. 
+ return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::MicrophoneVolume( - WebRtc_UWord32& /*volume*/) const { - return -1; + WebRtc_UWord32& volume) const { + return -1; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::MaxMicrophoneVolume( - WebRtc_UWord32& /*maxVolume*/) const { - return 0; + WebRtc_UWord32& maxVolume) const { + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::MinMicrophoneVolume( WebRtc_UWord32& minVolume) const { - - minVolume = 0; - return 0; + minVolume = 0; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::MicrophoneVolumeStepSize( WebRtc_UWord16& stepSize) const { - - stepSize = 1; - return 0; + stepSize = 1; + return 0; } WebRtc_Word16 AudioDeviceAndroidOpenSLES::PlayoutDevices() { - - return 1; + return 1; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetPlayoutDevice( WebRtc_UWord16 index) { + if (is_play_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Playout already initialized"); + return -1; + } + if (0 != index) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Device index is out of range [0,0]"); + return -1; + } - if (_playIsInitialized) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Playout already initialized"); - return -1; - } + // Do nothing but set a flag, this is to have consistent behaviour + // with other platforms. 
+ is_playout_dev_specified_ = true; - if (0 != index) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Device index is out of range [0,0]"); - return -1; - } - - // Do nothing but set a flag, this is to have consistent behaviour - // with other platforms - _playoutDeviceIsSpecified = true; - - return 0; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetPlayoutDevice( - AudioDeviceModule::WindowsDeviceType /*device*/) { + AudioDeviceModule::WindowsDeviceType device) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " API call not supported on this platform"); - return -1; + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " API call not supported on this platform"); + return -1; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::PlayoutDeviceName( WebRtc_UWord16 index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) { + if (0 != index) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Device index is out of range [0,0]"); + return -1; + } - if (0 != index) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Device index is out of range [0,0]"); - return -1; - } + // Return empty string + memset(name, 0, kAdmMaxDeviceNameSize); - // Return empty string - memset(name, 0, kAdmMaxDeviceNameSize); - - if (guid) { - memset(guid, 0, kAdmMaxGuidSize); - } - - return 0; + if (guid) { + memset(guid, 0, kAdmMaxGuidSize); + } + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::RecordingDeviceName( WebRtc_UWord16 index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) { + if (0 != index) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Device index is out of range [0,0]"); + return -1; + } - if (0 != index) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Device index is out of range [0,0]"); - return -1; - } + // Return empty string + memset(name, 0, kAdmMaxDeviceNameSize); - // Return empty string - memset(name, 0, kAdmMaxDeviceNameSize); - - if (guid) { - memset(guid, 
0, kAdmMaxGuidSize); - } - - return 0; + if (guid) { + memset(guid, 0, kAdmMaxGuidSize); + } + return 0; } WebRtc_Word16 AudioDeviceAndroidOpenSLES::RecordingDevices() { - - return 1; + return 1; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetRecordingDevice( WebRtc_UWord16 index) { + if (is_rec_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Recording already initialized"); + return -1; + } - if (_recIsInitialized) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Recording already initialized"); - return -1; - } + if (0 != index) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Device index is out of range [0,0]"); + return -1; + } - if (0 != index) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Device index is out of range [0,0]"); - return -1; - } - - // Do nothing but set a flag, this is to have consistent behaviour with - // other platforms - _recordingDeviceIsSpecified = true; - - return 0; + // Do nothing but set a flag, this is to have consistent behaviour with + // other platforms. 
+ is_recording_dev_specified_ = true; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetRecordingDevice( - AudioDeviceModule::WindowsDeviceType /*device*/) { - - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " API call not supported on this platform"); - return -1; + AudioDeviceModule::WindowsDeviceType device) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " API call not supported on this platform"); + return -1; } -WebRtc_Word32 AudioDeviceAndroidOpenSLES::PlayoutIsAvailable(bool& available) { - - available = false; - - // Try to initialize the playout side - WebRtc_Word32 res = InitPlayout(); - - // Cancel effect of initialization - StopPlayout(); - - if (res != -1) { - available = true; - } - - return res; +WebRtc_Word32 AudioDeviceAndroidOpenSLES::PlayoutIsAvailable( + bool& available) { + available = false; + WebRtc_Word32 res = InitPlayout(); + StopPlayout(); + if (res != -1) { + available = true; + } + return res; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::RecordingIsAvailable( bool& available) { - - available = false; - - // Try to initialize the playout side - WebRtc_Word32 res = InitRecording(); - - // Cancel effect of initialization - StopRecording(); - - if (res != -1) { - available = true; - } - - return res; + available = false; + WebRtc_Word32 res = InitRecording(); + StopRecording(); + if (res != -1) { + available = true; + } + return res; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::InitPlayout() { + CriticalSectionScoped lock(&crit_sect_); + if (!is_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Not initialized"); + return -1; + } - CriticalSectionScoped lock(&_critSect); + if (is_playing_) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " Playout already started"); + return -1; + } - if (!_initialized) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Not initialized"); - return -1; - } + if (!is_playout_dev_specified_) { + WEBRTC_OPENSL_TRACE(kTraceError, 
kTraceAudioDevice, id_, + " Playout device is not specified"); + return -1; + } - if (_playing) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " Playout already started"); - return -1; - } - - if (!_playoutDeviceIsSpecified) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Playout device is not specified"); - return -1; - } - - if (_playIsInitialized) { - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " Playout already initialized"); - return 0; - } - - // Initialize the speaker - if (InitSpeaker() == -1) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " InitSpeaker() failed"); - } - - if (_slEngineObject == NULL || _slEngine == NULL) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " SLObject or Engiine is NULL"); - return -1; - } - - WebRtc_Word32 res = -1; - SLDataFormat_PCM pcm; - SLDataSource audioSource; - SLDataLocator_AndroidSimpleBufferQueue simpleBufferQueue; - SLDataSink audioSink; - SLDataLocator_OutputMix locator_outputmix; - - // Create Output Mix object to be used by player - SLInterfaceID ids[N_MAX_INTERFACES]; - SLboolean req[N_MAX_INTERFACES]; - for (unsigned int i = 0; i < N_MAX_INTERFACES; i++) { - ids[i] = SL_IID_NULL; - req[i] = SL_BOOLEAN_FALSE; - } - ids[0] = SL_IID_ENVIRONMENTALREVERB; - res = (*_slEngine)->CreateOutputMix(_slEngine, &_slOutputMixObject, 1, ids, - req); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to get SL Output Mix object"); - return -1; - } - // Realizing the Output Mix object in synchronous mode. 
- res = (*_slOutputMixObject)->Realize(_slOutputMixObject, SL_BOOLEAN_FALSE); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to realize SL Output Mix object"); - return -1; - } - - // The code below can be moved to startplayout instead - /* Setup the data source structure for the buffer queue */ - simpleBufferQueue.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE; - /* Two buffers in our buffer queue, to have low latency*/ - simpleBufferQueue.numBuffers = N_PLAY_QUEUE_BUFFERS; - // TODO(xians), figure out if we should support stereo playout for android - /* Setup the format of the content in the buffer queue */ - pcm.formatType = SL_DATAFORMAT_PCM; - pcm.numChannels = 1; - // _samplingRateOut is initilized in InitSampleRate() - pcm.samplesPerSec = SL_SAMPLINGRATE_16; - pcm.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16; - pcm.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16; - pcm.channelMask = SL_SPEAKER_FRONT_CENTER; - pcm.endianness = SL_BYTEORDER_LITTLEENDIAN; - audioSource.pFormat = (void *) &pcm; - audioSource.pLocator = (void *) &simpleBufferQueue; - /* Setup the data sink structure */ - locator_outputmix.locatorType = SL_DATALOCATOR_OUTPUTMIX; - locator_outputmix.outputMix = _slOutputMixObject; - audioSink.pLocator = (void *) &locator_outputmix; - audioSink.pFormat = NULL; - - // Set arrays required[] and iidArray[] for SEEK interface - // (PlayItf is implicit) - ids[0] = SL_IID_BUFFERQUEUE; - ids[1] = SL_IID_EFFECTSEND; - req[0] = SL_BOOLEAN_TRUE; - req[1] = SL_BOOLEAN_TRUE; - // Create the music player - res = (*_slEngine)->CreateAudioPlayer(_slEngine, &_slPlayer, &audioSource, - &audioSink, 2, ids, req); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to create Audio Player"); - return -1; - } - - // Realizing the player in synchronous mode. 
- res = (*_slPlayer)->Realize(_slPlayer, SL_BOOLEAN_FALSE); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to realize the player"); - return -1; - } - // Get seek and play interfaces - res = (*_slPlayer)->GetInterface(_slPlayer, SL_IID_PLAY, - (void*) &_slPlayerPlay); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to get Player interface"); - return -1; - } - res = (*_slPlayer)->GetInterface(_slPlayer, SL_IID_BUFFERQUEUE, - (void*) &_slPlayerSimpleBufferQueue); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to get Player Simple Buffer Queue interface"); - return -1; - } - - // Setup to receive buffer queue event callbacks - res = (*_slPlayerSimpleBufferQueue)->RegisterCallback( - _slPlayerSimpleBufferQueue, - PlayerSimpleBufferQueueCallback, - this); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to register Player Callback"); - return -1; - } - _playIsInitialized = true; + if (is_play_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceInfo, kTraceAudioDevice, id_, + " Playout already initialized"); return 0; + } + + // Initialize the speaker + if (InitSpeaker() == -1) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " InitSpeaker() failed"); + } + + if (sles_engine_ == NULL || sles_engine_itf_ == NULL) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " SLObject or Engiine is NULL"); + return -1; + } + + SLDataLocator_AndroidSimpleBufferQueue simple_buf_queue = { + SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, + static_cast(N_PLAY_QUEUE_BUFFERS) + }; + SLDataSource audio_source = { &simple_buf_queue, &player_pcm_ }; + SLDataLocator_OutputMix locator_outputmix; + SLDataSink audio_sink = { &locator_outputmix, NULL }; + + // Create Output Mix object to be used by player. 
+ WebRtc_Word32 res = -1; + res = (*sles_engine_itf_)->CreateOutputMix(sles_engine_itf_, + &sles_output_mixer_, + 0, + NULL, + NULL); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to get SL Output Mix object"); + return -1; + } + // Realizing the Output Mix object in synchronous mode. + res = (*sles_output_mixer_)->Realize(sles_output_mixer_, SL_BOOLEAN_FALSE); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to realize SL Output Mix object"); + return -1; + } + + // The code below can be moved to startplayout instead + // Setup the data source structure for the buffer queue. + player_pcm_.formatType = SL_DATAFORMAT_PCM; + player_pcm_.numChannels = N_PLAY_CHANNELS; + if (speaker_sampling_rate_ == 44000) { + player_pcm_.samplesPerSec = 44100 * 1000; + } else { + player_pcm_.samplesPerSec = speaker_sampling_rate_ * 1000; + } + player_pcm_.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16; + player_pcm_.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16; + if (1 == player_pcm_.numChannels) { + player_pcm_.channelMask = SL_SPEAKER_FRONT_CENTER; + } else if (2 == player_pcm_.numChannels) { + player_pcm_.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; + } else { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " %d player channels not supported", N_PLAY_CHANNELS); + } + + player_pcm_.endianness = SL_BYTEORDER_LITTLEENDIAN; + // Setup the data sink structure. 
+ locator_outputmix.locatorType = SL_DATALOCATOR_OUTPUTMIX; + locator_outputmix.outputMix = sles_output_mixer_; + + SLInterfaceID ids[N_MAX_INTERFACES] = { + SL_IID_BUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION }; + SLboolean req[N_MAX_INTERFACES] = { + SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE }; + res = (*sles_engine_itf_)->CreateAudioPlayer(sles_engine_itf_, + &sles_player_, &audio_source, + &audio_sink, 2, ids, req); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to create AudioPlayer"); + return -1; + } + + // Realizing the player in synchronous mode. + res = (*sles_player_)->Realize(sles_player_, SL_BOOLEAN_FALSE); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to realize the player"); + return -1; + } + res = (*sles_player_)->GetInterface( + sles_player_, SL_IID_PLAY, + static_cast(&sles_player_itf_)); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to get Player interface"); + return -1; + } + res = (*sles_player_)->GetInterface( + sles_player_, SL_IID_BUFFERQUEUE, + static_cast(&sles_player_sbq_itf_)); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to get Player SimpleBufferQueue interface"); + return -1; + } + + // Setup to receive buffer queue event callbacks + res = (*sles_player_sbq_itf_)->RegisterCallback( + sles_player_sbq_itf_, + PlayerSimpleBufferQueueCallback, + this); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to register Player Callback"); + return -1; + } + is_play_initialized_ = true; + return 0; } -// ---------------------------------------------------------------------------- -// InitRecording -// ---------------------------------------------------------------------------- - WebRtc_Word32 AudioDeviceAndroidOpenSLES::InitRecording() { + CriticalSectionScoped 
lock(&crit_sect_); - CriticalSectionScoped lock(&_critSect); + if (!is_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Not initialized"); + return -1; + } - if (!_initialized) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Not initialized"); - return -1; - } + if (is_recording_) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " Recording already started"); + return -1; + } - if (_recording) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " Recording already started"); - return -1; - } + if (!is_recording_dev_specified_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Recording device is not specified"); + return -1; + } - if (!_recordingDeviceIsSpecified) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Recording device is not specified"); - return -1; - } - - if (_recIsInitialized) { - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " Recording already initialized"); - return 0; - } - - // Initialize the microphone - if (InitMicrophone() == -1) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " InitMicrophone() failed"); - } - - if (_slEngineObject == NULL || _slEngine == NULL) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Recording object is NULL"); - return -1; - } - - WebRtc_Word32 res(-1); - SLDataSource audioSource; - SLDataLocator_IODevice micLocator; - SLDataSink audioSink; - SLDataFormat_PCM pcm; - SLDataLocator_AndroidSimpleBufferQueue simpleBufferQueue; - - // Setup the data source structure - micLocator.locatorType = SL_DATALOCATOR_IODEVICE; - micLocator.deviceType = SL_IODEVICE_AUDIOINPUT; - micLocator.deviceID = SL_DEFAULTDEVICEID_AUDIOINPUT; //micDeviceID; - micLocator.device = NULL; - audioSource.pLocator = (void *) &micLocator; - audioSource.pFormat = NULL; - - /* Setup the data source structure for the buffer queue */ - simpleBufferQueue.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE; - simpleBufferQueue.numBuffers = 
N_REC_QUEUE_BUFFERS; - /* Setup the format of the content in the buffer queue */ - pcm.formatType = SL_DATAFORMAT_PCM; - pcm.numChannels = 1; - // _samplingRateIn is initialized in initSampleRate() - pcm.samplesPerSec = SL_SAMPLINGRATE_16; - pcm.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16; - pcm.containerSize = 16; - pcm.channelMask = SL_SPEAKER_FRONT_CENTER; - pcm.endianness = SL_BYTEORDER_LITTLEENDIAN; - audioSink.pFormat = (void *) &pcm; - audioSink.pLocator = (void *) &simpleBufferQueue; - - const SLInterfaceID id[1] = { SL_IID_ANDROIDSIMPLEBUFFERQUEUE }; - const SLboolean req[1] = { SL_BOOLEAN_TRUE }; - res = (*_slEngine)->CreateAudioRecorder(_slEngine, &_slRecorder, - &audioSource, &audioSink, 1, id, - req); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to create Recorder"); - return -1; - } - - // Realizing the recorder in synchronous mode. - res = (*_slRecorder)->Realize(_slRecorder, SL_BOOLEAN_FALSE); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to realize Recorder"); - return -1; - } - - // Get the RECORD interface - it is an implicit interface - res = (*_slRecorder)->GetInterface(_slRecorder, SL_IID_RECORD, - (void*) &_slRecorderRecord); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to get Recorder interface"); - return -1; - } - - // Get the simpleBufferQueue interface - res = (*_slRecorder)->GetInterface(_slRecorder, - SL_IID_ANDROIDSIMPLEBUFFERQUEUE, - (void*) &_slRecorderSimpleBufferQueue); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to get Recorder Simple Buffer Queue"); - return -1; - } - - // Setup to receive buffer queue event callbacks - res = (*_slRecorderSimpleBufferQueue)->RegisterCallback( - _slRecorderSimpleBufferQueue, - RecorderSimpleBufferQueueCallback, - this); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, 
kTraceAudioDevice, _id, - " failed to register Recorder Callback"); - return -1; - } - - _recIsInitialized = true; + if (is_rec_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceInfo, kTraceAudioDevice, id_, + " Recording already initialized"); return 0; + } + + // Initialize the microphone + if (InitMicrophone() == -1) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " InitMicrophone() failed"); + } + + if (sles_engine_ == NULL || sles_engine_itf_ == NULL) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Recording object is NULL"); + return -1; + } + + SLDataLocator_IODevice micLocator = { + SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT, + SL_DEFAULTDEVICEID_AUDIOINPUT, NULL }; + SLDataSource audio_source = { &micLocator, NULL }; + SLDataLocator_AndroidSimpleBufferQueue simple_buf_queue = { + SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, + static_cast(N_REC_QUEUE_BUFFERS) + }; + SLDataSink audio_sink = { &simple_buf_queue, &record_pcm_ }; + + // Setup the format of the content in the buffer queue + record_pcm_.formatType = SL_DATAFORMAT_PCM; + record_pcm_.numChannels = N_REC_CHANNELS; + if (speaker_sampling_rate_ == 44000) { + record_pcm_.samplesPerSec = 44100 * 1000; + } else { + record_pcm_.samplesPerSec = speaker_sampling_rate_ * 1000; + } + record_pcm_.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16; + record_pcm_.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16; + if (1 == record_pcm_.numChannels) { + record_pcm_.channelMask = SL_SPEAKER_FRONT_CENTER; + } else if (2 == record_pcm_.numChannels) { + record_pcm_.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; + } else { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " %d rec channels not supported", N_REC_CHANNELS); + } + record_pcm_.endianness = SL_BYTEORDER_LITTLEENDIAN; + + const SLInterfaceID id[2] = { + SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION }; + const SLboolean req[2] = { + SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE }; + WebRtc_Word32 res = -1; + res = 
(*sles_engine_itf_)->CreateAudioRecorder(sles_engine_itf_, + &sles_recorder_, + &audio_source, + &audio_sink, + 2, + id, + req); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to create Recorder"); + return -1; + } + + // Realizing the recorder in synchronous mode. + res = (*sles_recorder_)->Realize(sles_recorder_, SL_BOOLEAN_FALSE); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to realize Recorder"); + return -1; + } + + // Get the RECORD interface - it is an implicit interface + res = (*sles_recorder_)->GetInterface( + sles_recorder_, SL_IID_RECORD, + static_cast(&sles_recorder_itf_)); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to get Recorder interface"); + return -1; + } + + // Get the simpleBufferQueue interface + res = (*sles_recorder_)->GetInterface( + sles_recorder_, + SL_IID_ANDROIDSIMPLEBUFFERQUEUE, + static_cast(&sles_recorder_sbq_itf_)); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to get Recorder Simple Buffer Queue"); + return -1; + } + + // Setup to receive buffer queue event callbacks + res = (*sles_recorder_sbq_itf_)->RegisterCallback( + sles_recorder_sbq_itf_, + RecorderSimpleBufferQueueCallback, + this); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to register Recorder Callback"); + return -1; + } + + is_rec_initialized_ = true; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::StartRecording() { + CriticalSectionScoped lock(&crit_sect_); - CriticalSectionScoped lock(&_critSect); - - if (!_recIsInitialized) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Recording not initialized"); - return -1; - } - - if (_recording) { - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " Recording already started"); - return 0; - } - - if 
(_slRecorderRecord == NULL) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " RecordITF is NULL"); - return -1; - } - - if (_slRecorderSimpleBufferQueue == NULL) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Recorder Simple Buffer Queue is NULL"); - return -1; - } - - // Make sure the queues are empty. - assert(rec_callback_queue_.empty()); - assert(rec_available_queue_.empty()); - assert(rec_worker_queue_.empty()); - - // Reset recording buffer and put them to the available buffer queue. - memset(rec_buffer_, 0, sizeof(rec_buffer_)); // empty the queue - for (int i = 0; i < N_REC_BUFFERS; ++i) { - rec_available_queue_.push(rec_buffer_[i]); - } - - const char* threadName = "sles_audio_capture_thread"; - _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc, this, - kRealtimePriority, threadName); - if (_ptrThreadRec == NULL) - { - WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, - " failed to create the rec audio thread"); - return -1; - } - - unsigned int threadID(0); - if (!_ptrThreadRec->Start(threadID)) - { - WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, - " failed to start the rec audio thread"); - delete _ptrThreadRec; - _ptrThreadRec = NULL; - return -1; - } - _recThreadID = threadID; - _recThreadIsInitialized = true; - _recWarning = 0; - _recError = 0; - - // Enqueue N_REC_QUEUE_BUFFERS-1 zero buffers to get the ball rolling - // find out how it behaves when the sample rate is 44100 - WebRtc_Word32 res(-1); - WebRtc_UWord32 nSample10ms = _adbSampleRate / 100; - for (int i = 0; i < (N_REC_QUEUE_BUFFERS - 1); ++i) { - int8_t* buf = rec_available_queue_.front(); - rec_available_queue_.pop(); - rec_callback_queue_.push(buf); - // We assign 10ms buffer to each queue, size given in bytes. 
- res = (*_slRecorderSimpleBufferQueue)->Enqueue( - _slRecorderSimpleBufferQueue, - buf, - 2 * nSample10ms); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to Enqueue Empty Buffer to recorder"); - return -1; - } - } - - // Record the audio - res = (*_slRecorderRecord)->SetRecordState(_slRecorderRecord, - SL_RECORDSTATE_RECORDING); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to start recording"); - return -1; - } - _recording = true; + if (!is_rec_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Recording not initialized"); + return -1; + } + if (is_recording_) { + WEBRTC_OPENSL_TRACE(kTraceInfo, kTraceAudioDevice, id_, + " Recording already started"); return 0; + } + + if (sles_recorder_itf_ == NULL) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " RecordITF is NULL"); + return -1; + } + + if (sles_recorder_sbq_itf_ == NULL) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Recorder Simple Buffer Queue is NULL"); + return -1; + } + + memset(rec_buf_, 0, sizeof(rec_buf_)); + memset(rec_voe_buf_, 0, sizeof(rec_voe_buf_)); + WebRtc_UWord32 num_bytes = + N_REC_CHANNELS * sizeof(int16_t) * mic_sampling_rate_ / 100; + + while (!rec_queue_.empty()) + rec_queue_.pop(); + while (!rec_voe_audio_queue_.empty()) + rec_voe_audio_queue_.pop(); + while (!rec_voe_ready_queue_.empty()) + rec_voe_ready_queue_.pop(); + + for (int i = 0; i < N_REC_QUEUE_BUFFERS; ++i) { + rec_voe_ready_queue_.push(rec_voe_buf_[i]); + } + + WebRtc_Word32 res = -1; + for (int i = 0; i < N_REC_QUEUE_BUFFERS; ++i) { + // We assign 10ms buffer to each queue, size given in bytes. 
+ res = (*sles_recorder_sbq_itf_)->Enqueue( + sles_recorder_sbq_itf_, + static_cast(rec_buf_[i]), + num_bytes); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + "Recorder Enqueue failed:%d,%d", i, res); + break; + } else { + rec_queue_.push(rec_buf_[i]); + } + } + + // Record the audio + res = (*sles_recorder_itf_)->SetRecordState(sles_recorder_itf_, + SL_RECORDSTATE_RECORDING); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to start recording"); + return -1; + } + + // Start rec thread and playout thread + rec_thread_ = ThreadWrapper::CreateThread( + RecThreadFunc, + this, + kRealtimePriority, + "opensl_capture_thread"); + if (rec_thread_ == NULL) { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, id_, + " failed to create the rec audio thread"); + return -1; + } + + unsigned int thread_id = 0; + if (!rec_thread_->Start(thread_id)) { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, id_, + " failed to start the rec audio thread"); + delete rec_thread_; + rec_thread_ = NULL; + return -1; + } + rec_thread_id_ = thread_id; + + is_recording_ = true; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::StopRecording() { { - CriticalSectionScoped lock(&_critSect); + CriticalSectionScoped lock(&crit_sect_); - if (!_recIsInitialized) { - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " Recording is not initialized"); - return 0; + if (!is_rec_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceInfo, kTraceAudioDevice, id_, + " Recording is not initialized"); + return 0; + } + + if ((sles_recorder_itf_ != NULL) && (sles_recorder_ != NULL)) { + WebRtc_Word32 res = (*sles_recorder_itf_)->SetRecordState( + sles_recorder_itf_, + SL_RECORDSTATE_STOPPED); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to stop recording"); + return -1; + } + res = (*sles_recorder_sbq_itf_)->Clear( + sles_recorder_sbq_itf_); + if (res != 
SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to clear recorder buffer queue"); + return -1; + } + + // Destroy the recorder object + (*sles_recorder_)->Destroy(sles_recorder_); + sles_recorder_ = NULL; + sles_recorder_itf_ = NULL; } } - // Stop the recording thread - if (_ptrThreadRec != NULL) { - bool res = _ptrThreadRec->Stop(); - if (!res) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "Failed to stop Capture thread "); + // Stop the playout thread + if (rec_thread_) { + if (rec_thread_->Stop()) { + delete rec_thread_; + rec_thread_ = NULL; } else { - delete _ptrThreadRec; - _ptrThreadRec = NULL; - _recThreadIsInitialized = false; + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, + "Failed to stop recording thread "); + return -1; } } - CriticalSectionScoped lock(&_critSect); - if ((_slRecorderRecord != NULL) && (_slRecorder != NULL)) { - // Record the audio - int res = (*_slRecorderRecord)->SetRecordState(_slRecorderRecord, - SL_RECORDSTATE_STOPPED); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to stop recording"); - return -1; - } - res = (*_slRecorderSimpleBufferQueue)->Clear(_slRecorderSimpleBufferQueue); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to clear recorder buffer queue"); - return -1; - } - - // Destroy the recorder object - (*_slRecorder)->Destroy(_slRecorder); - _slRecorder = NULL; - _slRecorderRecord = NULL; - _slRecorderRecord = NULL; - } - - _recIsInitialized = false; - _recording = false; - _recWarning = 0; - _recError = 0; - is_thread_priority_set_ = false; - - // Clear the callback queue. - while(!rec_callback_queue_.empty()) - rec_callback_queue_.pop(); - - // Clear the available buffer queue. - while(!rec_available_queue_.empty()) - rec_available_queue_.pop(); - - // Clear the buffer queue. 
- while(!rec_worker_queue_.empty()) - rec_worker_queue_.pop(); + CriticalSectionScoped lock(&crit_sect_); + is_rec_initialized_ = false; + is_recording_ = false; + rec_warning_ = 0; + rec_error_ = 0; return 0; } bool AudioDeviceAndroidOpenSLES::RecordingIsInitialized() const { - - return _recIsInitialized; + return is_rec_initialized_; } - bool AudioDeviceAndroidOpenSLES::Recording() const { - - return _recording; + return is_recording_; } bool AudioDeviceAndroidOpenSLES::PlayoutIsInitialized() const { - - return _playIsInitialized; + return is_play_initialized_; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::StartPlayout() { + int i; + CriticalSectionScoped lock(&crit_sect_); - CriticalSectionScoped lock(&_critSect); - - if (!_playIsInitialized) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " Playout not initialized"); - return -1; - } - - if (_playing) { - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " Playout already started"); - return 0; - } - - if (_slPlayerPlay == NULL) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " PlayItf is NULL"); - return -1; - } - if (_slPlayerSimpleBufferQueue == NULL) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " PlayerSimpleBufferQueue is NULL"); - return -1; - } - - _recQueueSeq = 0; - - WebRtc_Word32 res(-1); - /* Enqueue a set of zero buffers to get the ball rolling */ - WebRtc_UWord32 nSample10ms = _adbSampleRate / 100; - WebRtc_Word8 playBuffer[2 * nSample10ms]; - WebRtc_UWord32 noSamplesOut(0); - { - noSamplesOut = _ptrAudioBuffer->RequestPlayoutData(nSample10ms); - //Lock(); - // Get data from Audio Device Buffer - noSamplesOut = _ptrAudioBuffer->GetPlayoutData(playBuffer); - // Insert what we have in data buffer - memcpy(_playQueueBuffer[_playQueueSeq], playBuffer, 2 * noSamplesOut); - //UnLock(); - - //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - // "_playQueueSeq (%u) noSamplesOut (%d)", _playQueueSeq, - //noSamplesOut); - // write the buffer data we got from VoE into the device - 
res = (*_slPlayerSimpleBufferQueue)->Enqueue( - _slPlayerSimpleBufferQueue, - (void*) _playQueueBuffer[_playQueueSeq], - 2 * noSamplesOut); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " player simpler buffer queue Enqueue failed, %d", - noSamplesOut); - //return ; dong return - } - _playQueueSeq = (_playQueueSeq + 1) % N_PLAY_QUEUE_BUFFERS; - } - - // Play the PCM samples using a buffer queue - res = (*_slPlayerPlay)->SetPlayState(_slPlayerPlay, SL_PLAYSTATE_PLAYING); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to start playout"); - return -1; - } - - _playWarning = 0; - _playError = 0; - _playing = true; + if (!is_play_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Playout not initialized"); + return -1; + } + if (is_playing_) { + WEBRTC_OPENSL_TRACE(kTraceInfo, kTraceAudioDevice, id_, + " Playout already started"); return 0; + } + + if (sles_player_itf_ == NULL) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " PlayItf is NULL"); + return -1; + } + if (sles_player_sbq_itf_ == NULL) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " PlayerSimpleBufferQueue is NULL"); + return -1; + } + + WebRtc_UWord32 num_bytes = + N_PLAY_CHANNELS * sizeof(int16_t) * speaker_sampling_rate_ / 100; + + memset(play_buf_, 0, sizeof(play_buf_)); + + while (!play_queue_.empty()) + play_queue_.pop(); + + WebRtc_Word32 res = -1; + for (i = 0; i < std::min(2, static_cast(N_PLAY_QUEUE_BUFFERS)); ++i) { + res = (*sles_player_sbq_itf_)->Enqueue( + sles_player_sbq_itf_, + static_cast(play_buf_[i]), + num_bytes); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " player simpler buffer Enqueue failed:%d,%d", + i, res); + break; + } else { + play_queue_.push(play_buf_[i]); + } + } + + res = (*sles_player_itf_)->SetPlayState( + sles_player_itf_, SL_PLAYSTATE_PLAYING); + if (res != 
SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to start playout"); + return -1; + } + + play_warning_ = 0; + play_error_ = 0; + is_playing_ = true; + + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::StopPlayout() { - - CriticalSectionScoped lock(&_critSect); - - if (!_playIsInitialized) { - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " Playout is not initialized"); - return 0; + { + CriticalSectionScoped lock(&crit_sect_); + if (!is_play_initialized_) { + WEBRTC_OPENSL_TRACE(kTraceInfo, kTraceAudioDevice, id_, + " Playout is not initialized"); + return 0; } - if ((_slPlayerPlay != NULL) && (_slOutputMixObject == NULL) && (_slPlayer - == NULL)) { - // Make sure player is stopped - WebRtc_Word32 res = - (*_slPlayerPlay)->SetPlayState(_slPlayerPlay, - SL_PLAYSTATE_STOPPED); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to stop playout"); - return -1; - } - res = (*_slPlayerSimpleBufferQueue)->Clear(_slPlayerSimpleBufferQueue); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to clear recorder buffer queue"); - return -1; - } + if (!sles_player_itf_ && !sles_output_mixer_ && !sles_player_) { + // Make sure player is stopped + WebRtc_Word32 res = + (*sles_player_itf_)->SetPlayState(sles_player_itf_, + SL_PLAYSTATE_STOPPED); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to stop playout"); + return -1; + } + res = (*sles_player_sbq_itf_)->Clear(sles_player_sbq_itf_); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " failed to clear player buffer queue"); + return -1; + } - // Destroy the player - (*_slPlayer)->Destroy(_slPlayer); - // Destroy Output Mix object - (*_slOutputMixObject)->Destroy(_slOutputMixObject); - _slPlayer = NULL; - _slPlayerPlay = NULL; - _slPlayerSimpleBufferQueue = NULL; - _slOutputMixObject = 
NULL; + // Destroy the player + (*sles_player_)->Destroy(sles_player_); + // Destroy Output Mix object + (*sles_output_mixer_)->Destroy(sles_output_mixer_); + sles_player_ = NULL; + sles_player_itf_ = NULL; + sles_player_sbq_itf_ = NULL; + sles_output_mixer_ = NULL; } + } - _playIsInitialized = false; - _playing = false; - _playWarning = 0; - _playError = 0; - _playQueueSeq = 0; + CriticalSectionScoped lock(&crit_sect_); + is_play_initialized_ = false; + is_playing_ = false; + play_warning_ = 0; + play_error_ = 0; - return 0; + return 0; } -WebRtc_Word32 AudioDeviceAndroidOpenSLES::PlayoutDelay(WebRtc_UWord16& delayMS) const { - delayMS = _playoutDelay; - - return 0; +WebRtc_Word32 AudioDeviceAndroidOpenSLES::PlayoutDelay( + WebRtc_UWord16& delayMS) const { + delayMS = playout_delay_; + return 0; } -WebRtc_Word32 AudioDeviceAndroidOpenSLES::RecordingDelay(WebRtc_UWord16& delayMS) const { - delayMS = _recordingDelay; - - return 0; +WebRtc_Word32 AudioDeviceAndroidOpenSLES::RecordingDelay( + WebRtc_UWord16& delayMS) const { + delayMS = recording_delay_; + return 0; } bool AudioDeviceAndroidOpenSLES::Playing() const { - - return _playing; + return is_playing_; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetPlayoutBuffer( - const AudioDeviceModule::BufferType /*type*/, - WebRtc_UWord16 /*sizeMS*/) { - - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " API call not supported on this platform"); - return -1; + const AudioDeviceModule::BufferType type, + WebRtc_UWord16 sizeMS) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " API call not supported on this platform"); + return -1; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::PlayoutBuffer( AudioDeviceModule::BufferType& type, WebRtc_UWord16& sizeMS) const { - - type = AudioDeviceModule::kAdaptiveBufferSize; - sizeMS = _playoutDelay; // Set to current playout delay - - return 0; + type = AudioDeviceModule::kAdaptiveBufferSize; + sizeMS = playout_delay_; // Set to current playout delay + return 0; } 
-WebRtc_Word32 AudioDeviceAndroidOpenSLES::CPULoad(WebRtc_UWord16& /*load*/) const { - - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " API call not supported on this platform"); - return -1; +WebRtc_Word32 AudioDeviceAndroidOpenSLES::CPULoad( + WebRtc_UWord16& load) const { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " API call not supported on this platform"); + return -1; } bool AudioDeviceAndroidOpenSLES::PlayoutWarning() const { - return (_playWarning > 0); + return (play_warning_ > 0); } bool AudioDeviceAndroidOpenSLES::PlayoutError() const { - return (_playError > 0); + return (play_error_ > 0); } bool AudioDeviceAndroidOpenSLES::RecordingWarning() const { - return (_recWarning > 0); + return (rec_warning_ > 0); } bool AudioDeviceAndroidOpenSLES::RecordingError() const { - return (_recError > 0); + return (rec_error_ > 0); } void AudioDeviceAndroidOpenSLES::ClearPlayoutWarning() { - _playWarning = 0; + play_warning_ = 0; } void AudioDeviceAndroidOpenSLES::ClearPlayoutError() { - _playError = 0; + play_error_ = 0; } void AudioDeviceAndroidOpenSLES::ClearRecordingWarning() { - _recWarning = 0; + rec_warning_ = 0; } void AudioDeviceAndroidOpenSLES::ClearRecordingError() { - _recError = 0; + rec_error_ = 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::SetLoudspeakerStatus(bool enable) { - _loudSpeakerOn = enable; - return 0; + loundspeaker_on_ = enable; + return 0; } WebRtc_Word32 AudioDeviceAndroidOpenSLES::GetLoudspeakerStatus( bool& enabled) const { - - enabled = _loudSpeakerOn; - return 0; + enabled = loundspeaker_on_; + return 0; } -// ============================================================================ -// Private Methods -// ============================================================================ - void AudioDeviceAndroidOpenSLES::PlayerSimpleBufferQueueCallback( - SLAndroidSimpleBufferQueueItf queueItf, - void *pContext) { - AudioDeviceAndroidOpenSLES* ptrThis = - static_cast (pContext); - 
ptrThis->PlayerSimpleBufferQueueCallbackHandler(queueItf); + SLAndroidSimpleBufferQueueItf queue_itf, + void* p_context) { + AudioDeviceAndroidOpenSLES* audio_device = + static_cast (p_context); + audio_device->PlayerSimpleBufferQueueCallbackHandler(queue_itf); } void AudioDeviceAndroidOpenSLES::PlayerSimpleBufferQueueCallbackHandler( - SLAndroidSimpleBufferQueueItf queueItf) { - WebRtc_Word32 res; - //Lock(); - //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - //"_playQueueSeq (%u)", _playQueueSeq); - if (_playing && (_playQueueSeq < N_PLAY_QUEUE_BUFFERS)) { - //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, - //_id, "playout callback "); - unsigned int noSamp10ms = _adbSampleRate / 100; - // Max 10 ms @ samplerate kHz / 16 bit - WebRtc_Word8 playBuffer[2 * noSamp10ms]; - int noSamplesOut = 0; + SLAndroidSimpleBufferQueueItf queue_itf) { + if (is_playing_) { + const unsigned int num_samples = speaker_sampling_rate_ / 100; + const unsigned int num_bytes = + N_PLAY_CHANNELS * num_samples * sizeof(int16_t); + WebRtc_Word8 buf[PLAY_MAX_TEMP_BUF_SIZE_PER_10ms]; + WebRtc_Word8* audio; - // Assumption for implementation - // assert(PLAYBUFSIZESAMPLES == noSamp10ms); + audio = play_queue_.front(); + play_queue_.pop(); - // TODO(xians), update the playout delay - //UnLock(); - - noSamplesOut = _ptrAudioBuffer->RequestPlayoutData(noSamp10ms); - //Lock(); - // Get data from Audio Device Buffer - noSamplesOut = _ptrAudioBuffer->GetPlayoutData(playBuffer); - // Cast OK since only equality comparison - if (noSamp10ms != (unsigned int) noSamplesOut) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - "noSamp10ms (%u) != noSamplesOut (%d)", noSamp10ms, - noSamplesOut); - - if (_playWarning > 0) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " Pending play warning exists"); - } - _playWarning = 1; - } - // Insert what we have in data buffer - memcpy(_playQueueBuffer[_playQueueSeq], playBuffer, 2 * noSamplesOut); - //UnLock(); - - 
//WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - //"_playQueueSeq (%u) noSamplesOut (%d)", _playQueueSeq, noSamplesOut); - // write the buffer data we got from VoE into the device - res = (*_slPlayerSimpleBufferQueue)->Enqueue( - _slPlayerSimpleBufferQueue, - _playQueueBuffer[_playQueueSeq], - 2 * noSamplesOut); - if (res != SL_RESULT_SUCCESS) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - " player simpler buffer queue Enqueue failed, %d", - noSamplesOut); - return; - } - // update the playout delay - UpdatePlayoutDelay(noSamplesOut); - // update the play buffer sequency - _playQueueSeq = (_playQueueSeq + 1) % N_PLAY_QUEUE_BUFFERS; + int num_out = voe_audio_buffer_->RequestPlayoutData(num_samples); + num_out = voe_audio_buffer_->GetPlayoutData(buf); + if (num_samples != static_cast(num_out)) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + "num (%u) != num_out (%d)", num_samples, num_out); + play_warning_ = 1; } + memcpy(audio, buf, num_bytes); + UpdatePlayoutDelay(num_out); + + int res = (*queue_itf)->Enqueue(queue_itf, + audio, + num_bytes); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " player callback Enqueue failed, %d", res); + play_warning_ = 1; + } else { + play_queue_.push(audio); + } + } +} + +bool AudioDeviceAndroidOpenSLES::RecThreadFunc(void* context) { + return (static_cast( + context)->RecThreadFuncImpl()); } void AudioDeviceAndroidOpenSLES::RecorderSimpleBufferQueueCallback( - SLAndroidSimpleBufferQueueItf queueItf, - void *pContext) { - AudioDeviceAndroidOpenSLES* ptrThis = - static_cast (pContext); - ptrThis->RecorderSimpleBufferQueueCallbackHandler(queueItf); + SLAndroidSimpleBufferQueueItf queue_itf, + void* p_context) { + AudioDeviceAndroidOpenSLES* audio_device = + static_cast(p_context); + audio_device->RecorderSimpleBufferQueueCallbackHandler(queue_itf); } -void AudioDeviceAndroidOpenSLES::RecorderSimpleBufferQueueCallbackHandler( - 
SLAndroidSimpleBufferQueueItf queueItf) { - if (_recording) { - const unsigned int samples_10_ms = _adbSampleRate / 100; +bool AudioDeviceAndroidOpenSLES::RecThreadFuncImpl() { + if (is_recording_) { + // TODO(leozwang): Add setting correct scheduling and thread priority. + + const unsigned int num_samples = mic_sampling_rate_ / 100; + const unsigned int num_bytes = + N_REC_CHANNELS * num_samples * sizeof(int16_t); + const unsigned int total_bytes = num_bytes; + WebRtc_Word8 buf[REC_MAX_TEMP_BUF_SIZE_PER_10ms]; - // Move the buffer from the callback queue to buffer queue so that VoE can - // process the data in RecThreadProcess(). - int8_t* buf = rec_callback_queue_.front(); - rec_callback_queue_.pop(); - int8_t* new_buf = NULL; { - // |rec_available_queue_| and |rec_worker_queue_| are accessed by - // callback thread and recording thread, so we need a lock here to - // protect them. - CriticalSectionScoped lock(&_critSect); - if (!rec_available_queue_.empty()) { - // Put the data to buffer queue for VoE to process the data. - rec_worker_queue_.push(buf); - new_buf = rec_available_queue_.front(); - rec_available_queue_.pop(); - // TODO(xians): Remove the following test code once we are sure it - // won't happen anymore. - if (rec_worker_queue_.size() > 10) { - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, - "Number of buffers pending in the recording thread" - " has been increased to %d", rec_worker_queue_.size()); - } - } else { - // Didn't find an empty buffer, probably VoE is slowed on processing - // the data. Put the buffer back to the callback queue so that we can - // keep the recording rolling. But this means we are losing 10ms data. - // TODO(xians): Enlarge the buffer instead of dropping data?
- new_buf = buf; - - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - "No available buffer slot in |rec_available_queue_|" - " It will lose 10ms data"); - _recWarning = 1; + CriticalSectionScoped lock(&crit_sect_); + if (rec_voe_audio_queue_.size() <= 0) { + rec_timer_.Wait(1); + return true; } + + WebRtc_Word8* audio = rec_voe_audio_queue_.front(); + rec_voe_audio_queue_.pop(); + memcpy(buf, audio, total_bytes); + memset(audio, 0, total_bytes); + rec_voe_ready_queue_.push(audio); } - // Clear the new buffer and enqueue for new data. - memset(new_buf, 0, 2 * REC_BUF_SIZE_IN_SAMPLES); - rec_callback_queue_.push(new_buf); - if (SL_RESULT_SUCCESS != (*_slRecorderSimpleBufferQueue)->Enqueue( - _slRecorderSimpleBufferQueue, - static_cast(new_buf), - 2 * samples_10_ms)) { - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, - "Failed on Enqueue()"); - _recWarning = 1; - } - - // wake up the recording thread - _timeEventRec.Set(); - } -} - -void AudioDeviceAndroidOpenSLES::CheckErr(SLresult res) { - if (res != SL_RESULT_SUCCESS) { - // Debug printing to be placed here - exit(-1); - } -} - -void AudioDeviceAndroidOpenSLES::UpdatePlayoutDelay( - WebRtc_UWord32 nSamplePlayed) { - // currently just do some simple calculation, should we setup a timer for - // the callback to have a more accurate delay - // Android CCD asks for 10ms as the maximum warm output latency, so we - // simply add (nPlayQueueBuffer -1 + 0.5)*10ms - // This playout delay should be seldom changed - _playoutDelay = (N_PLAY_QUEUE_BUFFERS - 0.5) * 10 + N_PLAY_QUEUE_BUFFERS - * nSamplePlayed / (_adbSampleRate / 1000); -} - -void AudioDeviceAndroidOpenSLES::UpdateRecordingDelay() { - // Android CCD asks for 10ms as the maximum warm input latency, - // so we simply add 10ms - int max_warm_input_latency = 10; - int samples_per_queue_in_ms = 10; - _recordingDelay = max_warm_input_latency + ((rec_worker_queue_.size() + - N_REC_QUEUE_BUFFERS) * samples_per_queue_in_ms); -} - -WebRtc_Word32 
AudioDeviceAndroidOpenSLES::InitSampleRate() { - if (_slEngineObject == NULL) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " SL Object is NULL"); - return -1; - } - - _samplingRateIn = SL_SAMPLINGRATE_16; - _samplingRateOut = SL_SAMPLINGRATE_16; - _adbSampleRate = 16000; - - WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id, - " sample rate set to (%d)", _adbSampleRate); - return 0; - -} - -// ============================================================================ -// Thread Methods -// ============================================================================ - -bool AudioDeviceAndroidOpenSLES::RecThreadFunc(void* pThis) { - return (static_cast(pThis)->RecThreadProcess()); -} - -bool AudioDeviceAndroidOpenSLES::RecThreadProcess() { - if (!is_thread_priority_set_) { - // TODO(xians): Move the thread setting code to thread_posix.cc. Figure out - // if we should raise the priority to THREAD_PRIORITY_URGENT_AUDIO(-19). - int nice_value = -16; // THREAD_PRIORITY_AUDIO in Android. - if (setpriority(PRIO_PROCESS, syscall(__NR_gettid), nice_value)) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1, - "Failed to set nice value of thread to %d ", nice_value); - } - - is_thread_priority_set_ = true; - } - - // Wait for 12ms for the signal from device callback. In case no callback - // comes in 12ms, we check the buffer anyway. - _timeEventRec.Wait(12); - - const unsigned int noSamp10ms = _adbSampleRate / 100; - bool buffer_available = true; - while (buffer_available) { - { - CriticalSectionScoped lock(&_critSect); - if (rec_worker_queue_.empty()) - break; - - // Release the buffer from the |rec_worker_queue_| and pass the data to - // VoE. - int8_t* buf = rec_worker_queue_.front(); - rec_worker_queue_.pop(); - buffer_available = !rec_worker_queue_.empty(); - // Set the recorded buffer. - _ptrAudioBuffer->SetRecordedBuffer(buf, noSamp10ms); - - // Put the free buffer to |rec_available_queue_|. 
- rec_available_queue_.push(buf); - - // Update the recording delay. - UpdateRecordingDelay(); - } - - // Set VQE info, use clockdrift == 0 - _ptrAudioBuffer->SetVQEData(_playoutDelay, _recordingDelay, 0); - - // Deliver recorded samples at specified sample rate, mic level - // etc. to the observer using callback. - _ptrAudioBuffer->DeliverRecordedData(); + UpdateRecordingDelay(); + voe_audio_buffer_->SetRecordedBuffer(buf, num_samples); + voe_audio_buffer_->SetVQEData(playout_delay_, recording_delay_, 0); + voe_audio_buffer_->DeliverRecordedData(); } return true; } -} // namespace webrtc +void AudioDeviceAndroidOpenSLES::RecorderSimpleBufferQueueCallbackHandler( + SLAndroidSimpleBufferQueueItf queue_itf) { + if (is_recording_) { + const unsigned int num_samples = mic_sampling_rate_ / 100; + const unsigned int num_bytes = + N_REC_CHANNELS * num_samples * sizeof(int16_t); + const unsigned int total_bytes = num_bytes; + WebRtc_Word8* audio; + + { + CriticalSectionScoped lock(&crit_sect_); + audio = rec_queue_.front(); + rec_queue_.pop(); + rec_voe_audio_queue_.push(audio); + + if (rec_voe_ready_queue_.size() <= 0) { + // Log Error. + rec_error_ = 1; + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " Audio Rec thread buffers underrun"); + } else { + audio = rec_voe_ready_queue_.front(); + rec_voe_ready_queue_.pop(); + } + } + + WebRtc_Word32 res = (*queue_itf)->Enqueue(queue_itf, + audio, + total_bytes); + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_, + " recorder callback Enqueue failed, %d", res); + rec_warning_ = 1; + return; + } else { + rec_queue_.push(audio); + } + + // TODO(leozwang): OpenSL ES doesn't support AudioRecorder + // volume control now, add it when it's ready. 
+ } +} + +void AudioDeviceAndroidOpenSLES::CheckErr(SLresult res) { + if (res != SL_RESULT_SUCCESS) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " AudioDeviceAndroidOpenSLES::CheckErr(%d)", res); + exit(-1); + } +} + +void AudioDeviceAndroidOpenSLES::UpdatePlayoutDelay( + WebRtc_UWord32 nSamplePlayed) { + // TODO(leozwang): Add accurate delay estimate. + playout_delay_ = (N_PLAY_QUEUE_BUFFERS - 0.5) * 10 + + N_PLAY_QUEUE_BUFFERS * nSamplePlayed / (speaker_sampling_rate_ / 1000); +} + +void AudioDeviceAndroidOpenSLES::UpdateRecordingDelay() { + // TODO(leozwang): Add accurate delay estimate. + recording_delay_ = 10; + const WebRtc_UWord32 noSamp10ms = mic_sampling_rate_ / 100; + recording_delay_ += (N_REC_QUEUE_BUFFERS * noSamp10ms) / + (mic_sampling_rate_ / 1000); +} + +WebRtc_Word32 AudioDeviceAndroidOpenSLES::InitSampleRate() { + if (sles_engine_ == NULL) { + WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_, + " SL Object is NULL"); + return -1; + } + + mic_sampling_rate_ = N_REC_SAMPLES_PER_SEC; + speaker_sampling_rate_ = N_PLAY_SAMPLES_PER_SEC; + + WEBRTC_OPENSL_TRACE(kTraceStateInfo, kTraceAudioDevice, id_, + " mic sample rate (%d), speaker sample rate (%d)", + mic_sampling_rate_, speaker_sampling_rate_); + return 0; +} + +} // namespace webrtc diff --git a/webrtc/modules/audio_device/android/audio_device_opensles_android.h b/webrtc/modules/audio_device/android/audio_device_opensles_android.h index fabdc124fb..2b2d746380 100644 --- a/webrtc/modules/audio_device/android/audio_device_opensles_android.h +++ b/webrtc/modules/audio_device/android/audio_device_opensles_android.h @@ -8,14 +8,10 @@ * be found in the AUTHORS file in the root of the source tree.
*/ -#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_OPENSLES_ANDROID_H -#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_OPENSLES_ANDROID_H +#ifndef SRC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_ +#define SRC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_ -#include "audio_device_generic.h" -#include "critical_section_wrapper.h" - -#include // For accessing AudioDeviceAndroid.java -#include +#include #include #include @@ -23,294 +19,299 @@ #include #include -namespace webrtc -{ +#include + +#include "modules/audio_device/audio_device_generic.h" +#include "system_wrappers/interface/critical_section_wrapper.h" + +namespace webrtc { + class EventWrapper; const WebRtc_UWord32 N_MAX_INTERFACES = 3; const WebRtc_UWord32 N_MAX_OUTPUT_DEVICES = 6; const WebRtc_UWord32 N_MAX_INPUT_DEVICES = 3; -const WebRtc_UWord32 N_REC_SAMPLES_PER_SEC = 16000;//44000; // Default fs -const WebRtc_UWord32 N_PLAY_SAMPLES_PER_SEC = 16000;//44000; // Default fs +const WebRtc_UWord32 N_REC_SAMPLES_PER_SEC = 16000; // Default fs +const WebRtc_UWord32 N_PLAY_SAMPLES_PER_SEC = 16000; // Default fs -const WebRtc_UWord32 N_REC_CHANNELS = 1; // default is mono recording -const WebRtc_UWord32 N_PLAY_CHANNELS = 1; // default is mono playout +const WebRtc_UWord32 N_REC_CHANNELS = 1; +const WebRtc_UWord32 N_PLAY_CHANNELS = 1; -const WebRtc_UWord32 REC_BUF_SIZE_IN_SAMPLES = 480; // Handle max 10 ms @ 48 kHz +const WebRtc_UWord32 REC_BUF_SIZE_IN_SAMPLES = 480; const WebRtc_UWord32 PLAY_BUF_SIZE_IN_SAMPLES = 480; +const WebRtc_UWord32 REC_MAX_TEMP_BUF_SIZE_PER_10ms = + N_REC_CHANNELS * REC_BUF_SIZE_IN_SAMPLES * sizeof(int16_t); + +const WebRtc_UWord32 PLAY_MAX_TEMP_BUF_SIZE_PER_10ms = + N_PLAY_CHANNELS * PLAY_BUF_SIZE_IN_SAMPLES * sizeof(int16_t); + // Number of the buffers in playout queue -const WebRtc_UWord16 N_PLAY_QUEUE_BUFFERS = 2; +const WebRtc_UWord16 N_PLAY_QUEUE_BUFFERS = 8; // Number of buffers in recording queue -const WebRtc_UWord16 N_REC_QUEUE_BUFFERS = 2; -// Number of 10 ms 
recording blocks in rec buffer -const WebRtc_UWord16 N_REC_BUFFERS = 20; +// TODO(xian): Reduce the numbers of buffers to improve the latency. +const WebRtc_UWord16 N_REC_QUEUE_BUFFERS = 8; +// Some values returned from getMinBufferSize +// (Nexus S playout 72ms, recording 64ms) +// (Galaxy, 167ms, 44ms) +// (Nexus 7, 72ms, 48ms) +// (Xoom 92ms, 40ms) class ThreadWrapper; -class AudioDeviceAndroidOpenSLES: public AudioDeviceGeneric -{ -public: - AudioDeviceAndroidOpenSLES(const WebRtc_Word32 id); - ~AudioDeviceAndroidOpenSLES(); +class AudioDeviceAndroidOpenSLES: public AudioDeviceGeneric { + public: + explicit AudioDeviceAndroidOpenSLES(const WebRtc_Word32 id); + ~AudioDeviceAndroidOpenSLES(); - // Retrieve the currently utilized audio layer - virtual WebRtc_Word32 - ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const; + // Retrieve the currently utilized audio layer + virtual WebRtc_Word32 + ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const; // NOLINT - // Main initializaton and termination - virtual WebRtc_Word32 Init(); - virtual WebRtc_Word32 Terminate(); - virtual bool Initialized() const; + // Main initializaton and termination + virtual WebRtc_Word32 Init(); + virtual WebRtc_Word32 Terminate(); + virtual bool Initialized() const; - // Device enumeration - virtual WebRtc_Word16 PlayoutDevices(); - virtual WebRtc_Word16 RecordingDevices(); - virtual WebRtc_Word32 - PlayoutDeviceName(WebRtc_UWord16 index, - char name[kAdmMaxDeviceNameSize], - char guid[kAdmMaxGuidSize]); - virtual WebRtc_Word32 - RecordingDeviceName(WebRtc_UWord16 index, - char name[kAdmMaxDeviceNameSize], - char guid[kAdmMaxGuidSize]); + // Device enumeration + virtual WebRtc_Word16 PlayoutDevices(); + virtual WebRtc_Word16 RecordingDevices(); + virtual WebRtc_Word32 + PlayoutDeviceName(WebRtc_UWord16 index, + char name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]); + virtual WebRtc_Word32 + RecordingDeviceName(WebRtc_UWord16 index, + char 
name[kAdmMaxDeviceNameSize], + char guid[kAdmMaxGuidSize]); - // Device selection - virtual WebRtc_Word32 SetPlayoutDevice(WebRtc_UWord16 index); - virtual WebRtc_Word32 - SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device); - virtual WebRtc_Word32 SetRecordingDevice(WebRtc_UWord16 index); - virtual WebRtc_Word32 - SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device); + // Device selection + virtual WebRtc_Word32 SetPlayoutDevice(WebRtc_UWord16 index); + virtual WebRtc_Word32 + SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device); + virtual WebRtc_Word32 SetRecordingDevice(WebRtc_UWord16 index); + virtual WebRtc_Word32 + SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device); - // Audio transport initialization - virtual WebRtc_Word32 PlayoutIsAvailable(bool& available); - virtual WebRtc_Word32 InitPlayout(); - virtual bool PlayoutIsInitialized() const; - virtual WebRtc_Word32 RecordingIsAvailable(bool& available); - virtual WebRtc_Word32 InitRecording(); - virtual bool RecordingIsInitialized() const; + // Audio transport initialization + virtual WebRtc_Word32 PlayoutIsAvailable(bool& available); // NOLINT + virtual WebRtc_Word32 InitPlayout(); + virtual bool PlayoutIsInitialized() const; + virtual WebRtc_Word32 RecordingIsAvailable(bool& available); // NOLINT + virtual WebRtc_Word32 InitRecording(); + virtual bool RecordingIsInitialized() const; - // Audio transport control - virtual WebRtc_Word32 StartPlayout(); - virtual WebRtc_Word32 StopPlayout(); - virtual bool Playing() const; - virtual WebRtc_Word32 StartRecording(); - virtual WebRtc_Word32 StopRecording(); - virtual bool Recording() const; + // Audio transport control + virtual WebRtc_Word32 StartPlayout(); + virtual WebRtc_Word32 StopPlayout(); + virtual bool Playing() const; + virtual WebRtc_Word32 StartRecording(); + virtual WebRtc_Word32 StopRecording(); + virtual bool Recording() const; - // Microphone Automatic Gain Control (AGC) - virtual WebRtc_Word32 
SetAGC(bool enable); - virtual bool AGC() const; + // Microphone Automatic Gain Control (AGC) + virtual WebRtc_Word32 SetAGC(bool enable); + virtual bool AGC() const; - // Volume control based on the Windows Wave API (Windows only) - virtual WebRtc_Word32 SetWaveOutVolume(WebRtc_UWord16 volumeLeft, - WebRtc_UWord16 volumeRight); - virtual WebRtc_Word32 WaveOutVolume(WebRtc_UWord16& volumeLeft, - WebRtc_UWord16& volumeRight) const; + // Volume control based on the Windows Wave API (Windows only) + virtual WebRtc_Word32 SetWaveOutVolume(WebRtc_UWord16 volumeLeft, + WebRtc_UWord16 volumeRight); + virtual WebRtc_Word32 WaveOutVolume( + WebRtc_UWord16& volumeLeft, // NOLINT + WebRtc_UWord16& volumeRight) const; // NOLINT - // Audio mixer initialization - virtual WebRtc_Word32 SpeakerIsAvailable(bool& available); - virtual WebRtc_Word32 InitSpeaker(); - virtual bool SpeakerIsInitialized() const; - SLPlayItf playItf; - virtual WebRtc_Word32 MicrophoneIsAvailable(bool& available); - virtual WebRtc_Word32 InitMicrophone(); - virtual bool MicrophoneIsInitialized() const; + // Audio mixer initialization + virtual WebRtc_Word32 SpeakerIsAvailable(bool& available); // NOLINT + virtual WebRtc_Word32 InitSpeaker(); + virtual bool SpeakerIsInitialized() const; + virtual WebRtc_Word32 MicrophoneIsAvailable( + bool& available); + virtual WebRtc_Word32 InitMicrophone(); + virtual bool MicrophoneIsInitialized() const; - // Speaker volume controls - virtual WebRtc_Word32 SpeakerVolumeIsAvailable(bool& available); - virtual WebRtc_Word32 SetSpeakerVolume(WebRtc_UWord32 volume); - virtual WebRtc_Word32 SpeakerVolume(WebRtc_UWord32& volume) const; - virtual WebRtc_Word32 MaxSpeakerVolume(WebRtc_UWord32& maxVolume) const; - virtual WebRtc_Word32 MinSpeakerVolume(WebRtc_UWord32& minVolume) const; - virtual WebRtc_Word32 SpeakerVolumeStepSize(WebRtc_UWord16& stepSize) const; + // Speaker volume controls + virtual WebRtc_Word32 SpeakerVolumeIsAvailable( + bool& available); // NOLINT + virtual 
WebRtc_Word32 SetSpeakerVolume(WebRtc_UWord32 volume); + virtual WebRtc_Word32 SpeakerVolume( + WebRtc_UWord32& volume) const; // NOLINT + virtual WebRtc_Word32 MaxSpeakerVolume( + WebRtc_UWord32& maxVolume) const; // NOLINT + virtual WebRtc_Word32 MinSpeakerVolume( + WebRtc_UWord32& minVolume) const; // NOLINT + virtual WebRtc_Word32 SpeakerVolumeStepSize( + WebRtc_UWord16& stepSize) const; // NOLINT - // Microphone volume controls - virtual WebRtc_Word32 MicrophoneVolumeIsAvailable(bool& available); - virtual WebRtc_Word32 SetMicrophoneVolume(WebRtc_UWord32 volume); - virtual WebRtc_Word32 MicrophoneVolume(WebRtc_UWord32& volume) const; - virtual WebRtc_Word32 MaxMicrophoneVolume(WebRtc_UWord32& maxVolume) const; - virtual WebRtc_Word32 MinMicrophoneVolume(WebRtc_UWord32& minVolume) const; - virtual WebRtc_Word32 - MicrophoneVolumeStepSize(WebRtc_UWord16& stepSize) const; + // Microphone volume controls + virtual WebRtc_Word32 MicrophoneVolumeIsAvailable( + bool& available); // NOLINT + virtual WebRtc_Word32 SetMicrophoneVolume(WebRtc_UWord32 volume); + virtual WebRtc_Word32 MicrophoneVolume( + WebRtc_UWord32& volume) const; // NOLINT + virtual WebRtc_Word32 MaxMicrophoneVolume( + WebRtc_UWord32& maxVolume) const; // NOLINT + virtual WebRtc_Word32 MinMicrophoneVolume( + WebRtc_UWord32& minVolume) const; // NOLINT + virtual WebRtc_Word32 + MicrophoneVolumeStepSize(WebRtc_UWord16& stepSize) const; // NOLINT - // Speaker mute control - virtual WebRtc_Word32 SpeakerMuteIsAvailable(bool& available); - virtual WebRtc_Word32 SetSpeakerMute(bool enable); - virtual WebRtc_Word32 SpeakerMute(bool& enabled) const; + // Speaker mute control + virtual WebRtc_Word32 SpeakerMuteIsAvailable(bool& available); // NOLINT + virtual WebRtc_Word32 SetSpeakerMute(bool enable); + virtual WebRtc_Word32 SpeakerMute(bool& enabled) const; // NOLINT - // Microphone mute control - virtual WebRtc_Word32 MicrophoneMuteIsAvailable(bool& available); - virtual WebRtc_Word32 SetMicrophoneMute(bool 
enable); - virtual WebRtc_Word32 MicrophoneMute(bool& enabled) const; + // Microphone mute control + virtual WebRtc_Word32 MicrophoneMuteIsAvailable(bool& available); // NOLINT + virtual WebRtc_Word32 SetMicrophoneMute(bool enable); + virtual WebRtc_Word32 MicrophoneMute(bool& enabled) const; // NOLINT - // Microphone boost control - virtual WebRtc_Word32 MicrophoneBoostIsAvailable(bool& available); - virtual WebRtc_Word32 SetMicrophoneBoost(bool enable); - virtual WebRtc_Word32 MicrophoneBoost(bool& enabled) const; + // Microphone boost control + virtual WebRtc_Word32 MicrophoneBoostIsAvailable(bool& available); // NOLINT + virtual WebRtc_Word32 SetMicrophoneBoost(bool enable); + virtual WebRtc_Word32 MicrophoneBoost(bool& enabled) const; // NOLINT - // Stereo support - virtual WebRtc_Word32 StereoPlayoutIsAvailable(bool& available); - virtual WebRtc_Word32 SetStereoPlayout(bool enable); - virtual WebRtc_Word32 StereoPlayout(bool& enabled) const; - virtual WebRtc_Word32 StereoRecordingIsAvailable(bool& available); - virtual WebRtc_Word32 SetStereoRecording(bool enable); - virtual WebRtc_Word32 StereoRecording(bool& enabled) const; + // Stereo support + virtual WebRtc_Word32 StereoPlayoutIsAvailable(bool& available); // NOLINT + virtual WebRtc_Word32 SetStereoPlayout(bool enable); + virtual WebRtc_Word32 StereoPlayout(bool& enabled) const; // NOLINT + virtual WebRtc_Word32 StereoRecordingIsAvailable(bool& available); // NOLINT + virtual WebRtc_Word32 SetStereoRecording(bool enable); + virtual WebRtc_Word32 StereoRecording(bool& enabled) const; // NOLINT - // Delay information and control - virtual WebRtc_Word32 - SetPlayoutBuffer(const AudioDeviceModule::BufferType type, - WebRtc_UWord16 sizeMS); - virtual WebRtc_Word32 PlayoutBuffer(AudioDeviceModule::BufferType& type, - WebRtc_UWord16& sizeMS) const; - virtual WebRtc_Word32 PlayoutDelay(WebRtc_UWord16& delayMS) const; - virtual WebRtc_Word32 RecordingDelay(WebRtc_UWord16& delayMS) const; + // Delay information 
and control + virtual WebRtc_Word32 + SetPlayoutBuffer(const AudioDeviceModule::BufferType type, + WebRtc_UWord16 sizeMS); + virtual WebRtc_Word32 PlayoutBuffer( + AudioDeviceModule::BufferType& type, // NOLINT + WebRtc_UWord16& sizeMS) const; + virtual WebRtc_Word32 PlayoutDelay( + WebRtc_UWord16& delayMS) const; // NOLINT + virtual WebRtc_Word32 RecordingDelay( + WebRtc_UWord16& delayMS) const; // NOLINT - // CPU load - virtual WebRtc_Word32 CPULoad(WebRtc_UWord16& load) const; + // CPU load + virtual WebRtc_Word32 CPULoad(WebRtc_UWord16& load) const; // NOLINT - // Error and warning information - virtual bool PlayoutWarning() const; - virtual bool PlayoutError() const; - virtual bool RecordingWarning() const; - virtual bool RecordingError() const; - virtual void ClearPlayoutWarning(); - virtual void ClearPlayoutError(); - virtual void ClearRecordingWarning(); - virtual void ClearRecordingError(); + // Error and warning information + virtual bool PlayoutWarning() const; + virtual bool PlayoutError() const; + virtual bool RecordingWarning() const; + virtual bool RecordingError() const; + virtual void ClearPlayoutWarning(); + virtual void ClearPlayoutError(); + virtual void ClearRecordingWarning(); + virtual void ClearRecordingError(); - // Attach audio buffer - virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer); + // Attach audio buffer + virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer); - // Speaker audio routing - virtual WebRtc_Word32 SetLoudspeakerStatus(bool enable); - virtual WebRtc_Word32 GetLoudspeakerStatus(bool& enable) const; + // Speaker audio routing + virtual WebRtc_Word32 SetLoudspeakerStatus(bool enable); + virtual WebRtc_Word32 GetLoudspeakerStatus(bool& enable) const; // NOLINT -private: - // Lock - void Lock() - { - _critSect.Enter(); - }; - void UnLock() - { - _critSect.Leave(); - }; + private: + // Lock + void Lock() { + crit_sect_.Enter(); + }; + void UnLock() { + crit_sect_.Leave(); + }; - static void 
PlayerSimpleBufferQueueCallback( - SLAndroidSimpleBufferQueueItf queueItf, - void *pContext); - void PlayerSimpleBufferQueueCallbackHandler( - SLAndroidSimpleBufferQueueItf queueItf); - static void RecorderSimpleBufferQueueCallback( - SLAndroidSimpleBufferQueueItf queueItf, - void *pContext); - void RecorderSimpleBufferQueueCallbackHandler( - SLAndroidSimpleBufferQueueItf queueItf); - void CheckErr(SLresult res); + static void PlayerSimpleBufferQueueCallback( + SLAndroidSimpleBufferQueueItf queueItf, + void *pContext); + static void RecorderSimpleBufferQueueCallback( + SLAndroidSimpleBufferQueueItf queueItf, + void *pContext); + void PlayerSimpleBufferQueueCallbackHandler( + SLAndroidSimpleBufferQueueItf queueItf); + void RecorderSimpleBufferQueueCallbackHandler( + SLAndroidSimpleBufferQueueItf queueItf); + void CheckErr(SLresult res); - // Delay updates - void UpdateRecordingDelay(); - void UpdatePlayoutDelay(WebRtc_UWord32 nSamplePlayed); + // Delay updates + void UpdateRecordingDelay(); + void UpdatePlayoutDelay(WebRtc_UWord32 nSamplePlayed); - // Init - WebRtc_Word32 InitSampleRate(); + // Init + WebRtc_Word32 InitSampleRate(); - // Threads - static bool RecThreadFunc(void*); - static bool PlayThreadFunc(void*); - bool RecThreadProcess(); - bool PlayThreadProcess(); + // Misc + AudioDeviceBuffer* voe_audio_buffer_; + CriticalSectionWrapper& crit_sect_; + WebRtc_Word32 id_; - // Misc - AudioDeviceBuffer* _ptrAudioBuffer; - CriticalSectionWrapper& _critSect; - WebRtc_Word32 _id; + // audio unit + SLObjectItf sles_engine_; - // audio unit - SLObjectItf _slEngineObject; + // playout device + SLObjectItf sles_player_; + SLEngineItf sles_engine_itf_; + SLPlayItf sles_player_itf_; + SLAndroidSimpleBufferQueueItf sles_player_sbq_itf_; + SLObjectItf sles_output_mixer_; + SLVolumeItf sles_speaker_volume_; - // playout device - SLObjectItf _slPlayer; - SLEngineItf _slEngine; - SLPlayItf _slPlayerPlay; - SLAndroidSimpleBufferQueueItf _slPlayerSimpleBufferQueue; - 
SLObjectItf _slOutputMixObject; - SLVolumeItf _slSpeakerVolume; + // recording device + SLObjectItf sles_recorder_; + SLRecordItf sles_recorder_itf_; + SLAndroidSimpleBufferQueueItf sles_recorder_sbq_itf_; + SLDeviceVolumeItf sles_mic_volume_; + WebRtc_UWord32 mic_dev_id_; - // recording device - SLObjectItf _slRecorder; - SLRecordItf _slRecorderRecord; - SLAudioIODeviceCapabilitiesItf _slAudioIODeviceCapabilities; - SLAndroidSimpleBufferQueueItf _slRecorderSimpleBufferQueue; - SLDeviceVolumeItf _slMicVolume; + WebRtc_UWord32 play_warning_, play_error_; + WebRtc_UWord32 rec_warning_, rec_error_; - WebRtc_UWord32 _micDeviceId; - WebRtc_UWord32 _recQueueSeq; + // States + bool is_recording_dev_specified_; + bool is_playout_dev_specified_; + bool is_initialized_; + bool is_recording_; + bool is_playing_; + bool is_rec_initialized_; + bool is_play_initialized_; + bool is_mic_initialized_; + bool is_speaker_initialized_; - // Events - EventWrapper& _timeEventRec; - // Threads - ThreadWrapper* _ptrThreadRec; - WebRtc_UWord32 _recThreadID; - // TODO(xians), remove the following flag - bool _recThreadIsInitialized; + // Delay + WebRtc_UWord16 playout_delay_; + WebRtc_UWord16 recording_delay_; - // Playout buffer - WebRtc_Word8 _playQueueBuffer[N_PLAY_QUEUE_BUFFERS][2 - * PLAY_BUF_SIZE_IN_SAMPLES]; - WebRtc_UWord32 _playQueueSeq; + // AGC state + bool agc_enabled_; - // States - bool _recordingDeviceIsSpecified; - bool _playoutDeviceIsSpecified; - bool _initialized; - bool _recording; - bool _playing; - bool _recIsInitialized; - bool _playIsInitialized; - bool _micIsInitialized; - bool _speakerIsInitialized; + // Threads + ThreadWrapper* rec_thread_; + WebRtc_UWord32 rec_thread_id_; + static bool RecThreadFunc(void* context); + bool RecThreadFuncImpl(); + EventWrapper& rec_timer_; - // Warnings and errors - WebRtc_UWord16 _playWarning; - WebRtc_UWord16 _playError; - WebRtc_UWord16 _recWarning; - WebRtc_UWord16 _recError; + WebRtc_UWord32 mic_sampling_rate_; + WebRtc_UWord32 
speaker_sampling_rate_; + WebRtc_UWord32 max_speaker_vol_; + WebRtc_UWord32 min_speaker_vol_; + bool loundspeaker_on_; - // Delay - WebRtc_UWord16 _playoutDelay; - WebRtc_UWord16 _recordingDelay; + SLDataFormat_PCM player_pcm_; + SLDataFormat_PCM record_pcm_; - // AGC state - bool _AGC; + std::queue rec_queue_; + std::queue rec_voe_audio_queue_; + std::queue rec_voe_ready_queue_; + WebRtc_Word8 rec_buf_[N_REC_QUEUE_BUFFERS][ + N_REC_CHANNELS * sizeof(int16_t) * REC_BUF_SIZE_IN_SAMPLES]; + WebRtc_Word8 rec_voe_buf_[N_REC_QUEUE_BUFFERS][ + N_REC_CHANNELS * sizeof(int16_t) * REC_BUF_SIZE_IN_SAMPLES]; - // The sampling rate to use with Audio Device Buffer - WebRtc_UWord32 _adbSampleRate; - // Stored device properties - WebRtc_UWord32 _samplingRateIn; // Sampling frequency for Mic - WebRtc_UWord32 _samplingRateOut; // Sampling frequency for Speaker - WebRtc_UWord32 _maxSpeakerVolume; // The maximum speaker volume value - WebRtc_UWord32 _minSpeakerVolume; // The minimum speaker volume value - bool _loudSpeakerOn; - - // Recording buffer used by the queues. - int8_t rec_buffer_[N_REC_BUFFERS][2 * REC_BUF_SIZE_IN_SAMPLES]; - - // Queues accessed by both callback thread and recording thread after - // recording has been started. - std::queue rec_worker_queue_; - std::queue rec_available_queue_; - - // Queue accssed by only callback thread after recording has been started. - std::queue rec_callback_queue_; - - // Flag to protect setting the recording thread priority multiple times. - bool is_thread_priority_set_; + std::queue play_queue_; + WebRtc_Word8 play_buf_[N_PLAY_QUEUE_BUFFERS][ + N_PLAY_CHANNELS * sizeof(int16_t) * PLAY_BUF_SIZE_IN_SAMPLES]; }; -} // namespace webrtc +} // namespace webrtc -#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_OPENSLES_ANDROID_H +#endif // SRC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_