From 1a07a1e8252daf088d1b94b56cce5e9a3e7cae67 Mon Sep 17 00:00:00 2001 From: Brave Yao Date: Thu, 21 May 2015 12:42:40 +0800 Subject: [PATCH] Solve data race in Pulse audio implementation. BUG=3056, 1320 TEST=AutoTest Mainly add threadchecker and remove unnecessary lock. And some more styling working. - audio_device_pulse_linux.cc: wrap lines longer than 80 chars. And add '.' to some comments around. Not do it to all places. - audio_mixer_manager_pulse_linux.cc: Here I adopt some chromium practice. We use to do many things to the failure of pulse operation, which causes most of the data race issue. In chromium, if we failed to call any pulse function, we just fail it w/o use the previous results. Here I did same. Please check if it's good. R=bjornv@webrtc.org, henrika@webrtc.org, tommi@webrtc.org Review URL: https://webrtc-codereview.appspot.com/52479004 Cr-Commit-Position: refs/heads/master@{#9243} --- .../linux/audio_device_pulse_linux.cc | 367 ++++++++-------- .../linux/audio_device_pulse_linux.h | 32 +- .../linux/audio_mixer_manager_pulse_linux.cc | 411 +++++++----------- .../linux/audio_mixer_manager_pulse_linux.h | 12 +- .../audio_processing/gain_control_impl.cc | 1 + 5 files changed, 366 insertions(+), 457 deletions(-) diff --git a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc index 6fdc52f57f..519fb67759 100644 --- a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc +++ b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc @@ -10,6 +10,8 @@ #include +#include "webrtc/base/checks.h" + #include "webrtc/modules/audio_device/audio_device_config.h" #include "webrtc/modules/audio_device/linux/audio_device_pulse_linux.h" @@ -27,10 +29,6 @@ webrtc_adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable; namespace webrtc { -// ============================================================================ -// Static Methods -// 
============================================================================ - AudioDeviceLinuxPulse::AudioDeviceLinuxPulse(const int32_t id) : _ptrAudioBuffer(NULL), _critSect(*CriticalSectionWrapper::CreateCriticalSection()), @@ -108,7 +106,7 @@ AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse() { WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", __FUNCTION__); - + DCHECK(thread_checker_.CalledOnValidThread()); Terminate(); if (_recBuffer) @@ -141,8 +139,7 @@ AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse() void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { - - CriticalSectionScoped lock(&_critSect); + DCHECK(thread_checker_.CalledOnValidThread()); _ptrAudioBuffer = audioBuffer; @@ -168,9 +165,7 @@ int32_t AudioDeviceLinuxPulse::ActiveAudioLayer( int32_t AudioDeviceLinuxPulse::Init() { - - CriticalSectionScoped lock(&_critSect); - + DCHECK(thread_checker_.CalledOnValidThread()); if (_initialized) { return 0; @@ -240,40 +235,32 @@ int32_t AudioDeviceLinuxPulse::Init() int32_t AudioDeviceLinuxPulse::Terminate() { - + DCHECK(thread_checker_.CalledOnValidThread()); if (!_initialized) { return 0; } - Lock(); - _mixerManager.Close(); // RECORDING if (_ptrThreadRec) { ThreadWrapper* tmpThread = _ptrThreadRec.release(); - UnLock(); _timeEventRec.Set(); tmpThread->Stop(); delete tmpThread; - // Lock again since we need to protect _ptrThreadPlay. 
- Lock(); } // PLAYOUT if (_ptrThreadPlay) { ThreadWrapper* tmpThread = _ptrThreadPlay.release(); - _critSect.Leave(); _timeEventPlay.Set(); tmpThread->Stop(); delete tmpThread; - } else { - UnLock(); } // Terminate PulseAudio @@ -299,13 +286,13 @@ int32_t AudioDeviceLinuxPulse::Terminate() bool AudioDeviceLinuxPulse::Initialized() const { + DCHECK(thread_checker_.CalledOnValidThread()); return (_initialized); } int32_t AudioDeviceLinuxPulse::InitSpeaker() { - - CriticalSectionScoped lock(&_critSect); + DCHECK(thread_checker_.CalledOnValidThread()); if (_playing) { @@ -349,9 +336,7 @@ int32_t AudioDeviceLinuxPulse::InitSpeaker() int32_t AudioDeviceLinuxPulse::InitMicrophone() { - - CriticalSectionScoped lock(&_critSect); - + DCHECK(thread_checker_.CalledOnValidThread()); if (_recording) { return -1; @@ -394,17 +379,19 @@ int32_t AudioDeviceLinuxPulse::InitMicrophone() bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const { + DCHECK(thread_checker_.CalledOnValidThread()); return (_mixerManager.SpeakerIsInitialized()); } bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const { + DCHECK(thread_checker_.CalledOnValidThread()); return (_mixerManager.MicrophoneIsInitialized()); } int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available) { - + DCHECK(thread_checker_.CalledOnValidThread()); bool wasInitialized = _mixerManager.SpeakerIsInitialized(); // Make an attempt to open up the @@ -417,7 +404,7 @@ int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available) return 0; } - // Given that InitSpeaker was successful, we know that a volume control exists + // Given that InitSpeaker was successful, we know volume control exists. 
available = true; // Close the initialized output mixer @@ -431,6 +418,7 @@ int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available) int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume) { + DCHECK(thread_checker_.CalledOnValidThread()); if (!_playing) { // Only update the volume if it's been set while we weren't playing. update_speaker_volume_at_startup_ = true; @@ -440,7 +428,7 @@ int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume) int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const { - + DCHECK(thread_checker_.CalledOnValidThread()); uint32_t level(0); if (_mixerManager.SpeakerVolume(level) == -1) @@ -476,7 +464,7 @@ int32_t AudioDeviceLinuxPulse::WaveOutVolume( int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume( uint32_t& maxVolume) const { - + DCHECK(thread_checker_.CalledOnValidThread()); uint32_t maxVol(0); if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) @@ -492,7 +480,7 @@ int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume( int32_t AudioDeviceLinuxPulse::MinSpeakerVolume( uint32_t& minVolume) const { - + DCHECK(thread_checker_.CalledOnValidThread()); uint32_t minVol(0); if (_mixerManager.MinSpeakerVolume(minVol) == -1) @@ -508,7 +496,7 @@ int32_t AudioDeviceLinuxPulse::MinSpeakerVolume( int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize( uint16_t& stepSize) const { - + DCHECK(thread_checker_.CalledOnValidThread()); uint16_t delta(0); if (_mixerManager.SpeakerVolumeStepSize(delta) == -1) @@ -523,7 +511,7 @@ int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize( int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available) { - + DCHECK(thread_checker_.CalledOnValidThread()); bool isAvailable(false); bool wasInitialized = _mixerManager.SpeakerIsInitialized(); @@ -555,13 +543,13 @@ int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available) int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable) { - + DCHECK(thread_checker_.CalledOnValidThread()); return 
(_mixerManager.SetSpeakerMute(enable)); } int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const { - + DCHECK(thread_checker_.CalledOnValidThread()); bool muted(0); if (_mixerManager.SpeakerMute(muted) == -1) { @@ -574,7 +562,7 @@ int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available) { - + DCHECK(thread_checker_.CalledOnValidThread()); bool isAvailable(false); bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); @@ -583,9 +571,9 @@ int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available) // if (!wasInitialized && InitMicrophone() == -1) { - // If we end up here it means that the selected microphone has no volume - // control, hence it is safe to state that there is no boost control - // already at this stage. + // If we end up here it means that the selected microphone has no + // volume control, hence it is safe to state that there is no + // boost control already at this stage. 
available = false; return 0; } @@ -607,13 +595,13 @@ int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available) int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable) { - + DCHECK(thread_checker_.CalledOnValidThread()); return (_mixerManager.SetMicrophoneMute(enable)); } int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const { - + DCHECK(thread_checker_.CalledOnValidThread()); bool muted(0); if (_mixerManager.MicrophoneMute(muted) == -1) { @@ -626,7 +614,7 @@ int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available) { - + DCHECK(thread_checker_.CalledOnValidThread()); bool isAvailable(false); bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); @@ -635,9 +623,9 @@ int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available) // if (!wasInitialized && InitMicrophone() == -1) { - // If we end up here it means that the selected microphone has no volume - // control, hence it is safe to state that there is no boost control - // already at this stage. + // If we end up here it means that the selected microphone has no + // volume control, hence it is safe to state that there is no + // boost control already at this stage. 
available = false; return 0; } @@ -657,13 +645,13 @@ int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available) int32_t AudioDeviceLinuxPulse::SetMicrophoneBoost(bool enable) { - + DCHECK(thread_checker_.CalledOnValidThread()); return (_mixerManager.SetMicrophoneBoost(enable)); } int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const { - + DCHECK(thread_checker_.CalledOnValidThread()); bool onOff(0); if (_mixerManager.MicrophoneBoost(onOff) == -1) @@ -678,7 +666,7 @@ int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available) { - + DCHECK(thread_checker_.CalledOnValidThread()); if (_recChannels == 2 && _recording) { available = true; return 0; @@ -712,7 +700,7 @@ int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available) int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable) { - + DCHECK(thread_checker_.CalledOnValidThread()); if (enable) _recChannels = 2; else @@ -723,7 +711,7 @@ int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable) int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const { - + DCHECK(thread_checker_.CalledOnValidThread()); if (_recChannels == 2) enabled = true; else @@ -734,7 +722,7 @@ int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available) { - + DCHECK(thread_checker_.CalledOnValidThread()); if (_playChannels == 2 && _playing) { available = true; return 0; @@ -767,7 +755,7 @@ int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available) int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable) { - + DCHECK(thread_checker_.CalledOnValidThread()); if (enable) _playChannels = 2; else @@ -778,7 +766,7 @@ int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable) int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const { - + DCHECK(thread_checker_.CalledOnValidThread()); if (_playChannels 
== 2) enabled = true; else @@ -789,7 +777,7 @@ int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const int32_t AudioDeviceLinuxPulse::SetAGC(bool enable) { - + CriticalSectionScoped lock(&_critSect); _AGC = enable; return 0; @@ -797,28 +785,28 @@ int32_t AudioDeviceLinuxPulse::SetAGC(bool enable) bool AudioDeviceLinuxPulse::AGC() const { - + CriticalSectionScoped lock(&_critSect); return _AGC; } int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable( bool& available) { - + DCHECK(thread_checker_.CalledOnValidThread()); bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); // Make an attempt to open up the // input mixer corresponding to the currently selected output device. if (!wasInitialized && InitMicrophone() == -1) { - // If we end up here it means that the selected microphone has no volume - // control. + // If we end up here it means that the selected microphone has no + // volume control. available = false; return 0; } // Given that InitMicrophone was successful, we know that a volume control - // exists + // exists. 
available = true; // Close the initialized input mixer @@ -832,7 +820,6 @@ int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable( int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume) { - return (_mixerManager.SetMicrophoneVolume(volume)); } @@ -889,7 +876,7 @@ int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume( int32_t AudioDeviceLinuxPulse::MicrophoneVolumeStepSize( uint16_t& stepSize) const { - + DCHECK(thread_checker_.CalledOnValidThread()); uint16_t delta(0); if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1) @@ -904,7 +891,7 @@ int32_t AudioDeviceLinuxPulse::MicrophoneVolumeStepSize( int16_t AudioDeviceLinuxPulse::PlayoutDevices() { - + DCHECK(thread_checker_.CalledOnValidThread()); PaLock(); pa_operation* paOperation = NULL; @@ -924,7 +911,7 @@ int16_t AudioDeviceLinuxPulse::PlayoutDevices() int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index) { - + DCHECK(thread_checker_.CalledOnValidThread()); if (_playIsInitialized) { return -1; @@ -961,7 +948,7 @@ int32_t AudioDeviceLinuxPulse::PlayoutDeviceName( char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) { - + DCHECK(thread_checker_.CalledOnValidThread()); const uint16_t nDevices = PlayoutDevices(); if ((index > (nDevices - 1)) || (name == NULL)) @@ -1003,7 +990,7 @@ int32_t AudioDeviceLinuxPulse::RecordingDeviceName( char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) { - + DCHECK(thread_checker_.CalledOnValidThread()); const uint16_t nDevices(RecordingDevices()); if ((index > (nDevices - 1)) || (name == NULL)) @@ -1042,7 +1029,7 @@ int32_t AudioDeviceLinuxPulse::RecordingDeviceName( int16_t AudioDeviceLinuxPulse::RecordingDevices() { - + DCHECK(thread_checker_.CalledOnValidThread()); PaLock(); pa_operation* paOperation = NULL; @@ -1062,7 +1049,7 @@ int16_t AudioDeviceLinuxPulse::RecordingDevices() int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index) { - + DCHECK(thread_checker_.CalledOnValidThread()); if (_recIsInitialized) { return -1; @@ 
-1096,7 +1083,7 @@ int32_t AudioDeviceLinuxPulse::SetRecordingDevice( int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available) { - + DCHECK(thread_checker_.CalledOnValidThread()); available = false; // Try to initialize the playout side @@ -1115,7 +1102,7 @@ int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available) int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available) { - + DCHECK(thread_checker_.CalledOnValidThread()); available = false; // Try to initialize the playout side @@ -1134,8 +1121,7 @@ int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available) int32_t AudioDeviceLinuxPulse::InitPlayout() { - - CriticalSectionScoped lock(&_critSect); + DCHECK(thread_checker_.CalledOnValidThread()); if (_playing) { @@ -1188,7 +1174,8 @@ int32_t AudioDeviceLinuxPulse::InitPlayout() } WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, - " stream state %d\n", LATE(pa_stream_get_state)(_playStream)); + " stream state %d\n", + LATE(pa_stream_get_state)(_playStream)); // Set stream flags _playStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE @@ -1198,9 +1185,9 @@ int32_t AudioDeviceLinuxPulse::InitPlayout() { // If configuring a specific latency then we want to specify // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters - // automatically to reach that target latency. However, that flag doesn't - // exist in Ubuntu 8.04 and many people still use that, so we have to check - // the protocol version of libpulse. + // automatically to reach that target latency. However, that flag + // doesn't exist in Ubuntu 8.04 and many people still use that, + // so we have to check the protocol version of libpulse. 
if (LATE(pa_context_get_protocol_version)(_paContext) >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) { @@ -1217,16 +1204,18 @@ int32_t AudioDeviceLinuxPulse::InitPlayout() } size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); - uint32_t latency = bytesPerSec - * WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS / WEBRTC_PA_MSECS_PER_SEC; + uint32_t latency = bytesPerSec * + WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS / + WEBRTC_PA_MSECS_PER_SEC; // Set the play buffer attributes _playBufferAttr.maxlength = latency; // num bytes stored in the buffer _playBufferAttr.tlength = latency; // target fill level of play buffer // minimum free num bytes before server request more data _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR; - _playBufferAttr.prebuf = _playBufferAttr.tlength - - _playBufferAttr.minreq; // prebuffer tlength before starting playout + // prebuffer tlength before starting playout + _playBufferAttr.prebuf = _playBufferAttr.tlength - + _playBufferAttr.minreq; _configuredLatencyPlay = latency; } @@ -1241,7 +1230,8 @@ int32_t AudioDeviceLinuxPulse::InitPlayout() PaStreamUnderflowCallback, this); // Set the state callback function for the stream - LATE(pa_stream_set_state_callback)(_playStream, PaStreamStateCallback, this); + LATE(pa_stream_set_state_callback)(_playStream, + PaStreamStateCallback, this); // Mark playout side as initialized _playIsInitialized = true; @@ -1253,8 +1243,7 @@ int32_t AudioDeviceLinuxPulse::InitPlayout() int32_t AudioDeviceLinuxPulse::InitRecording() { - - CriticalSectionScoped lock(&_critSect); + DCHECK(thread_checker_.CalledOnValidThread()); if (_recording) { @@ -1312,9 +1301,9 @@ int32_t AudioDeviceLinuxPulse::InitRecording() // If configuring a specific latency then we want to specify // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters - // automatically to reach that target latency. 
However, that flag doesn't - // exist in Ubuntu 8.04 and many people still use that, so we have to check - // the protocol version of libpulse. + // automatically to reach that target latency. However, that flag + // doesn't exist in Ubuntu 8.04 and many people still use that, + // so we have to check the protocol version of libpulse. if (LATE(pa_context_get_protocol_version)(_paContext) >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) { @@ -1349,11 +1338,14 @@ int32_t AudioDeviceLinuxPulse::InitRecording() _recBuffer = new int8_t[_recordBufferSize]; // Enable overflow callback - LATE(pa_stream_set_overflow_callback)(_recStream, PaStreamOverflowCallback, + LATE(pa_stream_set_overflow_callback)(_recStream, + PaStreamOverflowCallback, this); // Set the state callback function for the stream - LATE(pa_stream_set_state_callback)(_recStream, PaStreamStateCallback, this); + LATE(pa_stream_set_state_callback)(_recStream, + PaStreamStateCallback, + this); // Mark recording side as initialized _recIsInitialized = true; @@ -1363,7 +1355,7 @@ int32_t AudioDeviceLinuxPulse::InitRecording() int32_t AudioDeviceLinuxPulse::StartRecording() { - + DCHECK(thread_checker_.CalledOnValidThread()); if (!_recIsInitialized) { return -1; @@ -1374,10 +1366,10 @@ int32_t AudioDeviceLinuxPulse::StartRecording() return 0; } - // set state to ensure that the recording starts from the audio thread + // Set state to ensure that the recording starts from the audio thread. _startRec = true; - // the audio thread will signal when recording has started + // The audio thread will signal when recording has started. _timeEventRec.Set(); if (kEventTimeout == _recStartEvent.Wait(10000)) { @@ -1395,7 +1387,8 @@ int32_t AudioDeviceLinuxPulse::StartRecording() CriticalSectionScoped lock(&_critSect); if (_recording) { - // the recording state is set by the audio thread after recording has started + // The recording state is set by the audio thread after recording + // has started. 
} else { WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, @@ -1409,7 +1402,7 @@ int32_t AudioDeviceLinuxPulse::StartRecording() int32_t AudioDeviceLinuxPulse::StopRecording() { - + DCHECK(thread_checker_.CalledOnValidThread()); CriticalSectionScoped lock(&_critSect); if (!_recIsInitialized) @@ -1472,22 +1465,26 @@ int32_t AudioDeviceLinuxPulse::StopRecording() bool AudioDeviceLinuxPulse::RecordingIsInitialized() const { + DCHECK(thread_checker_.CalledOnValidThread()); return (_recIsInitialized); } bool AudioDeviceLinuxPulse::Recording() const { - CriticalSectionScoped lock(&_critSect); + DCHECK(thread_checker_.CalledOnValidThread()); return (_recording); } bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const { + DCHECK(thread_checker_.CalledOnValidThread()); return (_playIsInitialized); } int32_t AudioDeviceLinuxPulse::StartPlayout() { + DCHECK(thread_checker_.CalledOnValidThread()); + if (!_playIsInitialized) { return -1; @@ -1498,13 +1495,16 @@ int32_t AudioDeviceLinuxPulse::StartPlayout() return 0; } - // set state to ensure that playout starts from the audio thread - _startPlay = true; + // Set state to ensure that playout starts from the audio thread. + { + CriticalSectionScoped lock(&_critSect); + _startPlay = true; + } // Both |_startPlay| and |_playing| needs protction since they are also // accessed on the playout thread. - // the audio thread will signal when playout has started + // The audio thread will signal when playout has started. _timeEventPlay.Set(); if (kEventTimeout == _playStartEvent.Wait(10000)) { @@ -1522,7 +1522,8 @@ int32_t AudioDeviceLinuxPulse::StartPlayout() CriticalSectionScoped lock(&_critSect); if (_playing) { - // the playing state is set by the audio thread after playout has started + // The playing state is set by the audio thread after playout + // has started. 
} else { WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, @@ -1536,7 +1537,7 @@ int32_t AudioDeviceLinuxPulse::StartPlayout() int32_t AudioDeviceLinuxPulse::StopPlayout() { - + DCHECK(thread_checker_.CalledOnValidThread()); CriticalSectionScoped lock(&_critSect); if (!_playIsInitialized) @@ -1608,14 +1609,14 @@ int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const int32_t AudioDeviceLinuxPulse::RecordingDelay(uint16_t& delayMS) const { - CriticalSectionScoped lock(&_critSect); + DCHECK(thread_checker_.CalledOnValidThread()); delayMS = (uint16_t) _sndCardRecDelay; return 0; } bool AudioDeviceLinuxPulse::Playing() const { - CriticalSectionScoped lock(&_critSect); + DCHECK(thread_checker_.CalledOnValidThread()); return (_playing); } @@ -1623,7 +1624,7 @@ int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer( const AudioDeviceModule::BufferType type, uint16_t sizeMS) { - + DCHECK(thread_checker_.CalledOnValidThread()); if (type != AudioDeviceModule::kFixedBufferSize) { WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, @@ -1641,7 +1642,7 @@ int32_t AudioDeviceLinuxPulse::PlayoutBuffer( AudioDeviceModule::BufferType& type, uint16_t& sizeMS) const { - + DCHECK(thread_checker_.CalledOnValidThread()); type = _playBufType; sizeMS = _playBufDelayFixed; @@ -1710,8 +1711,8 @@ void AudioDeviceLinuxPulse::ClearRecordingError() void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context *c, void *pThis) { - static_cast (pThis)->PaContextStateCallbackHandler( - c); + static_cast (pThis)-> + PaContextStateCallbackHandler(c); } // ---------------------------------------------------------------------------- @@ -1738,12 +1739,14 @@ void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context */*c*/, const pa_server_info *i, void *pThis) { - static_cast (pThis)->PaServerInfoCallbackHandler(i); + static_cast (pThis)-> + PaServerInfoCallbackHandler(i); } void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream *p, void *pThis) { - static_cast 
(pThis)->PaStreamStateCallbackHandler(p); + static_cast (pThis)-> + PaStreamStateCallbackHandler(p); } void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context *c) @@ -1851,7 +1854,8 @@ void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler( } } -void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler(const pa_server_info *i) +void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler( + const pa_server_info *i) { // Use PA native sampling rate sample_rate_hz_ = i->sample_spec.rate; @@ -1917,7 +1921,8 @@ int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion() // get the server info and update deviceName paOperation = LATE(pa_context_get_server_info)(_paContext, - PaServerInfoCallback, this); + PaServerInfoCallback, + this); WaitForOperationCompletion(paOperation); @@ -1937,7 +1942,8 @@ int32_t AudioDeviceLinuxPulse::InitSamplingFrequency() // Get the server info and update sample_rate_hz_ paOperation = LATE(pa_context_get_server_info)(_paContext, - PaServerInfoCallback, this); + PaServerInfoCallback, + this); WaitForOperationCompletion(paOperation); @@ -1984,7 +1990,8 @@ int32_t AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice, // Get the server info and update deviceName paOperation = LATE(pa_context_get_server_info)(_paContext, - PaServerInfoCallback, this); + PaServerInfoCallback, + this); WaitForOperationCompletion(paOperation); @@ -2001,7 +2008,8 @@ int32_t AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice, paOperation = LATE(pa_context_get_sink_info_by_name)(_paContext, (char *) tmpName, - PaSinkInfoCallback, this); + PaSinkInfoCallback, + this); } WaitForOperationCompletion(paOperation); @@ -2103,7 +2111,9 @@ int32_t AudioDeviceLinuxPulse::InitPulseAudio() // Connect the context to a server (default) _paStateChanged = false; - retVal = LATE(pa_context_connect)(_paContext, NULL, PA_CONTEXT_NOAUTOSPAWN, + retVal = LATE(pa_context_connect)(_paContext, + NULL, + PA_CONTEXT_NOAUTOSPAWN, NULL); if (retVal != PA_OK) @@ -2153,7 +2163,8 
@@ int32_t AudioDeviceLinuxPulse::InitPulseAudio() if (CheckPulseAudioVersion() < 0) { WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " PulseAudio version %s not supported", _paServerVersion); + " PulseAudio version %s not supported", + _paServerVersion); return -1; } @@ -2161,7 +2172,8 @@ int32_t AudioDeviceLinuxPulse::InitPulseAudio() if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0) { WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - " failed to initialize sampling frequency, set to %d Hz", + " failed to initialize sampling frequency," + " set to %d Hz", sample_rate_hz_); return -1; } @@ -2254,9 +2266,9 @@ void AudioDeviceLinuxPulse::EnableWriteCallback() _tempBufferSpace = LATE(pa_stream_writable_size)(_playStream); if (_tempBufferSpace > 0) { - // Yup, there is already space available, so if we register a write - // callback then it will not receive any event. So dispatch one ourself - // instead + // Yup, there is already space available, so if we register a + // write callback then it will not receive any event. So dispatch + // one ourself instead. _timeEventPlay.Set(); return; } @@ -2293,7 +2305,8 @@ void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace) void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream */*unused*/, void *pThis) { - static_cast (pThis)->PaStreamUnderflowCallbackHandler(); + static_cast (pThis)-> + PaStreamUnderflowCallbackHandler(); } void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() @@ -2303,8 +2316,8 @@ void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { - // We didn't configure a pa_buffer_attr before, so switching to one now - // would be questionable. + // We didn't configure a pa_buffer_attr before, so switching to + // one now would be questionable. 
return; } @@ -2319,8 +2332,9 @@ void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() } size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); - uint32_t newLatency = _configuredLatencyPlay + bytesPerSec - * WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS / WEBRTC_PA_MSECS_PER_SEC; + uint32_t newLatency = _configuredLatencyPlay + bytesPerSec * + WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS / + WEBRTC_PA_MSECS_PER_SEC; // Set the play buffer attributes _playBufferAttr.maxlength = newLatency; @@ -2347,7 +2361,9 @@ void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() void AudioDeviceLinuxPulse::EnableReadCallback() { - LATE(pa_stream_set_read_callback)(_recStream, &PaStreamReadCallback, this); + LATE(pa_stream_set_read_callback)(_recStream, + &PaStreamReadCallback, + this); } void AudioDeviceLinuxPulse::DisableReadCallback() @@ -2359,15 +2375,17 @@ void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream */*unused1*/, size_t /*unused2*/, void *pThis) { - static_cast (pThis)->PaStreamReadCallbackHandler(); + static_cast (pThis)-> + PaStreamReadCallbackHandler(); } void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler() { // We get the data pointer and size now in order to save one Lock/Unlock - // in the worker thread - if (LATE(pa_stream_peek)(_recStream, &_tempSampleData, &_tempSampleDataSize) - != 0) + // in the worker thread. + if (LATE(pa_stream_peek)(_recStream, + &_tempSampleData, + &_tempSampleDataSize) != 0) { WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Can't read data!"); @@ -2376,7 +2394,7 @@ void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler() // Since we consume the data asynchronously on a different thread, we have // to temporarily disable the read callback or else Pulse will call it - // continuously until we consume the data. We re-enable it below + // continuously until we consume the data. We re-enable it below. 
DisableReadCallback(); _timeEventRec.Set(); } @@ -2384,7 +2402,8 @@ void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler() void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream */*unused*/, void *pThis) { - static_cast (pThis)->PaStreamOverflowCallbackHandler(); + static_cast (pThis)-> + PaStreamOverflowCallbackHandler(); } void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler() @@ -2411,23 +2430,24 @@ int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream *stream) { WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Can't query latency"); - // We'd rather continue playout/capture with an incorrect delay than stop - // it altogether, so return a valid value. + // We'd rather continue playout/capture with an incorrect delay than + // stop it altogether, so return a valid value. return 0; } if (negative) { WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, - " warning: pa_stream_get_latency reported negative delay"); + " warning: pa_stream_get_latency reported negative " + "delay"); // The delay can be negative for monitoring streams if the captured - // samples haven't been played yet. In such a case, "latency" contains the - // magnitude, so we must negate it to get the real value. + // samples haven't been played yet. In such a case, "latency" + // contains the magnitude, so we must negate it to get the real value. int32_t tmpLatency = (int32_t) -latency; if (tmpLatency < 0) { - // Make sure that we don't use a negative delay + // Make sure that we don't use a negative delay. 
tmpLatency = 0; } @@ -2438,13 +2458,14 @@ int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream *stream) } } -int32_t AudioDeviceLinuxPulse::ReadRecordedData(const void* bufferData, - size_t bufferSize) +int32_t AudioDeviceLinuxPulse::ReadRecordedData( + const void* bufferData, + size_t bufferSize) EXCLUSIVE_LOCKS_REQUIRED(_critSect) { size_t size = bufferSize; uint32_t numRecSamples = _recordBufferSize / (2 * _recChannels); - // Account for the peeked data and the used data + // Account for the peeked data and the used data. uint32_t recDelay = (uint32_t) ((LatencyUsecs(_recStream) / 1000) + 10 * ((size + _recordBufferUsed) / _recordBufferSize)); @@ -2452,13 +2473,13 @@ int32_t AudioDeviceLinuxPulse::ReadRecordedData(const void* bufferData, if (_playStream) { - // Get the playout delay + // Get the playout delay. _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream) / 1000); } if (_recordBufferUsed > 0) { - // Have to copy to the buffer until it is full + // Have to copy to the buffer until it is full. size_t copy = _recordBufferSize - _recordBufferUsed; if (size < copy) { @@ -2472,36 +2493,37 @@ int32_t AudioDeviceLinuxPulse::ReadRecordedData(const void* bufferData, if (_recordBufferUsed != _recordBufferSize) { - // Not enough data yet to pass to VoE + // Not enough data yet to pass to VoE. return 0; } - // Provide data to VoiceEngine + // Provide data to VoiceEngine. if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1) { - // We have stopped recording + // We have stopped recording. return -1; } _recordBufferUsed = 0; } - // Now process full 10ms sample sets directly from the input + // Now process full 10ms sample sets directly from the input. while (size >= _recordBufferSize) { - // Provide data to VoiceEngine + // Provide data to VoiceEngine. if (ProcessRecordedData( static_cast (const_cast (bufferData)), numRecSamples, recDelay) == -1) { - // We have stopped recording + // We have stopped recording. 
return -1; } - bufferData = static_cast (bufferData) + _recordBufferSize; + bufferData = static_cast (bufferData) + + _recordBufferSize; size -= _recordBufferSize; - // We have consumed 10ms of data + // We have consumed 10ms of data. recDelay -= 10; } @@ -2538,9 +2560,9 @@ int32_t AudioDeviceLinuxPulse::ProcessRecordedData( const uint32_t clockDrift(0); // TODO(andrew): this is a temporary hack, to avoid non-causal far- and // near-end signals at the AEC for PulseAudio. I think the system delay is - // being correctly calculated here, but for legacy reasons we add +10 ms to - // the value in the AEC. The real fix will be part of a larger investigation - // into managing system delay in the AEC. + // being correctly calculated here, but for legacy reasons we add +10 ms + // to the value in the AEC. The real fix will be part of a larger + // investigation into managing system delay in the AEC. if (recDelay > 10) recDelay -= 10; else @@ -2548,12 +2570,12 @@ int32_t AudioDeviceLinuxPulse::ProcessRecordedData( _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay, clockDrift); _ptrAudioBuffer->SetTypingStatus(KeyPressed()); // Deliver recorded samples at specified sample rate, - // mic level etc. to the observer using callback + // mic level etc. to the observer using callback. UnLock(); _ptrAudioBuffer->DeliverRecordedData(); Lock(); - // We have been unlocked - check the flag again + // We have been unlocked - check the flag again. 
if (!_recording) { return -1; @@ -2608,7 +2630,7 @@ bool AudioDeviceLinuxPulse::PlayThreadProcess() return true; } - Lock(); + CriticalSectionScoped lock(&_critSect); if (_startPlay) { @@ -2701,7 +2723,6 @@ bool AudioDeviceLinuxPulse::PlayThreadProcess() _playing = true; _playStartEvent.Set(); - UnLock(); return true; } @@ -2725,10 +2746,10 @@ bool AudioDeviceLinuxPulse::PlayThreadProcess() PaLock(); if (LATE(pa_stream_write)( - _playStream, - (void *) &_playBuffer[_playbackBufferUnused], - write, NULL, (int64_t) 0, - PA_SEEK_RELATIVE) != PA_OK) + _playStream, + (void *) &_playBuffer[_playbackBufferUnused], + write, NULL, (int64_t) 0, + PA_SEEK_RELATIVE) != PA_OK) { _writeErrors++; if (_writeErrors > 10) @@ -2739,7 +2760,8 @@ bool AudioDeviceLinuxPulse::PlayThreadProcess() kTraceUtility, _id, " pending playout error exists"); } - _playError = 1; // Triggers callback from module process thread + // Triggers callback from module process thread. + _playError = 1; WEBRTC_TRACE( kTraceError, kTraceUtility, @@ -2758,11 +2780,12 @@ bool AudioDeviceLinuxPulse::PlayThreadProcess() } uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels); - if (_tempBufferSpace > 0) // Might have been reduced to zero by the above + // Might have been reduced to zero by the above. + if (_tempBufferSpace > 0) { - // Ask for new PCM data to be played out using the AudioDeviceBuffer - // ensure that this callback is executed without taking the - // audio-thread lock + // Ask for new PCM data to be played out using the + // AudioDeviceBuffer ensure that this callback is executed + // without taking the audio-thread lock. UnLock(); WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " requesting data"); @@ -2770,10 +2793,9 @@ bool AudioDeviceLinuxPulse::PlayThreadProcess() _ptrAudioBuffer->RequestPlayoutData(numPlaySamples); Lock(); - // We have been unlocked - check the flag again + // We have been unlocked - check the flag again. 
if (!_playing) { - UnLock(); return true; } @@ -2807,7 +2829,8 @@ bool AudioDeviceLinuxPulse::PlayThreadProcess() kTraceUtility, _id, " pending playout error exists"); } - _playError = 1; // triggers callback from module process thread + // Triggers callback from module process thread. + _playError = 1; WEBRTC_TRACE( kTraceError, kTraceUtility, @@ -2831,7 +2854,6 @@ bool AudioDeviceLinuxPulse::PlayThreadProcess() } // _playing - UnLock(); return true; } @@ -2849,7 +2871,7 @@ bool AudioDeviceLinuxPulse::RecThreadProcess() return true; } - Lock(); + CriticalSectionScoped lock(&_critSect); if (_startRec) { @@ -2873,10 +2895,10 @@ bool AudioDeviceLinuxPulse::RecThreadProcess() " connecting stream"); // Connect the stream to a source - if (LATE(pa_stream_connect_record)(_recStream, _recDeviceName, - &_recBufferAttr, - (pa_stream_flags_t) _recStreamFlags) - != PA_OK) + if (LATE(pa_stream_connect_record)(_recStream, + _recDeviceName, + &_recBufferAttr, + (pa_stream_flags_t) _recStreamFlags) != PA_OK) { WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " failed to connect rec stream, err=%d", @@ -2911,7 +2933,6 @@ bool AudioDeviceLinuxPulse::RecThreadProcess() _recording = true; _recStartEvent.Set(); - UnLock(); return true; } @@ -2920,7 +2941,6 @@ bool AudioDeviceLinuxPulse::RecThreadProcess() // Read data and provide it to VoiceEngine if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1) { - UnLock(); return true; } @@ -2979,7 +2999,6 @@ bool AudioDeviceLinuxPulse::RecThreadProcess() } // _recording - UnLock(); return true; } diff --git a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h index 5fb4550d9d..418dd3d287 100644 --- a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h +++ b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h @@ -15,6 +15,7 @@ #include "webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h" #include 
"webrtc/system_wrappers/interface/critical_section_wrapper.h" #include "webrtc/system_wrappers/interface/thread_wrapper.h" +#include "webrtc/base/thread_checker.h" #include #include @@ -204,18 +205,16 @@ public: // CPU load int32_t CPULoad(uint16_t& load) const override; -public: - bool PlayoutWarning() const override; - bool PlayoutError() const override; - bool RecordingWarning() const override; - bool RecordingError() const override; - void ClearPlayoutWarning() override; - void ClearPlayoutError() override; - void ClearRecordingWarning() override; - void ClearRecordingError() override; + bool PlayoutWarning() const override; + bool PlayoutError() const override; + bool RecordingWarning() const override; + bool RecordingError() const override; + void ClearPlayoutWarning() override; + void ClearPlayoutError() override; + void ClearRecordingWarning() override; + void ClearRecordingError() override; -public: - void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override; + void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override; private: void Lock() EXCLUSIVE_LOCK_FUNCTION(_critSect) { @@ -227,10 +226,8 @@ private: void WaitForOperationCompletion(pa_operation* paOperation) const; void WaitForSuccess(pa_operation* paOperation) const; -private: bool KeyPressed() const; -private: static void PaContextStateCallback(pa_context *c, void *pThis); static void PaSinkInfoCallback(pa_context *c, const pa_sink_info *i, int eol, void *pThis); @@ -279,7 +276,6 @@ private: bool RecThreadProcess(); bool PlayThreadProcess(); -private: AudioDeviceBuffer* _ptrAudioBuffer; CriticalSectionWrapper& _critSect; @@ -305,7 +301,12 @@ private: AudioDeviceModule::BufferType _playBufType; -private: + // Stores thread ID in constructor. + // We can then use ThreadChecker::CalledOnValidThread() to ensure that + // other methods are called from the same thread. + // Currently only does DCHECK(thread_checker_.CalledOnValidThread()). 
+ rtc::ThreadChecker thread_checker_; + bool _initialized; bool _recording; bool _playing; @@ -318,7 +319,6 @@ private: bool _AGC; bool update_speaker_volume_at_startup_; -private: uint16_t _playBufDelayFixed; // fixed playback delay uint32_t _sndCardPlayDelay; diff --git a/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc b/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc index 1ee94cd42f..4df2d94f88 100644 --- a/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc +++ b/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc @@ -12,22 +12,36 @@ #include "webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h" #include "webrtc/system_wrappers/interface/trace.h" +#include "webrtc/base/checks.h" extern webrtc_adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable; // Accesses Pulse functions through our late-binding symbol table instead of -// directly. This way we don't have to link to libpulse, which means our binary -// will work on systems that don't have it. +// directly. This way we don't have to link to libpulse, which means our +// binary will work on systems that don't have it. 
#define LATE(sym) \ - LATESYM_GET(webrtc_adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, sym) + LATESYM_GET(webrtc_adm_linux_pulse::PulseAudioSymbolTable, \ + &PaSymbolTable, sym) namespace webrtc { -enum { kMaxRetryOnFailure = 2 }; +class AutoPulseLock { + public: + explicit AutoPulseLock(pa_threaded_mainloop* pa_mainloop) + : pa_mainloop_(pa_mainloop) { + LATE(pa_threaded_mainloop_lock)(pa_mainloop_); + } + + ~AutoPulseLock() { + LATE(pa_threaded_mainloop_unlock)(pa_mainloop_); + } + + private: + pa_threaded_mainloop* const pa_mainloop_; +}; AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse(const int32_t id) : - _critSect(*CriticalSectionWrapper::CreateCriticalSection()), _id(id), _paOutputDeviceIndex(-1), _paInputDeviceIndex(-1), @@ -41,8 +55,7 @@ AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse(const int32_t id) : _paSpeakerMute(false), _paSpeakerVolume(PA_VOLUME_NORM), _paChannels(0), - _paObjectsSet(false), - _callbackValues(false) + _paObjectsSet(false) { WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s constructed", __FUNCTION__); @@ -50,27 +63,25 @@ AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse(const int32_t id) : AudioMixerManagerLinuxPulse::~AudioMixerManagerLinuxPulse() { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destructed", __FUNCTION__); Close(); - - delete &_critSect; } -// ============================================================================ +// =========================================================================== // PUBLIC METHODS -// ============================================================================ +// =========================================================================== int32_t AudioMixerManagerLinuxPulse::SetPulseAudioObjects( pa_threaded_mainloop* mainloop, pa_context* context) { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__); - 
CriticalSectionScoped lock(&_critSect); - if (!mainloop || !context) { WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, @@ -90,11 +101,10 @@ int32_t AudioMixerManagerLinuxPulse::SetPulseAudioObjects( int32_t AudioMixerManagerLinuxPulse::Close() { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__); - CriticalSectionScoped lock(&_critSect); - CloseSpeaker(); CloseMicrophone(); @@ -108,11 +118,10 @@ int32_t AudioMixerManagerLinuxPulse::Close() int32_t AudioMixerManagerLinuxPulse::CloseSpeaker() { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__); - CriticalSectionScoped lock(&_critSect); - // Reset the index to -1 _paOutputDeviceIndex = -1; _paPlayStream = NULL; @@ -122,11 +131,10 @@ int32_t AudioMixerManagerLinuxPulse::CloseSpeaker() int32_t AudioMixerManagerLinuxPulse::CloseMicrophone() { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__); - CriticalSectionScoped lock(&_critSect); - // Reset the index to -1 _paInputDeviceIndex = -1; _paRecStream = NULL; @@ -136,20 +144,20 @@ int32_t AudioMixerManagerLinuxPulse::CloseMicrophone() int32_t AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream) { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioMixerManagerLinuxPulse::SetPlayStream(playStream)"); - CriticalSectionScoped lock(&_critSect); _paPlayStream = playStream; return 0; } int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream) { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioMixerManagerLinuxPulse::SetRecStream(recStream)"); - CriticalSectionScoped lock(&_critSect); _paRecStream = recStream; return 0; } @@ -157,12 +165,11 @@ int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream) int32_t 
AudioMixerManagerLinuxPulse::OpenSpeaker( uint16_t deviceIndex) { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioMixerManagerLinuxPulse::OpenSpeaker(deviceIndex=%d)", deviceIndex); - CriticalSectionScoped lock(&_critSect); - // No point in opening the speaker // if PA objects have not been set if (!_paObjectsSet) @@ -185,11 +192,10 @@ int32_t AudioMixerManagerLinuxPulse::OpenSpeaker( int32_t AudioMixerManagerLinuxPulse::OpenMicrophone( uint16_t deviceIndex) { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - "AudioMixerManagerLinuxPulse::OpenMicrophone(deviceIndex=%d)", - deviceIndex); - - CriticalSectionScoped lock(&_critSect); + "AudioMixerManagerLinuxPulse::OpenMicrophone" + "(deviceIndex=%d)", deviceIndex); // No point in opening the microphone // if PA objects have not been set @@ -212,6 +218,7 @@ int32_t AudioMixerManagerLinuxPulse::OpenMicrophone( bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); @@ -220,6 +227,7 @@ bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); @@ -229,12 +237,11 @@ bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume( uint32_t volume) { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioMixerManagerLinuxPulse::SetSpeakerVolume(volume=%u)", volume); - CriticalSectionScoped lock(&_critSect); - if (_paOutputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -248,7 +255,7 @@ int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume( != PA_STREAM_UNCONNECTED)) { // 
We can only really set the volume if we have a connected stream - PaLock(); + AutoPulseLock auto_lock(_paMainloop); // Get the number of channels from the sample specification const pa_sample_spec *spec = @@ -257,7 +264,6 @@ int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume( { WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " could not get sample specification"); - PaUnLock(); return -1; } @@ -278,8 +284,6 @@ int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume( // Don't need to wait for the completion LATE(pa_operation_unref)(paOperation); - - PaUnLock(); } else { // We have not created a stream or it's not connected to the sink @@ -302,7 +306,6 @@ int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume( int32_t AudioMixerManagerLinuxPulse::SpeakerVolume(uint32_t& volume) const { - if (_paOutputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -317,15 +320,16 @@ AudioMixerManagerLinuxPulse::SpeakerVolume(uint32_t& volume) const if (!GetSinkInputInfo()) return -1; + AutoPulseLock auto_lock(_paMainloop); volume = static_cast (_paVolume); - ResetCallbackVariables(); } else { + AutoPulseLock auto_lock(_paMainloop); volume = _paSpeakerVolume; } WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " AudioMixerManagerLinuxPulse::SpeakerVolume() => vol=%i", + "\tAudioMixerManagerLinuxPulse::SpeakerVolume() => vol=%i", volume); return 0; @@ -368,7 +372,7 @@ AudioMixerManagerLinuxPulse::MinSpeakerVolume(uint32_t& minVolume) const int32_t AudioMixerManagerLinuxPulse::SpeakerVolumeStepSize(uint16_t& stepSize) const { - + DCHECK(thread_checker_.CalledOnValidThread()); if (_paOutputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -381,11 +385,8 @@ AudioMixerManagerLinuxPulse::SpeakerVolumeStepSize(uint16_t& stepSize) const stepSize = 1; WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " AudioMixerManagerLinuxPulse::SpeakerVolumeStepSize() => " - "size=%i, stepSize"); - - // Reset members modified by callback - 
ResetCallbackVariables(); + "\tAudioMixerManagerLinuxPulse::SpeakerVolumeStepSize() => " + "size=%i", stepSize); return 0; } @@ -393,6 +394,7 @@ AudioMixerManagerLinuxPulse::SpeakerVolumeStepSize(uint16_t& stepSize) const int32_t AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available) { + DCHECK(thread_checker_.CalledOnValidThread()); if (_paOutputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -409,6 +411,7 @@ AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available) int32_t AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available) { + DCHECK(thread_checker_.CalledOnValidThread()); if (_paOutputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -424,12 +427,11 @@ AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available) int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable) { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioMixerManagerLinuxPulse::SetSpeakerMute(enable=%u)", enable); - CriticalSectionScoped lock(&_critSect); - if (_paOutputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -443,7 +445,7 @@ int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable) != PA_STREAM_UNCONNECTED)) { // We can only really mute if we have a connected stream - PaLock(); + AutoPulseLock auto_lock(_paMainloop); pa_operation* paOperation = NULL; paOperation = LATE(pa_context_set_sink_input_mute)( @@ -459,8 +461,6 @@ int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable) // Don't need to wait for the completion LATE(pa_operation_unref)(paOperation); - - PaUnLock(); } else { // We have not created a stream or it's not connected to the sink @@ -497,7 +497,6 @@ int32_t AudioMixerManagerLinuxPulse::SpeakerMute(bool& enabled) const return -1; enabled = static_cast (_paMute); - ResetCallbackVariables(); } else { enabled = _paSpeakerMute; @@ -513,6 +512,7 @@ int32_t 
AudioMixerManagerLinuxPulse::SpeakerMute(bool& enabled) const int32_t AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available) { + DCHECK(thread_checker_.CalledOnValidThread()); if (_paOutputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -522,33 +522,31 @@ AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available) uint32_t deviceIndex = (uint32_t) _paOutputDeviceIndex; - PaLock(); - - // Get the actual stream device index if we have a connected stream - // The device used by the stream can be changed - // during the call - if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream) - != PA_STREAM_UNCONNECTED)) { - deviceIndex = LATE(pa_stream_get_device_index)(_paPlayStream); - } + AutoPulseLock auto_lock(_paMainloop); - PaUnLock(); + // Get the actual stream device index if we have a connected stream + // The device used by the stream can be changed + // during the call + if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream) + != PA_STREAM_UNCONNECTED)) + { + deviceIndex = LATE(pa_stream_get_device_index)(_paPlayStream); + } + } if (!GetSinkInfoByIndex(deviceIndex)) return -1; available = static_cast (_paChannels == 2); - // Reset members modified by callback - ResetCallbackVariables(); - return 0; } int32_t AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available) { + DCHECK(thread_checker_.CalledOnValidThread()); if (_paInputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -558,7 +556,7 @@ AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available) uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; - PaLock(); + AutoPulseLock auto_lock(_paMainloop); // Get the actual stream device index if we have a connected stream // The device used by the stream can be changed @@ -570,7 +568,6 @@ AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available) } pa_operation* paOperation = NULL; - ResetCallbackVariables(); // Get info for this 
source // We want to know if the actual device can record in stereo @@ -580,31 +577,20 @@ AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available) (void*) this); WaitForOperationCompletion(paOperation); - PaUnLock(); - - if (!_callbackValues) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "Error getting number of input channels: %d", - LATE(pa_context_errno)(_paContext)); - return -1; - } available = static_cast (_paChannels == 2); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable()" + " AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable()" " => available=%i, available"); - // Reset members modified by callback - ResetCallbackVariables(); - return 0; } int32_t AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable( bool& available) { + DCHECK(thread_checker_.CalledOnValidThread()); if (_paInputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -620,12 +606,11 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable( int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable) { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioMixerManagerLinuxPulse::SetMicrophoneMute(enable=%u)", enable); - CriticalSectionScoped lock(&_critSect); - if (_paInputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -635,11 +620,10 @@ int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable) bool setFailed(false); pa_operation* paOperation = NULL; - ResetCallbackVariables(); uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; - PaLock(); + AutoPulseLock auto_lock(_paMainloop); // Get the actual stream device index if we have a connected stream // The device used by the stream can be changed @@ -664,11 +648,6 @@ int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable) // Don't need to wait for this to complete. 
LATE(pa_operation_unref)(paOperation); - PaUnLock(); - - // Reset variables altered by callback - ResetCallbackVariables(); - if (setFailed) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -682,7 +661,7 @@ int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable) int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const { - + DCHECK(thread_checker_.CalledOnValidThread()); if (_paInputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -692,30 +671,26 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; - PaLock(); - - // Get the actual stream device index if we have a connected stream - // The device used by the stream can be changed - // during the call - if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream) - != PA_STREAM_UNCONNECTED)) { - deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream); + AutoPulseLock auto_lock(_paMainloop); + // Get the actual stream device index if we have a connected stream + // The device used by the stream can be changed + // during the call + if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream) + != PA_STREAM_UNCONNECTED)) + { + deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream); + } } - PaUnLock(); - if (!GetSourceInfoByIndex(deviceIndex)) return -1; enabled = static_cast (_paMute); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " AudioMixerManagerLinuxPulse::MicrophoneMute() =>" - " enabled=%i, enabled"); - - // Reset members modified by callback - ResetCallbackVariables(); + "\tAudioMixerManagerLinuxPulse::MicrophoneMute() =>" + " enabled=%i", enabled); return 0; } @@ -723,6 +698,7 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const int32_t AudioMixerManagerLinuxPulse::MicrophoneBoostIsAvailable(bool& available) { + DCHECK(thread_checker_.CalledOnValidThread()); if (_paInputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, 
kTraceAudioDevice, _id, @@ -740,12 +716,11 @@ AudioMixerManagerLinuxPulse::MicrophoneBoostIsAvailable(bool& available) int32_t AudioMixerManagerLinuxPulse::SetMicrophoneBoost(bool enable) { + DCHECK(thread_checker_.CalledOnValidThread()); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioMixerManagerLinuxPulse::SetMicrophoneBoost(enable=%u)", enable); - CriticalSectionScoped lock(&_critSect); - if (_paInputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -753,7 +728,7 @@ int32_t AudioMixerManagerLinuxPulse::SetMicrophoneBoost(bool enable) return -1; } - // Ensure that the selected microphone destination has a valid boost control + // Ensure the selected microphone destination has a valid boost control bool available(false); MicrophoneBoostIsAvailable(available); if (!available) @@ -770,7 +745,7 @@ int32_t AudioMixerManagerLinuxPulse::SetMicrophoneBoost(bool enable) int32_t AudioMixerManagerLinuxPulse::MicrophoneBoost(bool& enabled) const { - + DCHECK(thread_checker_.CalledOnValidThread()); if (_paInputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -787,6 +762,7 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneBoost(bool& enabled) const int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeIsAvailable( bool& available) { + DCHECK(thread_checker_.CalledOnValidThread()); if (_paInputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -804,10 +780,8 @@ int32_t AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume) { WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - "AudioMixerManagerLinuxPulse::SetMicrophoneVolume(volume=%u)", - volume); - - CriticalSectionScoped lock(&_critSect); + "AudioMixerManagerLinuxPulse::SetMicrophoneVolume" + "(volume=%u)", volume); if (_paInputDeviceIndex == -1) { @@ -816,22 +790,20 @@ AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume) return -1; } - // Unlike output streams, input streams have no concept of a stream volume, - // 
only a device volume. So we have to change the volume of the device - // itself. + // Unlike output streams, input streams have no concept of a stream + // volume, only a device volume. So we have to change the volume of the + // device itself. // The device may have a different number of channels than the stream and - // their mapping may be different, so we don't want to use the channel count - // from our sample spec. We could use PA_CHANNELS_MAX to cover our bases, - // and the server allows that even if the device's channel count is lower, - // but some buggy PA clients don't like that (the pavucontrol on Hardy dies - // in an assert if the channel count is different). So instead we look up - // the actual number of channels that the device has. - + // their mapping may be different, so we don't want to use the channel + // count from our sample spec. We could use PA_CHANNELS_MAX to cover our + // bases, and the server allows that even if the device's channel count + // is lower, but some buggy PA clients don't like that (the pavucontrol + // on Hardy dies in an assert if the channel count is different). So + // instead we look up the actual number of channels that the device has. 
+ AutoPulseLock auto_lock(_paMainloop); uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; - PaLock(); - // Get the actual stream device index if we have a connected stream // The device used by the stream can be changed // during the call @@ -843,7 +815,6 @@ AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume) bool setFailed(false); pa_operation* paOperation = NULL; - ResetCallbackVariables(); // Get the number of channels for this source paOperation @@ -853,18 +824,7 @@ AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume) WaitForOperationCompletion(paOperation); - if (!_callbackValues) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "Error getting input channels: %d", - LATE(pa_context_errno)(_paContext)); - PaUnLock(); - return -1; - } - uint8_t channels = _paChannels; - ResetCallbackVariables(); - pa_cvolume cVolumes; LATE(pa_cvolume_set)(&cVolumes, channels, volume); @@ -872,7 +832,8 @@ AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume) paOperation = LATE(pa_context_set_source_volume_by_index)(_paContext, deviceIndex, &cVolumes, - PaSetVolumeCallback, NULL); + PaSetVolumeCallback, + NULL); if (!paOperation) { @@ -882,11 +843,6 @@ AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume) // Don't need to wait for this to complete. 
LATE(pa_operation_unref)(paOperation); - PaUnLock(); - - // Reset variables altered by callback - ResetCallbackVariables(); - if (setFailed) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -911,29 +867,28 @@ AudioMixerManagerLinuxPulse::MicrophoneVolume(uint32_t& volume) const uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; - PaLock(); - - // Get the actual stream device index if we have a connected stream - // The device used by the stream can be changed - // during the call - if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream) - != PA_STREAM_UNCONNECTED)) { - deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream); + AutoPulseLock auto_lock(_paMainloop); + // Get the actual stream device index if we have a connected stream. + // The device used by the stream can be changed during the call. + if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream) + != PA_STREAM_UNCONNECTED)) + { + deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream); + } } - PaUnLock(); - if (!GetSourceInfoByIndex(deviceIndex)) - return -1; + return -1; - volume = static_cast (_paVolume); + { + AutoPulseLock auto_lock(_paMainloop); + volume = static_cast (_paVolume); + } WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " AudioMixerManagerLinuxPulse::MicrophoneVolume() => vol=%i, volume"); - - // Reset members modified by callback - ResetCallbackVariables(); + " AudioMixerManagerLinuxPulse::MicrophoneVolume()" + " => vol=%i", volume); return 0; } @@ -976,7 +931,7 @@ AudioMixerManagerLinuxPulse::MinMicrophoneVolume(uint32_t& minVolume) const int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize( uint16_t& stepSize) const { - + DCHECK(thread_checker_.CalledOnValidThread()); if (_paInputDeviceIndex == -1) { WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, @@ -986,7 +941,7 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize( uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; - PaLock(); + AutoPulseLock auto_lock(_paMainloop); // 
Get the actual stream device index if we have a connected stream // The device used by the stream can be changed @@ -998,7 +953,6 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize( } pa_operation* paOperation = NULL; - ResetCallbackVariables(); // Get info for this source paOperation @@ -1008,60 +962,55 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize( WaitForOperationCompletion(paOperation); - PaUnLock(); - - if (!_callbackValues) - { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "Error getting step size: %d", - LATE(pa_context_errno)(_paContext)); - return -1; - } - stepSize = static_cast ((PA_VOLUME_NORM + 1) / _paVolSteps); WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, - " AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize()" - " => size=%i, stepSize"); - - // Reset members modified by callback - ResetCallbackVariables(); + "\tAudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize()" + " => size=%i", stepSize); return 0; } -// ============================================================================ +// =========================================================================== // Private Methods -// ============================================================================ +// =========================================================================== -void AudioMixerManagerLinuxPulse::PaSinkInfoCallback(pa_context */*c*/, - const pa_sink_info *i, - int eol, void *pThis) +void +AudioMixerManagerLinuxPulse::PaSinkInfoCallback(pa_context */*c*/, + const pa_sink_info *i, + int eol, + void *pThis) { - static_cast (pThis)-> PaSinkInfoCallbackHandler( - i, eol); + static_cast (pThis)-> + PaSinkInfoCallbackHandler(i, eol); } -void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallback( +void +AudioMixerManagerLinuxPulse::PaSinkInputInfoCallback( pa_context */*c*/, const pa_sink_input_info *i, - int eol, void *pThis) + int eol, + void *pThis) { static_cast (pThis)-> PaSinkInputInfoCallbackHandler(i, eol); } -void 
AudioMixerManagerLinuxPulse::PaSourceInfoCallback(pa_context */*c*/, - const pa_source_info *i, - int eol, void *pThis) +void +AudioMixerManagerLinuxPulse::PaSourceInfoCallback(pa_context */*c*/, + const pa_source_info *i, + int eol, + void *pThis) { static_cast (pThis)-> PaSourceInfoCallbackHandler(i, eol); } -void AudioMixerManagerLinuxPulse::PaSetVolumeCallback(pa_context * c, - int success, void */*pThis*/) +void +AudioMixerManagerLinuxPulse::PaSetVolumeCallback(pa_context * c, + int success, + void */*pThis*/) { if (!success) { @@ -1081,7 +1030,6 @@ void AudioMixerManagerLinuxPulse::PaSinkInfoCallbackHandler( return; } - _callbackValues = true; _paChannels = i->channel_map.channels; // Get number of channels pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value. for (int j = 0; j < _paChannels; ++j) @@ -1111,7 +1059,6 @@ void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallbackHandler( return; } - _callbackValues = true; _paChannels = i->channel_map.channels; // Get number of channels pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value. for (int j = 0; j < _paChannels; ++j) @@ -1136,7 +1083,6 @@ void AudioMixerManagerLinuxPulse::PaSourceInfoCallbackHandler( return; } - _callbackValues = true; _paChannels = i->channel_map.channels; // Get number of channels pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value. 
for (int j = 0; j < _paChannels; ++j) @@ -1155,15 +1101,6 @@ void AudioMixerManagerLinuxPulse::PaSourceInfoCallbackHandler( _paVolSteps = PA_VOLUME_NORM + 1; } -void AudioMixerManagerLinuxPulse::ResetCallbackVariables() const -{ - _paVolume = 0; - _paMute = 0; - _paVolSteps = 0; - _paChannels = 0; - _callbackValues = false; -} - void AudioMixerManagerLinuxPulse::WaitForOperationCompletion( pa_operation* paOperation) const { @@ -1175,92 +1112,42 @@ void AudioMixerManagerLinuxPulse::WaitForOperationCompletion( LATE(pa_operation_unref)(paOperation); } -void AudioMixerManagerLinuxPulse::PaLock() const -{ - LATE(pa_threaded_mainloop_lock)(_paMainloop); -} - -void AudioMixerManagerLinuxPulse::PaUnLock() const -{ - LATE(pa_threaded_mainloop_unlock)(_paMainloop); -} - bool AudioMixerManagerLinuxPulse::GetSinkInputInfo() const { pa_operation* paOperation = NULL; - ResetCallbackVariables(); - PaLock(); - for (int retries = 0; retries < kMaxRetryOnFailure && !_callbackValues; - retries ++) { - // Get info for this stream (sink input). - paOperation = LATE(pa_context_get_sink_input_info)( - _paContext, - LATE(pa_stream_get_index)(_paPlayStream), - PaSinkInputInfoCallback, - (void*) this); - - WaitForOperationCompletion(paOperation); - } - PaUnLock(); - - if (!_callbackValues) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "GetSinkInputInfo failed to get volume info : %d", - LATE(pa_context_errno)(_paContext)); - return false; - } + AutoPulseLock auto_lock(_paMainloop); + // Get info for this stream (sink input). 
+ paOperation = LATE(pa_context_get_sink_input_info)( + _paContext, + LATE(pa_stream_get_index)(_paPlayStream), + PaSinkInputInfoCallback, + (void*) this); + WaitForOperationCompletion(paOperation); return true; } bool AudioMixerManagerLinuxPulse::GetSinkInfoByIndex( int device_index) const { pa_operation* paOperation = NULL; - ResetCallbackVariables(); - PaLock(); - for (int retries = 0; retries < kMaxRetryOnFailure && !_callbackValues; - retries ++) { - paOperation = LATE(pa_context_get_sink_info_by_index)(_paContext, - device_index, PaSinkInfoCallback, (void*) this); - - WaitForOperationCompletion(paOperation); - } - PaUnLock(); - - if (!_callbackValues) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "GetSinkInfoByIndex failed to get volume info: %d", - LATE(pa_context_errno)(_paContext)); - return false; - } + AutoPulseLock auto_lock(_paMainloop); + paOperation = LATE(pa_context_get_sink_info_by_index)(_paContext, + device_index, PaSinkInfoCallback, (void*) this); + WaitForOperationCompletion(paOperation); return true; } bool AudioMixerManagerLinuxPulse::GetSourceInfoByIndex( int device_index) const { pa_operation* paOperation = NULL; - ResetCallbackVariables(); - PaLock(); - for (int retries = 0; retries < kMaxRetryOnFailure && !_callbackValues; - retries ++) { + AutoPulseLock auto_lock(_paMainloop); paOperation = LATE(pa_context_get_source_info_by_index)( _paContext, device_index, PaSourceInfoCallback, (void*) this); WaitForOperationCompletion(paOperation); - } - - PaUnLock(); - - if (!_callbackValues) { - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, - "GetSourceInfoByIndex error: %d", - LATE(pa_context_errno)(_paContext)); - return false; - } - return true; } diff --git a/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h b/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h index 9296c76488..85676319bd 100644 --- a/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h +++ 
b/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h @@ -15,6 +15,7 @@ #include "webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h" #include "webrtc/system_wrappers/interface/critical_section_wrapper.h" #include "webrtc/typedefs.h" +#include "webrtc/base/thread_checker.h" #include #include @@ -82,17 +83,13 @@ private: void PaSinkInputInfoCallbackHandler(const pa_sink_input_info *i, int eol); void PaSourceInfoCallbackHandler(const pa_source_info *i, int eol); - void ResetCallbackVariables() const; void WaitForOperationCompletion(pa_operation* paOperation) const; - void PaLock() const; - void PaUnLock() const; bool GetSinkInputInfo() const; bool GetSinkInfoByIndex(int device_index)const ; bool GetSourceInfoByIndex(int device_index) const; private: - CriticalSectionWrapper& _critSect; int32_t _id; int16_t _paOutputDeviceIndex; int16_t _paInputDeviceIndex; @@ -110,7 +107,12 @@ private: mutable uint32_t _paSpeakerVolume; mutable uint8_t _paChannels; bool _paObjectsSet; - mutable bool _callbackValues; + + // Stores thread ID in constructor. + // We can then use ThreadChecker::CalledOnValidThread() to ensure that + // other methods are called from the same thread. + // Currently only does DCHECK(thread_checker_.CalledOnValidThread()). + rtc::ThreadChecker thread_checker_; }; } diff --git a/webrtc/modules/audio_processing/gain_control_impl.cc b/webrtc/modules/audio_processing/gain_control_impl.cc index 6211c4985c..5f301c1345 100644 --- a/webrtc/modules/audio_processing/gain_control_impl.cc +++ b/webrtc/modules/audio_processing/gain_control_impl.cc @@ -179,6 +179,7 @@ int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) { // TODO(ajm): ensure this is called under kAdaptiveAnalog. int GainControlImpl::set_stream_analog_level(int level) { + CriticalSectionScoped crit_scoped(crit_); was_analog_level_set_ = true; if (level < minimum_capture_level_ || level > maximum_capture_level_) { return apm_->kBadParameterError;