Remove the Windows Wave audio device implementation.
This implementation uses legacy classes such as EventTimerWrapper, CriticalSectionWrapper and EventWrapper, and has not been maintained (or, possibly, even used) for a long time. Instead of spending time on testing and updating the class, we simply remove it. On the versions of Windows that we support (Windows 7 and later), the Core Audio implementation is used instead.

BUG=webrtc:7183
R=solenberg@webrtc.org

Review-Url: https://codereview.webrtc.org/2700983002 .
Cr-Commit-Position: refs/heads/master@{#16678}
parent 8fefe9889d
commit cc8588c040
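For context, the selection logic that remains after this change: on Windows, kPlatformDefaultAudio and kWindowsCoreAudio both resolve to the Core Audio backend, while kWindowsWaveAudio keeps its enum value but no longer has an implementation behind it. A minimal usage sketch follows; it is not part of this CL, and the include paths and the scoped_refptr-based Create() signature are assumptions about the tree at this revision.

// Sketch only, not from this CL. The include paths and the scoped_refptr
// return type of AudioDeviceModule::Create() are assumptions at this revision.
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/modules/audio_device/include/audio_device.h"

rtc::scoped_refptr<webrtc::AudioDeviceModule> CreateWindowsAdm(int32_t id) {
  // On the supported versions of Windows (Win7 and later) the platform
  // default maps to Core Audio; passing kWindowsCoreAudio is equivalent.
  // Requesting kWindowsWaveAudio would presumably yield no device now that
  // AudioDeviceWindowsWave is removed.
  return webrtc::AudioDeviceModule::Create(
      id, webrtc::AudioDeviceModule::kPlatformDefaultAudio);
}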
@@ -467,7 +467,6 @@ enum StereoChannel { kStereoLeft = 0, kStereoRight, kStereoBoth };
// Audio device layers
enum AudioLayers {
  kAudioPlatformDefault = 0,
  kAudioWindowsWave = 1,
  kAudioWindowsCore = 2,
  kAudioLinuxAlsa = 3,
  kAudioLinuxPulse = 4
@@ -211,8 +211,6 @@ rtc_static_library("audio_device") {
  sources += [
    "win/audio_device_core_win.cc",
    "win/audio_device_core_win.h",
    "win/audio_device_wave_win.cc",
    "win/audio_device_wave_win.h",
    "win/audio_mixer_manager_win.cc",
    "win/audio_mixer_manager_win.h",
  ]
@@ -23,7 +23,6 @@
#include <string.h>

#if defined(_WIN32)
#include "audio_device_wave_win.h"
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
#include "audio_device_core_win.h"
#endif
@@ -200,17 +199,6 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() {

  // Create the *Windows* implementation of the Audio Device
  //
#if defined(_WIN32)
  if ((audioLayer == kWindowsWaveAudio)
#if !defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
      // Wave audio is default if Core audio is not supported in this build
      || (audioLayer == kPlatformDefaultAudio)
#endif
      ) {
    // create *Windows Wave Audio* implementation
    ptrAudioDevice = new AudioDeviceWindowsWave(Id());
    LOG(INFO) << "Windows Wave APIs will be utilized";
  }
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  if ((audioLayer == kWindowsCoreAudio) ||
      (audioLayer == kPlatformDefaultAudio)) {
@@ -220,20 +208,9 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() {
    // create *Windows Core Audio* implementation
    ptrAudioDevice = new AudioDeviceWindowsCore(Id());
    LOG(INFO) << "Windows Core Audio APIs will be utilized";
  } else {
    // create *Windows Wave Audio* implementation
    ptrAudioDevice = new AudioDeviceWindowsWave(Id());
    if (ptrAudioDevice != NULL) {
      // Core Audio was not supported => revert to Windows Wave instead
      _platformAudioLayer =
          kWindowsWaveAudio;  // modify the state set at construction
      LOG(WARNING) << "Windows Core Audio is *not* supported => Wave APIs "
                      "will be utilized instead";
    }
  }
}
#endif  // defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
#endif  // #if defined(_WIN32)

#if defined(WEBRTC_ANDROID)
  // Create an Android audio manager.
@@ -26,7 +26,6 @@ class AudioDeviceModule : public RefCountedModule {

  enum AudioLayer {
    kPlatformDefaultAudio = 0,
    kWindowsWaveAudio = 1,
    kWindowsCoreAudio = 2,
    kLinuxAlsaAudio = 3,
    kLinuxPulseAudio = 4,
@@ -163,45 +163,22 @@ class AudioDeviceAPITest: public testing::Test {

  // Windows:
  //  if (WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  //    user can select between default (Core) or Wave
  //  else
  //    user can select between default (Wave) or Wave
  //  user can select only the default (Core)
  const int32_t kId = 444;

#if defined(_WIN32)
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
      kId, AudioDeviceModule::kLinuxAlsaAudio)) == NULL);
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  TEST_LOG("WEBRTC_WINDOWS_CORE_AUDIO_BUILD is defined!\n\n");
  // create default implementation (=Core Audio) instance
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
      kId, AudioDeviceModule::kPlatformDefaultAudio)) != NULL);
  EXPECT_EQ(0, audio_device_.release()->Release());
  // create non-default (=Wave Audio) instance
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
      kId, AudioDeviceModule::kWindowsWaveAudio)) != NULL);
  EXPECT_EQ(0, audio_device_.release()->Release());
  // explicitly specify usage of Core Audio (same as default)
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
      kId, AudioDeviceModule::kWindowsCoreAudio)) != NULL);
#else
  TEST_LOG("WEBRTC_WINDOWS_CORE_AUDIO_BUILD is *not* defined!\n");
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
      kId, AudioDeviceModule::kWindowsCoreAudio)) == NULL);
  // create default implementation (=Wave Audio) instance
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
      kId, AudioDeviceModule::kPlatformDefaultAudio)) != NULL);
  EXPECT_EQ(0, audio_device_.release()->Release());
  // explicitly specify usage of Wave Audio (same as default)
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
      kId, AudioDeviceModule::kWindowsWaveAudio)) != NULL);
#endif
#endif

#if defined(ANDROID)
  // Fails tests
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
      kId, AudioDeviceModule::kWindowsWaveAudio)) == NULL);
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
      kId, AudioDeviceModule::kWindowsCoreAudio)) == NULL);
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
@@ -212,8 +189,6 @@ class AudioDeviceAPITest: public testing::Test {
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
      kId, AudioDeviceModule::kPlatformDefaultAudio)) != NULL);
#elif defined(WEBRTC_LINUX)
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
      kId, AudioDeviceModule::kWindowsWaveAudio)) == NULL);
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
      kId, AudioDeviceModule::kWindowsCoreAudio)) == NULL);
  // create default implementation instance
@@ -228,8 +203,6 @@ class AudioDeviceAPITest: public testing::Test {

#if defined(WEBRTC_MAC)
  // Fails tests
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
      kId, AudioDeviceModule::kWindowsWaveAudio)) == NULL);
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
      kId, AudioDeviceModule::kWindowsCoreAudio)) == NULL);
  EXPECT_TRUE((audio_device_ = AudioDeviceModule::Create(
@@ -471,7 +444,7 @@ TEST_F(AudioDeviceAPITest, SetRecordingDevice) {

TEST_F(AudioDeviceAPITest, PlayoutIsAvailable) {
  bool available;
#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  EXPECT_TRUE(audio_device_->SetPlayoutDevice(
      AudioDeviceModule::kDefaultCommunicationDevice) == 0);
  EXPECT_EQ(0, audio_device_->PlayoutIsAvailable(&available));
@@ -494,7 +467,7 @@ TEST_F(AudioDeviceAPITest, PlayoutIsAvailable) {

TEST_F(AudioDeviceAPITest, RecordingIsAvailable) {
  bool available;
#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  EXPECT_EQ(0, audio_device_->SetRecordingDevice(
      AudioDeviceModule::kDefaultCommunicationDevice));
  EXPECT_EQ(0, audio_device_->RecordingIsAvailable(&available));
@@ -623,7 +596,7 @@ TEST_F(AudioDeviceAPITest, StartAndStopPlayout) {
  EXPECT_EQ(-1, audio_device_->StartPlayout());
  EXPECT_EQ(0, audio_device_->StopPlayout());

#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // kDefaultCommunicationDevice
  EXPECT_TRUE(audio_device_->SetPlayoutDevice(
      AudioDeviceModule::kDefaultCommunicationDevice) == 0);
@@ -682,7 +655,7 @@ TEST_F(AudioDeviceAPITest, StartAndStopRecording) {
  EXPECT_EQ(-1, audio_device_->StartRecording());
  EXPECT_EQ(0, audio_device_->StopRecording());

#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // kDefaultCommunicationDevice
  EXPECT_TRUE(audio_device_->SetRecordingDevice(
      AudioDeviceModule::kDefaultCommunicationDevice) == 0);
@@ -732,58 +705,6 @@ TEST_F(AudioDeviceAPITest, StartAndStopRecording) {
  }
}

#if defined(_WIN32) && !defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
TEST_F(AudioDeviceAPITest, SetAndGetWaveOutVolume) {
  uint32_t vol(0);
  // NOTE 1: Windows Wave only!
  // NOTE 2: It seems like the waveOutSetVolume API returns
  // MMSYSERR_NOTSUPPORTED on some Vista machines!
  const uint16_t maxVol(0xFFFF);
  uint16_t volL, volR;

  CheckInitialPlayoutStates();

  // make dummy test to see if this API is supported
  int32_t works = audio_device_->SetWaveOutVolume(vol, vol);
  WARNING(works == 0);

  if (works == 0)
  {
    // set volume without open playout device
    for (vol = 0; vol <= maxVol; vol += (maxVol/5))
    {
      EXPECT_EQ(0, audio_device_->SetWaveOutVolume(vol, vol));
      EXPECT_EQ(0, audio_device_->WaveOutVolume(volL, volR));
      EXPECT_TRUE((volL == vol) && (volR == vol));
    }

    // repeat test but this time with an open (default) output device
    EXPECT_EQ(0, audio_device_->SetPlayoutDevice(
        AudioDeviceModule::kDefaultDevice));
    EXPECT_EQ(0, audio_device_->InitPlayout());
    EXPECT_TRUE(audio_device_->PlayoutIsInitialized());
    for (vol = 0; vol <= maxVol; vol += (maxVol/5))
    {
      EXPECT_EQ(0, audio_device_->SetWaveOutVolume(vol, vol));
      EXPECT_EQ(0, audio_device_->WaveOutVolume(volL, volR));
      EXPECT_TRUE((volL == vol) && (volR == vol));
    }

    // as above but while playout is active
    EXPECT_EQ(0, audio_device_->StartPlayout());
    EXPECT_TRUE(audio_device_->Playing());
    for (vol = 0; vol <= maxVol; vol += (maxVol/5))
    {
      EXPECT_EQ(0, audio_device_->SetWaveOutVolume(vol, vol));
      EXPECT_EQ(0, audio_device_->WaveOutVolume(volL, volR));
      EXPECT_TRUE((volL == vol) && (volR == vol));
    }
  }

  EXPECT_EQ(0, audio_device_->StopPlayout());
  EXPECT_FALSE(audio_device_->Playing());
}
#endif  // defined(_WIN32) && !defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)

TEST_F(AudioDeviceAPITest, InitSpeaker) {
  // NOTE: By calling Terminate (in TearDown) followed by Init (in SetUp) we
@@ -857,7 +778,7 @@ TEST_F(AudioDeviceAPITest, SpeakerVolumeIsAvailable) {
  CheckInitialPlayoutStates();
  bool available;

#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // check the kDefaultCommunicationDevice
  EXPECT_TRUE(audio_device_->SetPlayoutDevice(
      AudioDeviceModule::kDefaultCommunicationDevice) == 0);
@@ -904,20 +825,7 @@ TEST_F(AudioDeviceAPITest, SpeakerVolumeTests) {
  EXPECT_EQ(-1, audio_device_->MinSpeakerVolume(&minVolume));
  EXPECT_EQ(-1, audio_device_->SpeakerVolumeStepSize(&stepSize));

#if defined(_WIN32) && !defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // test for warning (can e.g. happen on Vista with Wave API)
  EXPECT_EQ(0,
      audio_device_->SetPlayoutDevice(AudioDeviceModule::kDefaultDevice));
  EXPECT_EQ(0, audio_device_->SpeakerVolumeIsAvailable(&available));
  if (available) {
    EXPECT_EQ(0, audio_device_->InitSpeaker());
    EXPECT_EQ(0, audio_device_->SetSpeakerVolume(19001));
    EXPECT_EQ(0, audio_device_->SpeakerVolume(&volume));
    WARNING(volume == 19001);
  }
#endif

#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // use kDefaultCommunicationDevice and modify/retrieve the volume
  EXPECT_TRUE(audio_device_->SetPlayoutDevice(
      AudioDeviceModule::kDefaultCommunicationDevice) == 0);
@@ -1001,7 +909,7 @@ TEST_F(AudioDeviceAPITest, MicrophoneVolumeIsAvailable) {
  CheckInitialRecordingStates();
  bool available;

#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // check the kDefaultCommunicationDevice
  EXPECT_TRUE(audio_device_->SetRecordingDevice(
      AudioDeviceModule::kDefaultCommunicationDevice) == 0);
@@ -1054,21 +962,7 @@ TEST_F(AudioDeviceAPITest, MAYBE_MicrophoneVolumeTests) {
  EXPECT_EQ(-1, audio_device_->MinMicrophoneVolume(&minVolume));
  EXPECT_EQ(-1, audio_device_->MicrophoneVolumeStepSize(&stepSize));

#if defined(_WIN32) && !defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // test for warning (can e.g. happen on Vista with Wave API)
  EXPECT_EQ(0, audio_device_->SetRecordingDevice(
      AudioDeviceModule::kDefaultDevice));
  EXPECT_EQ(0, audio_device_->MicrophoneVolumeIsAvailable(&available));
  if (available)
  {
    EXPECT_EQ(0, audio_device_->InitMicrophone());
    EXPECT_EQ(0, audio_device_->SetMicrophoneVolume(19001));
    EXPECT_EQ(0, audio_device_->MicrophoneVolume(&volume));
    WARNING(volume == 19001);
  }
#endif

#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // initialize kDefaultCommunicationDevice and modify/retrieve the volume
  EXPECT_TRUE(audio_device_->SetRecordingDevice(
      AudioDeviceModule::kDefaultCommunicationDevice) == 0);
@@ -1134,7 +1028,7 @@ TEST_F(AudioDeviceAPITest, MAYBE_MicrophoneVolumeTests) {
TEST_F(AudioDeviceAPITest, SpeakerMuteIsAvailable) {
  bool available;
  CheckInitialPlayoutStates();
#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // check the kDefaultCommunicationDevice
  EXPECT_TRUE(audio_device_->SetPlayoutDevice(
      AudioDeviceModule::kDefaultCommunicationDevice) == 0);
@@ -1160,7 +1054,7 @@ TEST_F(AudioDeviceAPITest, SpeakerMuteIsAvailable) {
TEST_F(AudioDeviceAPITest, MicrophoneMuteIsAvailable) {
  bool available;
  CheckInitialRecordingStates();
#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // check the kDefaultCommunicationDevice
  EXPECT_TRUE(audio_device_->SetRecordingDevice(
      AudioDeviceModule::kDefaultCommunicationDevice) == 0);
@@ -1186,7 +1080,7 @@ TEST_F(AudioDeviceAPITest, MicrophoneMuteIsAvailable) {
TEST_F(AudioDeviceAPITest, MicrophoneBoostIsAvailable) {
  bool available;
  CheckInitialRecordingStates();
#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // check the kDefaultCommunicationDevice
  EXPECT_TRUE(audio_device_->SetRecordingDevice(
      AudioDeviceModule::kDefaultCommunicationDevice) == 0);
@@ -1218,7 +1112,7 @@ TEST_F(AudioDeviceAPITest, SpeakerMuteTests) {
  // requires initialization
  EXPECT_EQ(-1, audio_device_->SpeakerMute(&enabled));

#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // initialize kDefaultCommunicationDevice and modify/retrieve the mute state
  EXPECT_EQ(0, audio_device_->SetPlayoutDevice(
      AudioDeviceModule::kDefaultCommunicationDevice));
@@ -1272,7 +1166,7 @@ TEST_F(AudioDeviceAPITest, MicrophoneMuteTests) {
  bool enabled;
  EXPECT_EQ(-1, audio_device_->MicrophoneMute(&enabled));

#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // initialize kDefaultCommunicationDevice and modify/retrieve the mute
  EXPECT_TRUE(audio_device_->SetRecordingDevice(
      AudioDeviceModule::kDefaultCommunicationDevice) == 0);
@@ -1326,7 +1220,7 @@ TEST_F(AudioDeviceAPITest, MicrophoneBoostTests) {
  // requires initialization
  EXPECT_EQ(-1, audio_device_->MicrophoneBoost(&enabled));

#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // initialize kDefaultCommunicationDevice and modify/retrieve the boost
  EXPECT_TRUE(audio_device_->SetRecordingDevice(
      AudioDeviceModule::kDefaultCommunicationDevice) == 0);
@@ -1505,7 +1399,8 @@ TEST_F(AudioDeviceAPITest, PlayoutBufferTests) {

  CheckInitialPlayoutStates();
  EXPECT_EQ(0, audio_device_->PlayoutBuffer(&bufferType, &sizeMS));
#if defined(_WIN32) || defined(ANDROID) || defined(WEBRTC_IOS)
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD) || defined(ANDROID) || \
    defined(WEBRTC_IOS)
  EXPECT_EQ(AudioDeviceModule::kAdaptiveBufferSize, bufferType);
#else
  EXPECT_EQ(AudioDeviceModule::kFixedBufferSize, bufferType);
@@ -1532,7 +1427,7 @@ TEST_F(AudioDeviceAPITest, PlayoutBufferTests) {

  // bulk tests (all should be successful)
  EXPECT_FALSE(audio_device_->PlayoutIsInitialized());
#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  EXPECT_EQ(0, audio_device_->SetPlayoutBuffer(
      AudioDeviceModule::kAdaptiveBufferSize, 0));
  EXPECT_EQ(0, audio_device_->PlayoutBuffer(&bufferType, &sizeMS));
@@ -1564,7 +1459,7 @@ TEST_F(AudioDeviceAPITest, PlayoutBufferTests) {
  EXPECT_EQ(100, sizeMS);
#endif

#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  // restore default
  EXPECT_EQ(0, audio_device_->SetPlayoutBuffer(
      AudioDeviceModule::kAdaptiveBufferSize, 0));
@@ -1596,7 +1491,7 @@ TEST_F(AudioDeviceAPITest, CPULoad) {
  uint16_t load(0);

  // bulk tests
#ifdef _WIN32
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  EXPECT_EQ(0, audio_device_->CPULoad(&load));
  EXPECT_EQ(0, load);
#else
File diff suppressed because it is too large
@@ -1,343 +0,0 @@
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WAVE_WIN_H
#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WAVE_WIN_H

#include <memory>

#include "webrtc/base/platform_thread.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/audio_device/win/audio_mixer_manager_win.h"

#pragma comment( lib, "winmm.lib" )

namespace webrtc {
class EventTimerWrapper;
class EventWrapper;

const uint32_t TIMER_PERIOD_MS = 2;
const uint32_t REC_CHECK_TIME_PERIOD_MS = 4;
const uint16_t REC_PUT_BACK_DELAY = 4;

const uint32_t N_REC_SAMPLES_PER_SEC = 48000;
const uint32_t N_PLAY_SAMPLES_PER_SEC = 48000;

const uint32_t N_REC_CHANNELS = 1;   // default is mono recording
const uint32_t N_PLAY_CHANNELS = 2;  // default is stereo playout

// NOTE - CPU load will not be correct for other sizes than 10ms
const uint32_t REC_BUF_SIZE_IN_SAMPLES = (N_REC_SAMPLES_PER_SEC/100);
const uint32_t PLAY_BUF_SIZE_IN_SAMPLES = (N_PLAY_SAMPLES_PER_SEC/100);

enum { N_BUFFERS_IN = 200 };
enum { N_BUFFERS_OUT = 200 };

class AudioDeviceWindowsWave : public AudioDeviceGeneric
{
public:
    AudioDeviceWindowsWave(const int32_t id);
    ~AudioDeviceWindowsWave();

    // Retrieve the currently utilized audio layer
    virtual int32_t ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const;

    // Main initializaton and termination
    virtual InitStatus Init();
    virtual int32_t Terminate();
    virtual bool Initialized() const;

    // Device enumeration
    virtual int16_t PlayoutDevices();
    virtual int16_t RecordingDevices();
    virtual int32_t PlayoutDeviceName(
        uint16_t index,
        char name[kAdmMaxDeviceNameSize],
        char guid[kAdmMaxGuidSize]);
    virtual int32_t RecordingDeviceName(
        uint16_t index,
        char name[kAdmMaxDeviceNameSize],
        char guid[kAdmMaxGuidSize]);

    // Device selection
    virtual int32_t SetPlayoutDevice(uint16_t index);
    virtual int32_t SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
    virtual int32_t SetRecordingDevice(uint16_t index);
    virtual int32_t SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device);

    // Audio transport initialization
    virtual int32_t PlayoutIsAvailable(bool& available);
    virtual int32_t InitPlayout();
    virtual bool PlayoutIsInitialized() const;
    virtual int32_t RecordingIsAvailable(bool& available);
    virtual int32_t InitRecording();
    virtual bool RecordingIsInitialized() const;

    // Audio transport control
    virtual int32_t StartPlayout();
    virtual int32_t StopPlayout();
    virtual bool Playing() const;
    virtual int32_t StartRecording();
    virtual int32_t StopRecording();
    virtual bool Recording() const;

    // Microphone Automatic Gain Control (AGC)
    virtual int32_t SetAGC(bool enable);
    virtual bool AGC() const;

    // Volume control based on the Windows Wave API (Windows only)
    virtual int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight);
    virtual int32_t WaveOutVolume(uint16_t& volumeLeft, uint16_t& volumeRight) const;

    // Audio mixer initialization
    virtual int32_t InitSpeaker();
    virtual bool SpeakerIsInitialized() const;
    virtual int32_t InitMicrophone();
    virtual bool MicrophoneIsInitialized() const;

    // Speaker volume controls
    virtual int32_t SpeakerVolumeIsAvailable(bool& available);
    virtual int32_t SetSpeakerVolume(uint32_t volume);
    virtual int32_t SpeakerVolume(uint32_t& volume) const;
    virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
    virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
    virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;

    // Microphone volume controls
    virtual int32_t MicrophoneVolumeIsAvailable(bool& available);
    virtual int32_t SetMicrophoneVolume(uint32_t volume);
    virtual int32_t MicrophoneVolume(uint32_t& volume) const;
    virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
    virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
    virtual int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const;

    // Speaker mute control
    virtual int32_t SpeakerMuteIsAvailable(bool& available);
    virtual int32_t SetSpeakerMute(bool enable);
    virtual int32_t SpeakerMute(bool& enabled) const;

    // Microphone mute control
    virtual int32_t MicrophoneMuteIsAvailable(bool& available);
    virtual int32_t SetMicrophoneMute(bool enable);
    virtual int32_t MicrophoneMute(bool& enabled) const;

    // Microphone boost control
    virtual int32_t MicrophoneBoostIsAvailable(bool& available);
    virtual int32_t SetMicrophoneBoost(bool enable);
    virtual int32_t MicrophoneBoost(bool& enabled) const;

    // Stereo support
    virtual int32_t StereoPlayoutIsAvailable(bool& available);
    virtual int32_t SetStereoPlayout(bool enable);
    virtual int32_t StereoPlayout(bool& enabled) const;
    virtual int32_t StereoRecordingIsAvailable(bool& available);
    virtual int32_t SetStereoRecording(bool enable);
    virtual int32_t StereoRecording(bool& enabled) const;

    // Delay information and control
    virtual int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type, uint16_t sizeMS);
    virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type, uint16_t& sizeMS) const;
    virtual int32_t PlayoutDelay(uint16_t& delayMS) const;
    virtual int32_t RecordingDelay(uint16_t& delayMS) const;

    // CPU load
    virtual int32_t CPULoad(uint16_t& load) const;

public:
    virtual bool PlayoutWarning() const;
    virtual bool PlayoutError() const;
    virtual bool RecordingWarning() const;
    virtual bool RecordingError() const;
    virtual void ClearPlayoutWarning();
    virtual void ClearPlayoutError();
    virtual void ClearRecordingWarning();
    virtual void ClearRecordingError();

public:
    virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);

private:
    void Lock() { _critSect.Enter(); };
    void UnLock() { _critSect.Leave(); };
    int32_t Id() {return _id;}
    bool IsUsingOutputDeviceIndex() const {return _usingOutputDeviceIndex;}
    AudioDeviceModule::WindowsDeviceType OutputDevice() const {return _outputDevice;}
    uint16_t OutputDeviceIndex() const {return _outputDeviceIndex;}
    bool IsUsingInputDeviceIndex() const {return _usingInputDeviceIndex;}
    AudioDeviceModule::WindowsDeviceType InputDevice() const {return _inputDevice;}
    uint16_t InputDeviceIndex() const {return _inputDeviceIndex;}

private:
    inline int32_t InputSanityCheckAfterUnlockedPeriod() const;
    inline int32_t OutputSanityCheckAfterUnlockedPeriod() const;

private:
    bool KeyPressed() const;

private:
    int32_t EnumeratePlayoutDevices();
    int32_t EnumerateRecordingDevices();
    void TraceSupportFlags(DWORD dwSupport) const;
    void TraceWaveInError(MMRESULT error) const;
    void TraceWaveOutError(MMRESULT error) const;
    int32_t PrepareStartRecording();
    int32_t PrepareStartPlayout();

    int32_t RecProc(LONGLONG& consumedTime);
    int PlayProc(LONGLONG& consumedTime);

    int32_t GetPlayoutBufferDelay(uint32_t& writtenSamples, uint32_t& playedSamples);
    int32_t GetRecordingBufferDelay(uint32_t& readSamples, uint32_t& recSamples);
    int32_t Write(int8_t* data, uint16_t nSamples);
    int32_t GetClockDrift(const uint32_t plSamp, const uint32_t rcSamp);
    int32_t MonitorRecording(const uint32_t time);
    int32_t RestartTimerIfNeeded(const uint32_t time);

private:
    static bool ThreadFunc(void*);
    bool ThreadProcess();

    static DWORD WINAPI GetCaptureVolumeThread(LPVOID context);
    DWORD DoGetCaptureVolumeThread();

    static DWORD WINAPI SetCaptureVolumeThread(LPVOID context);
    DWORD DoSetCaptureVolumeThread();

private:
    AudioDeviceBuffer* _ptrAudioBuffer;

    CriticalSectionWrapper& _critSect;
    EventTimerWrapper& _timeEvent;
    EventWrapper& _recStartEvent;
    EventWrapper& _playStartEvent;

    HANDLE _hGetCaptureVolumeThread;
    HANDLE _hShutdownGetVolumeEvent;
    HANDLE _hSetCaptureVolumeThread;
    HANDLE _hShutdownSetVolumeEvent;
    HANDLE _hSetCaptureVolumeEvent;

    // TODO(pbos): Remove unique_ptr usage and use PlatformThread directly
    std::unique_ptr<rtc::PlatformThread> _ptrThread;

    CriticalSectionWrapper& _critSectCb;

    int32_t _id;

    AudioMixerManager _mixerManager;

    bool _usingInputDeviceIndex;
    bool _usingOutputDeviceIndex;
    AudioDeviceModule::WindowsDeviceType _inputDevice;
    AudioDeviceModule::WindowsDeviceType _outputDevice;
    uint16_t _inputDeviceIndex;
    uint16_t _outputDeviceIndex;
    bool _inputDeviceIsSpecified;
    bool _outputDeviceIsSpecified;

    WAVEFORMATEX _waveFormatIn;
    WAVEFORMATEX _waveFormatOut;

    HWAVEIN _hWaveIn;
    HWAVEOUT _hWaveOut;

    WAVEHDR _waveHeaderIn[N_BUFFERS_IN];
    WAVEHDR _waveHeaderOut[N_BUFFERS_OUT];

    uint8_t _recChannels;
    uint8_t _playChannels;
    uint16_t _recBufCount;
    uint16_t _recDelayCount;
    uint16_t _recPutBackDelay;

    int8_t _recBuffer[N_BUFFERS_IN][4*REC_BUF_SIZE_IN_SAMPLES];
    int8_t _playBuffer[N_BUFFERS_OUT][4*PLAY_BUF_SIZE_IN_SAMPLES];

    AudioDeviceModule::BufferType _playBufType;

private:
    bool _initialized;
    bool _recording;
    bool _playing;
    bool _recIsInitialized;
    bool _playIsInitialized;
    bool _startRec;
    bool _stopRec;
    bool _startPlay;
    bool _stopPlay;
    bool _AGC;

private:
    uint32_t _prevPlayTime;
    uint32_t _prevRecTime;
    uint32_t _prevTimerCheckTime;

    uint16_t _playBufCount;         // playout buffer index
    uint16_t _dTcheckPlayBufDelay;  // dT for check of play buffer, {2,5,10} [ms]
    uint16_t _playBufDelay;         // playback delay
    uint16_t _playBufDelayFixed;    // fixed playback delay
    uint16_t _minPlayBufDelay;      // minimum playback delay
    uint16_t _MAX_minBuffer;        // level of (adaptive) min threshold must be < _MAX_minBuffer

    int32_t _erZeroCounter;         // counts "buffer-is-empty" events
    int32_t _intro;
    int32_t _waitCounter;

    uint32_t _writtenSamples;
    uint32_t _writtenSamplesOld;
    uint32_t _playedSamplesOld;

    uint32_t _sndCardPlayDelay;
    uint32_t _sndCardRecDelay;

    uint32_t _plSampOld;
    uint32_t _rcSampOld;

    uint32_t _read_samples;
    uint32_t _read_samples_old;
    uint32_t _rec_samples_old;

    // State that detects driver problems:
    int32_t _dc_diff_mean;
    int32_t _dc_y_prev;
    int32_t _dc_penalty_counter;
    int32_t _dc_prevtime;
    uint32_t _dc_prevplay;

    uint32_t _recordedBytes;        // accumulated #recorded bytes (reset periodically)
    uint32_t _prevRecByteCheckTime; // time when we last checked the recording process

    // CPU load measurements
    LARGE_INTEGER _perfFreq;
    LONGLONG _playAcc;              // accumulated time for playout callback
    float _avgCPULoad;              // average total (rec+play) CPU load

    int32_t _wrapCounter;

    int32_t _useHeader;
    int16_t _timesdwBytes;
    int32_t _no_of_msecleft_warnings;
    int32_t _writeErrors;
    int32_t _timerFaults;
    int32_t _timerRestartAttempts;

    uint16_t _playWarning;
    uint16_t _playError;
    uint16_t _recWarning;
    uint16_t _recError;

    uint32_t _newMicLevel;
    uint32_t _minMicVolume;
    uint32_t _maxMicVolume;
};

}  // namespace webrtc

#endif  // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WAVE_WIN_H
@@ -57,9 +57,6 @@ int VoEHardwareImpl::SetAudioDeviceLayer(AudioLayers audioLayer) {
    case kAudioWindowsCore:
      wantedLayer = AudioDeviceModule::kWindowsCoreAudio;
      break;
    case kAudioWindowsWave:
      wantedLayer = AudioDeviceModule::kWindowsWaveAudio;
      break;
    case kAudioLinuxAlsa:
      wantedLayer = AudioDeviceModule::kLinuxAlsaAudio;
      break;
@@ -100,9 +97,6 @@ int VoEHardwareImpl::GetAudioDeviceLayer(AudioLayers& audioLayer) {
    case AudioDeviceModule::kWindowsCoreAudio:
      audioLayer = kAudioWindowsCore;
      break;
    case AudioDeviceModule::kWindowsWaveAudio:
      audioLayer = kAudioWindowsWave;
      break;
    case AudioDeviceModule::kLinuxAlsaAudio:
      audioLayer = kAudioLinuxAlsa;
      break;