Ports CoreAudioUtil from Chrome to WebRTC.

See https://cs.chromium.org/chromium/src/media/audio/win/core_audio_util_win.h?q=coreaudio&sq=package:chromium&g=0&l=34
for details.

Bug: webrtc:9265
Change-Id: I0fd26620d94a81ccced68d81021c39723a5be2cb
Reviewed-on: https://webrtc-review.googlesource.com/76900
Reviewed-by: Oskar Sundbom <ossu@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Commit-Queue: Henrik Andreasson <henrika@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23298}
henrika 2018-05-18 10:20:58 +02:00 committed by Commit Bot
parent 7eb8e9fd7b
commit 7e6fcea7de
6 changed files with 1910 additions and 0 deletions
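For orientation, here is a minimal usage sketch of the utility API this change introduces. It is not part of the commit; the function name LogDefaultOutputDevice is hypothetical, while all other names come from win/core_audio_utility_win.h below.

// Sketch only: verify Core Audio support, then query the unique ID of the
// default render device.
#include "modules/audio_device/win/core_audio_utility_win.h"

void LogDefaultOutputDevice() {
  // All Core Audio utility methods must be called on a COM-initialized
  // thread; ScopedCOMInitializer handles that for the current scope.
  webrtc_win::ScopedCOMInitializer com_init(
      webrtc_win::ScopedCOMInitializer::kMTA);
  if (!webrtc_win::core_audio_utility::IsSupported())
    return;
  RTC_LOG(INFO) << "Default output device: "
                << webrtc_win::core_audio_utility::GetDefaultOutputDeviceID();
}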

modules/audio_device/BUILD.gn

@@ -153,6 +153,13 @@ rtc_source_set("audio_device_generic") {
}
}
rtc_source_set("audio_device_name") {
sources = [
"audio_device_name.cc",
"audio_device_name.h",
]
}
# Contains default implementations of webrtc::AudioDeviceModule for Windows,
# Linux, Mac, iOS and Android.
rtc_source_set("audio_device_impl") {
@@ -180,6 +187,9 @@ rtc_source_set("audio_device_impl") {
if (rtc_include_internal_audio_device && is_ios) {
deps += [ ":audio_device_ios_objc" ]
}
if (is_win) {
deps += [ ":audio_device_name" ]
}
sources = [
"dummy/audio_device_dummy.cc",
@@ -329,6 +339,8 @@ rtc_source_set("audio_device_impl") {
sources += [
"win/audio_device_core_win.cc",
"win/audio_device_core_win.h",
"win/core_audio_utility_win.cc",
"win/core_audio_utility_win.h",
]
libs = [
# Required for the built-in WASAPI AEC.
@@ -437,6 +449,9 @@ if (rtc_include_tests) {
if (is_linux || is_mac || is_win) {
sources += [ "audio_device_unittest.cc" ]
}
if (is_win && !rtc_use_dummy_audio_file_devices) {
sources += [ "win/core_audio_utility_win_unittest.cc" ]
}
if (is_android) {
# Need to disable error due to the line in
# base/android/jni_android.h triggering it:

modules/audio_device/audio_device_name.cc

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_device/audio_device_name.h"
#include <utility>
namespace webrtc {
const char AudioDeviceName::kDefaultDeviceId[] = "default";
const char AudioDeviceName::kDefaultCommunicationsDeviceId[] = "communications";
AudioDeviceName::AudioDeviceName(std::string device_name,
std::string unique_id)
: device_name(std::move(device_name)), unique_id(std::move(unique_id)) {}
bool AudioDeviceName::IsValid() {
return !device_name.empty() && !unique_id.empty();
}
} // namespace webrtc

modules/audio_device/audio_device_name.h

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_DEVICE_AUDIO_DEVICE_NAME_H_
#define MODULES_AUDIO_DEVICE_AUDIO_DEVICE_NAME_H_
#include <string>
#include <vector>
namespace webrtc {
struct AudioDeviceName {
// Unique ID of the generic default device.
static const char kDefaultDeviceId[];
// Unique ID of the generic default communications device.
static const char kDefaultCommunicationsDeviceId[];
AudioDeviceName() = default;
AudioDeviceName(std::string device_name, std::string unique_id);
~AudioDeviceName() = default;
// Support copy and move.
AudioDeviceName(const AudioDeviceName& other) = default;
AudioDeviceName(AudioDeviceName&&) = default;
AudioDeviceName& operator=(const AudioDeviceName&) = default;
AudioDeviceName& operator=(AudioDeviceName&&) = default;
bool IsValid();
std::string device_name; // Friendly name of the device.
std::string unique_id; // Unique identifier for the device.
};
typedef std::vector<AudioDeviceName> AudioDeviceNames;
} // namespace webrtc
#endif // MODULES_AUDIO_DEVICE_AUDIO_DEVICE_NAME_H_
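
A short usage sketch of the struct above (hypothetical call site; the device strings are illustrative):

// Sketch: construct a device-name entry and validate it before use.
#include "modules/audio_device/audio_device_name.h"

void Example() {
  webrtc::AudioDeviceName name(
      "Microphone (Realtek High Definition Audio)",
      "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}");
  if (name.IsValid()) {
    webrtc::AudioDeviceNames names;  // std::vector<AudioDeviceName>
    names.push_back(name);
  }
}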

modules/audio_device/win/core_audio_utility_win.cc

@@ -0,0 +1,826 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_device/win/core_audio_utility_win.h"
#include <Functiondiscoverykeys_devpkey.h>
#include <stdio.h>
#include <tchar.h>
#include <iomanip>
#include <string>
#include <utility>
#include "rtc_base/arraysize.h"
#include "rtc_base/logging.h"
#include "rtc_base/platform_thread_types.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/stringutils.h"
using Microsoft::WRL::ComPtr;
using webrtc::AudioDeviceName;
using webrtc::AudioParameters;
namespace webrtc_win {
namespace {
using core_audio_utility::ErrorToString;
bool LoadAudiosesDll() {
static const wchar_t* const kAudiosesDLL =
L"%WINDIR%\\system32\\audioses.dll";
wchar_t path[MAX_PATH] = {0};
ExpandEnvironmentStringsW(kAudiosesDLL, path, arraysize(path));
RTC_DLOG(INFO) << rtc::ToUtf8(path);
return (LoadLibraryExW(path, nullptr, LOAD_WITH_ALTERED_SEARCH_PATH) !=
nullptr);
}
ComPtr<IMMDeviceEnumerator> CreateDeviceEnumeratorInternal(
bool allow_reinitialize) {
ComPtr<IMMDeviceEnumerator> device_enumerator;
_com_error error = ::CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr,
CLSCTX_INPROC_SERVER,
IID_PPV_ARGS(&device_enumerator));
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "CoCreateInstance failed: " << ErrorToString(error);
}
if (error.Error() == CO_E_NOTINITIALIZED && allow_reinitialize) {
RTC_LOG(LS_ERROR) << "CoCreateInstance failed with CO_E_NOTINITIALIZED";
// We have seen crashes which indicate that this method can in fact
// fail with CO_E_NOTINITIALIZED in combination with certain 3rd party
// modules. Calling CoInitializeEx() is an attempt to resolve the reported
// issues. See http://crbug.com/378465 for details.
error = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
if (error.Error() != S_OK) {
error = ::CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr,
CLSCTX_INPROC_SERVER,
IID_PPV_ARGS(&device_enumerator));
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "CoCreateInstance failed: "
<< ErrorToString(error);
}
}
}
return device_enumerator;
}
bool IsSupportedInternal() {
// The Core Audio APIs are implemented in the user-mode system components
// Audioses.dll and Mmdevapi.dll. Dependency Walker shows that it is
// enough to verify that the Audioses DLL can be loaded since it depends
// on Mmdevapi.dll. See http://crbug.com/166397 for why this extra step is
// required to guarantee Core Audio support.
if (!LoadAudiosesDll())
return false;
// Being able to load the Audioses DLL does not seem to be sufficient for
// all devices to guarantee Core Audio support. To be 100% sure, we also
// verify that it is possible to create the IMMDeviceEnumerator interface.
// If this works as well, we should be home free.
ComPtr<IMMDeviceEnumerator> device_enumerator =
CreateDeviceEnumeratorInternal(false);
if (!device_enumerator) {
RTC_LOG(LS_ERROR)
<< "Failed to create Core Audio device enumerator on thread with ID "
<< rtc::CurrentThreadId();
return false;
}
return true;
}
bool IsDeviceActive(IMMDevice* device) {
DWORD state = DEVICE_STATE_DISABLED;
return SUCCEEDED(device->GetState(&state)) && (state & DEVICE_STATE_ACTIVE);
}
// Retrieve an audio device specified by |device_id|, or a default device
// specified by data-flow direction and role if |device_id| equals
// AudioDeviceName::kDefaultDeviceId.
ComPtr<IMMDevice> CreateDeviceInternal(const std::string& device_id,
EDataFlow data_flow,
ERole role) {
RTC_DLOG(INFO) << "CreateDeviceInternal: " << role;
ComPtr<IMMDevice> audio_endpoint_device;
// Create the IMMDeviceEnumerator interface.
ComPtr<IMMDeviceEnumerator> device_enum(CreateDeviceEnumeratorInternal(true));
if (!device_enum.Get())
return audio_endpoint_device;
_com_error error(S_FALSE);
if (device_id == AudioDeviceName::kDefaultDeviceId) {
error = device_enum->GetDefaultAudioEndpoint(
data_flow, role, audio_endpoint_device.GetAddressOf());
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR)
<< "IMMDeviceEnumerator::GetDefaultAudioEndpoint failed: "
<< ErrorToString(error);
}
} else {
error = device_enum->GetDevice(rtc::ToUtf16(device_id).c_str(),
audio_endpoint_device.GetAddressOf());
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IMMDeviceEnumerator::GetDevice failed: "
<< ErrorToString(error);
}
}
// Verify that the audio endpoint device is active, i.e., that the audio
// adapter that connects to the endpoint device is present and enabled.
if (SUCCEEDED(error.Error()) &&
!IsDeviceActive(audio_endpoint_device.Get())) {
RTC_LOG(LS_WARNING) << "Selected endpoint device is not active";
audio_endpoint_device.Reset();
}
return audio_endpoint_device;
}
std::string GetDeviceIdInternal(IMMDevice* device) {
// Retrieve unique name of endpoint device.
// Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
ScopedCoMem<WCHAR> device_id;
if (SUCCEEDED(device->GetId(&device_id))) {
return rtc::ToUtf8(device_id, wcslen(device_id));
} else {
return std::string();
}
}
std::string GetDeviceFriendlyNameInternal(IMMDevice* device) {
// Retrieve user-friendly name of endpoint device.
// Example: "Microphone (Realtek High Definition Audio)".
ComPtr<IPropertyStore> properties;
HRESULT hr = device->OpenPropertyStore(STGM_READ, properties.GetAddressOf());
if (FAILED(hr))
return std::string();
ScopedPropVariant friendly_name_pv;
hr = properties->GetValue(PKEY_Device_FriendlyName,
friendly_name_pv.Receive());
if (FAILED(hr))
return std::string();
if (friendly_name_pv.get().vt == VT_LPWSTR &&
friendly_name_pv.get().pwszVal) {
return rtc::ToUtf8(friendly_name_pv.get().pwszVal,
wcslen(friendly_name_pv.get().pwszVal));
} else {
return std::string();
}
}
// Creates and activates an IAudioClient COM object given the selected
// endpoint device.
ComPtr<IAudioClient> CreateClientInternal(IMMDevice* audio_device) {
if (!audio_device)
return ComPtr<IAudioClient>();
ComPtr<IAudioClient> audio_client;
_com_error error = audio_device->Activate(
__uuidof(IAudioClient), CLSCTX_INPROC_SERVER, nullptr, &audio_client);
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient) failed: "
<< ErrorToString(error);
}
return audio_client;
}
ComPtr<IAudioClient2> CreateClient2Internal(IMMDevice* audio_device) {
if (!audio_device)
return ComPtr<IAudioClient2>();
ComPtr<IAudioClient2> audio_client;
_com_error error = audio_device->Activate(
__uuidof(IAudioClient2), CLSCTX_INPROC_SERVER, nullptr, &audio_client);
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient2) failed: "
<< ErrorToString(error);
}
return audio_client;
}
ComPtr<IMMDeviceCollection> CreateCollectionInternal(EDataFlow data_flow) {
ComPtr<IMMDeviceEnumerator> device_enumerator(
CreateDeviceEnumeratorInternal(true));
if (!device_enumerator) {
return ComPtr<IMMDeviceCollection>();
}
// Generate a collection of active (present and not disabled) audio endpoint
// devices for the specified data-flow direction.
// This method will succeed even if all devices are disabled.
ComPtr<IMMDeviceCollection> collection;
_com_error error = device_enumerator->EnumAudioEndpoints(
data_flow, DEVICE_STATE_ACTIVE, collection.GetAddressOf());
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IMMDeviceCollection::EnumAudioEndpoints failed: "
<< ErrorToString(error);
}
return collection;
}
bool GetDeviceNamesInternal(EDataFlow data_flow,
webrtc::AudioDeviceNames* device_names) {
// Always add the default device at index 0 and the default communication
// device at index 1 in the vector. The name of the default device starts
// with "Default - " and the default communication device starts with
// "Communication - ".
// Example of friendly name: "Default - Headset (SB Arena Headset)"
ERole role[] = {eConsole, eCommunications};
ComPtr<IMMDevice> default_device;
AudioDeviceName default_device_name;
for (size_t i = 0; i < arraysize(role); ++i) {
default_device = CreateDeviceInternal(AudioDeviceName::kDefaultDeviceId,
data_flow, role[i]);
std::string device_name;
device_name += (role[i] == eConsole ? "Default - " : "Communication - ");
device_name += GetDeviceFriendlyNameInternal(default_device.Get());
std::string unique_id = GetDeviceIdInternal(default_device.Get());
default_device_name.device_name = std::move(device_name);
default_device_name.unique_id = std::move(unique_id);
RTC_DLOG(INFO) << "friendly name: " << default_device_name.device_name;
RTC_DLOG(INFO) << "unique id : " << default_device_name.unique_id;
// Add combination of user-friendly and unique name to the output list.
device_names->emplace_back(default_device_name);
}
// Next, add all active devices of the given direction at index 2 and
// above. Note that one device can have more than one role. Hence, if only
// one input device is present, the output vector will contain three
// elements all with the same unique ID but with different names.
// Example (one capture device but three elements in device_names):
// 0: friendly name: Default - Headset (SB Arena Headset)
// 0: unique id : {0.0.1.00000000}.{822d99bb-d9b0-4f6f-b2a5-cd1be220d338}
// 1: friendly name: Communication - Headset (SB Arena Headset)
// 1: unique id : {0.0.1.00000000}.{822d99bb-d9b0-4f6f-b2a5-cd1be220d338}
// 2: friendly name: Headset (SB Arena Headset)
// 2: unique id : {0.0.1.00000000}.{822d99bb-d9b0-4f6f-b2a5-cd1be220d338}
// Generate a collection of active audio endpoint devices for the specified
// direction.
ComPtr<IMMDeviceCollection> collection = CreateCollectionInternal(data_flow);
if (!collection.Get()) {
return false;
}
// Retrieve the number of active audio devices for the specified direction.
UINT number_of_active_devices = 0;
collection->GetCount(&number_of_active_devices);
if (number_of_active_devices == 0) {
return true;
}
// Loop over all active devices and add friendly name and unique ID to the
// |device_names| list, which already contains two elements.
RTC_DCHECK_EQ(device_names->size(), 2);
for (UINT i = 0; i < number_of_active_devices; ++i) {
// Retrieve a pointer to the specified item in the device collection.
ComPtr<IMMDevice> audio_device;
_com_error error = collection->Item(i, audio_device.GetAddressOf());
if (error.Error() != S_OK)
continue;
// Retrieve the complete device name for the given audio device endpoint.
AudioDeviceName device_name(
GetDeviceFriendlyNameInternal(audio_device.Get()),
GetDeviceIdInternal(audio_device.Get()));
RTC_DLOG(INFO) << "friendly name: " << device_name.device_name;
RTC_DLOG(INFO) << "unique id : " << device_name.unique_id;
// Add combination of user-friendly and unique name to the output list.
device_names->emplace_back(device_name);
}
return true;
}
HRESULT GetPreferredAudioParametersInternal(IAudioClient* client,
AudioParameters* params) {
WAVEFORMATPCMEX mix_format;
HRESULT hr = core_audio_utility::GetSharedModeMixFormat(client, &mix_format);
if (FAILED(hr))
return hr;
REFERENCE_TIME default_period = 0;
hr = core_audio_utility::GetDevicePeriod(client, AUDCLNT_SHAREMODE_SHARED,
&default_period);
if (FAILED(hr))
return hr;
const int sample_rate = mix_format.Format.nSamplesPerSec;
// TODO(henrika): utilize full mix_format.Format.wBitsPerSample.
// const size_t bits_per_sample = AudioParameters::kBitsPerSample;
// TODO(henrika): improve channel layout support.
const size_t channels = mix_format.Format.nChannels;
RTC_DCHECK_LE(channels, 2);
// Use the native device period to derive the smallest possible buffer size
// in shared mode.
double device_period_in_seconds =
static_cast<double>(
core_audio_utility::ReferenceTimeToTimeDelta(default_period).ms()) /
1000.0L;
const size_t frames_per_buffer =
static_cast<size_t>(sample_rate * device_period_in_seconds + 0.5);
AudioParameters audio_params(sample_rate, channels, frames_per_buffer);
*params = audio_params;
RTC_DLOG(INFO) << audio_params.ToString();
return hr;
}
} // namespace
namespace core_audio_utility {
bool IsSupported() {
static bool g_is_supported = IsSupportedInternal();
return g_is_supported;
}
int NumberOfActiveDevices(EDataFlow data_flow) {
// Generate a collection of active audio endpoint devices for the specified
// data-flow direction.
ComPtr<IMMDeviceCollection> collection = CreateCollectionInternal(data_flow);
if (!collection.Get()) {
return 0;
}
// Retrieve the number of active audio devices for the specified direction.
UINT number_of_active_devices = 0;
collection->GetCount(&number_of_active_devices);
std::string str;
if (data_flow == eCapture) {
str = "Number of capture devices: ";
} else if (data_flow == eRender) {
str = "Number of render devices: ";
} else if (data_flow == eAll) {
str = "Total number of devices: ";
}
RTC_DLOG(INFO) << str << number_of_active_devices;
return static_cast<int>(number_of_active_devices);
}
ComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator() {
RTC_DLOG(INFO) << "CreateDeviceEnumerator";
return CreateDeviceEnumeratorInternal(true);
}
std::string GetDefaultInputDeviceID() {
RTC_DLOG(INFO) << "GetDefaultInputDeviceID";
ComPtr<IMMDevice> device(
CreateDevice(AudioDeviceName::kDefaultDeviceId, eCapture, eConsole));
return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
}
std::string GetDefaultOutputDeviceID() {
RTC_DLOG(INFO) << "GetDefaultOutputDeviceID";
ComPtr<IMMDevice> device(
CreateDevice(AudioDeviceName::kDefaultDeviceId, eRender, eConsole));
return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
}
std::string GetCommunicationsInputDeviceID() {
RTC_DLOG(INFO) << "GetCommunicationsInputDeviceID";
ComPtr<IMMDevice> device(CreateDevice(AudioDeviceName::kDefaultDeviceId,
eCapture, eCommunications));
return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
}
std::string GetCommunicationsOutputDeviceID() {
RTC_DLOG(INFO) << "GetCommunicationsOutputDeviceID";
ComPtr<IMMDevice> device(CreateDevice(AudioDeviceName::kDefaultDeviceId,
eRender, eCommunications));
return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
}
ComPtr<IMMDevice> CreateDevice(const std::string& device_id,
EDataFlow data_flow,
ERole role) {
RTC_DLOG(INFO) << "CreateDevice";
return CreateDeviceInternal(device_id, data_flow, role);
}
AudioDeviceName GetDeviceName(IMMDevice* device) {
RTC_DLOG(INFO) << "GetDeviceName";
RTC_DCHECK(device);
AudioDeviceName device_name(GetDeviceFriendlyNameInternal(device),
GetDeviceIdInternal(device));
RTC_DLOG(INFO) << "friendly name: " << device_name.device_name;
RTC_DLOG(INFO) << "unique id : " << device_name.unique_id;
return device_name;
}
std::string GetFriendlyName(const std::string& device_id,
EDataFlow data_flow,
ERole role) {
RTC_DLOG(INFO) << "GetFriendlyName";
ComPtr<IMMDevice> audio_device = CreateDevice(device_id, data_flow, role);
if (!audio_device.Get())
return std::string();
AudioDeviceName device_name = GetDeviceName(audio_device.Get());
return device_name.device_name;
}
EDataFlow GetDataFlow(IMMDevice* device) {
RTC_DLOG(INFO) << "GetDataFlow";
RTC_DCHECK(device);
ComPtr<IMMEndpoint> endpoint;
_com_error error = device->QueryInterface(endpoint.GetAddressOf());
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IMMDevice::QueryInterface failed: "
<< ErrorToString(error);
return eAll;
}
EDataFlow data_flow;
error = endpoint->GetDataFlow(&data_flow);
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IMMEndpoint::GetDataFlow failed: "
<< ErrorToString(error);
return eAll;
}
return data_flow;
}
bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names) {
RTC_DLOG(INFO) << "GetInputDeviceNames";
RTC_DCHECK(device_names);
return GetDeviceNamesInternal(eCapture, device_names);
}
bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names) {
RTC_DLOG(INFO) << "GetOutputDeviceNames";
RTC_DCHECK(device_names);
return GetDeviceNamesInternal(eRender, device_names);
}
ComPtr<IAudioClient> CreateClient(const std::string& device_id,
EDataFlow data_flow,
ERole role) {
RTC_DLOG(INFO) << "CreateClient";
ComPtr<IMMDevice> device(CreateDevice(device_id, data_flow, role));
return CreateClientInternal(device.Get());
}
ComPtr<IAudioClient2> CreateClient2(const std::string& device_id,
EDataFlow data_flow,
ERole role) {
RTC_DLOG(INFO) << "CreateClient2";
ComPtr<IMMDevice> device(CreateDevice(device_id, data_flow, role));
return CreateClient2Internal(device.Get());
}
HRESULT SetClientProperties(IAudioClient2* client) {
RTC_DLOG(INFO) << "SetClientProperties";
RTC_DCHECK(client);
AudioClientProperties properties = {0};
properties.cbSize = sizeof(AudioClientProperties);
properties.bIsOffload = false;
// Real-time VoIP communication.
// TODO(henrika): other categories?
properties.eCategory = AudioCategory_Communications;
// TODO(henrika): can AUDCLNT_STREAMOPTIONS_RAW be used?
properties.Options = AUDCLNT_STREAMOPTIONS_NONE;
_com_error error = client->SetClientProperties(&properties);
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IAudioClient2::SetClientProperties failed: "
<< ErrorToString(error);
}
return error.Error();
}
HRESULT GetSharedModeMixFormat(IAudioClient* client,
WAVEFORMATEXTENSIBLE* format) {
RTC_DLOG(INFO) << "GetSharedModeMixFormat";
RTC_DCHECK(client);
ScopedCoMem<WAVEFORMATEXTENSIBLE> format_ex;
_com_error error =
client->GetMixFormat(reinterpret_cast<WAVEFORMATEX**>(&format_ex));
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IAudioClient::GetMixFormat failed: "
<< ErrorToString(error);
return error.Error();
}
size_t bytes = sizeof(WAVEFORMATEX) + format_ex->Format.cbSize;
RTC_DCHECK_EQ(bytes, sizeof(WAVEFORMATEXTENSIBLE));
memcpy(format, format_ex, bytes);
RTC_DLOG(INFO) << WaveFormatExToString(format);
return error.Error();
}
bool IsFormatSupported(IAudioClient* client,
AUDCLNT_SHAREMODE share_mode,
const WAVEFORMATEXTENSIBLE* format) {
RTC_DLOG(INFO) << "IsFormatSupported";
RTC_DCHECK(client);
ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
// This method provides a way for a client to determine, before calling
// IAudioClient::Initialize, whether the audio engine supports a particular
// stream format or not. In shared mode, the audio engine always supports
// the mix format (see GetSharedModeMixFormat).
// TODO(henrika): verify support for exclusive mode as well?
_com_error error = client->IsFormatSupported(
share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
reinterpret_cast<WAVEFORMATEX**>(&closest_match));
if ((error.Error() == S_OK) && (closest_match == nullptr)) {
RTC_DLOG(INFO)
<< "The audio endpoint device supports the specified stream format";
} else if ((error.Error() == S_FALSE) && (closest_match != nullptr)) {
// Call succeeded with a closest match to the specified format. This log can
// only be triggered for shared mode.
RTC_LOG(LS_WARNING)
<< "Exact format is not supported, but a closest match exists";
RTC_LOG(INFO) << WaveFormatExToString(closest_match);
} else if ((error.Error() == AUDCLNT_E_UNSUPPORTED_FORMAT) &&
(closest_match == nullptr)) {
// The audio engine does not support the caller-specified format or any
// similar format.
RTC_DLOG(INFO) << "The audio endpoint device does not support the "
"specified stream format";
} else {
RTC_LOG(LS_ERROR) << "IAudioClient::IsFormatSupported failed: "
<< ErrorToString(error);
}
return (error.Error() == S_OK);
}
HRESULT GetDevicePeriod(IAudioClient* client,
AUDCLNT_SHAREMODE share_mode,
REFERENCE_TIME* device_period) {
RTC_DLOG(INFO) << "GetDevicePeriod";
RTC_DCHECK(client);
// The |default_period| parameter specifies the default scheduling period
// for a shared-mode stream. The |minimum_period| parameter specifies the
// minimum scheduling period for an exclusive-mode stream.
// The time is expressed in 100-nanosecond units.
REFERENCE_TIME default_period = 0;
REFERENCE_TIME minimum_period = 0;
_com_error error = client->GetDevicePeriod(&default_period, &minimum_period);
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IAudioClient::GetDevicePeriod failed: "
<< ErrorToString(error);
return error.Error();
}
*device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period
: minimum_period;
RTC_LOG(INFO) << "device_period: "
<< ReferenceTimeToTimeDelta(*device_period).ms() << " [ms]";
return error.Error();
}
HRESULT GetPreferredAudioParameters(const std::string& device_id,
bool is_output_device,
AudioParameters* params) {
RTC_DLOG(INFO) << "GetPreferredAudioParameters";
EDataFlow data_flow = is_output_device ? eRender : eCapture;
ComPtr<IMMDevice> device;
if (device_id == AudioDeviceName::kDefaultCommunicationsDeviceId) {
device = CreateDeviceInternal(AudioDeviceName::kDefaultDeviceId, data_flow,
eCommunications);
} else {
// If |device_id| equals AudioDeviceName::kDefaultDeviceId, a default
// device will be created.
device = CreateDeviceInternal(device_id, data_flow, eConsole);
}
if (!device.Get()) {
return E_FAIL;
}
ComPtr<IAudioClient> client(CreateClientInternal(device.Get()));
if (!client.Get())
return E_FAIL;
return GetPreferredAudioParametersInternal(client.Get(), params);
}
HRESULT GetPreferredAudioParameters(IAudioClient* client,
AudioParameters* params) {
RTC_DCHECK(client);
return GetPreferredAudioParametersInternal(client, params);
}
HRESULT SharedModeInitialize(IAudioClient* client,
const WAVEFORMATEXTENSIBLE* format,
HANDLE event_handle,
uint32_t* endpoint_buffer_size) {
RTC_DLOG(INFO) << "SharedModeInitialize";
RTC_DCHECK(client);
// Use default flags (i.e, don't set AUDCLNT_STREAMFLAGS_NOPERSIST) to
// ensure that the volume level and muting state for a rendering session
// are persistent across system restarts. The volume level and muting
// state for a capture session are never persistent.
DWORD stream_flags = 0;
// Enable event-driven streaming if a valid event handle is provided.
// After the stream starts, the audio engine will signal the event handle
// to notify the client each time a buffer becomes ready to process.
// Event-driven buffering is supported for both rendering and capturing.
// Both shared-mode and exclusive-mode streams can use event-driven buffering.
bool use_event =
(event_handle != nullptr && event_handle != INVALID_HANDLE_VALUE);
if (use_event) {
stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
RTC_DLOG(INFO) << "The stream is initialized to be event driven";
}
RTC_DLOG(INFO) << "stream_flags: 0x" << std::hex << stream_flags;
// Initialize the shared mode client for minimal delay.
_com_error error = client->Initialize(
AUDCLNT_SHAREMODE_SHARED, stream_flags, 0, 0,
reinterpret_cast<const WAVEFORMATEX*>(format), nullptr);
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IAudioClient::Initialize failed: "
<< ErrorToString(error);
return error.Error();
}
// If a stream is initialized to be event driven and in shared mode, the
// associated application must also obtain a handle by making a call to
// IAudioClient::SetEventHandle.
if (use_event) {
error = client->SetEventHandle(event_handle);
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IAudioClient::SetEventHandle failed: "
<< ErrorToString(error);
return error.Error();
}
}
UINT32 buffer_size_in_frames = 0;
// Retrieves the size (maximum capacity) of the endpoint buffer. The size is
// expressed as the number of audio frames the buffer can hold.
error = client->GetBufferSize(&buffer_size_in_frames);
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IAudioClient::GetBufferSize failed: "
<< ErrorToString(error);
return error.Error();
}
*endpoint_buffer_size = buffer_size_in_frames;
RTC_DLOG(INFO) << "endpoint buffer size: " << buffer_size_in_frames
<< " [audio frames]";
RTC_DLOG(INFO) << "bytes per audio frame: " << format->Format.nBlockAlign;
RTC_DLOG(INFO) << "endpoint buffer size: "
<< buffer_size_in_frames * format->Format.nChannels *
(format->Format.wBitsPerSample / 8)
<< " [bytes]";
// TODO(henrika): utilize when delay measurements are added.
REFERENCE_TIME latency = 0;
error = client->GetStreamLatency(&latency);
RTC_DLOG(INFO) << "stream latency: " << ReferenceTimeToTimeDelta(latency).ms()
<< " [ms]";
return error.Error();
}
ComPtr<IAudioRenderClient> CreateRenderClient(IAudioClient* client) {
RTC_DLOG(INFO) << "CreateRenderClient";
RTC_DCHECK(client);
// Get access to the IAudioRenderClient interface. This interface
// enables us to write output data to a rendering endpoint buffer.
ComPtr<IAudioRenderClient> audio_render_client;
_com_error error = client->GetService(IID_PPV_ARGS(&audio_render_client));
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR)
<< "IAudioClient::GetService(IID_IAudioRenderClient) failed: "
<< ErrorToString(error);
return ComPtr<IAudioRenderClient>();
}
return audio_render_client;
}
ComPtr<IAudioCaptureClient> CreateCaptureClient(IAudioClient* client) {
RTC_DLOG(INFO) << "CreateCaptureClient";
RTC_DCHECK(client);
// Get access to the IAudioCaptureClient interface. This interface
// enables us to read input data from a capturing endpoint buffer.
ComPtr<IAudioCaptureClient> audio_capture_client;
_com_error error = client->GetService(IID_PPV_ARGS(&audio_capture_client));
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR)
<< "IAudioClient::GetService(IID_IAudioCaptureClient) failed: "
<< ErrorToString(error);
return ComPtr<IAudioCaptureClient>();
}
return audio_capture_client;
}
ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client) {
RTC_DLOG(INFO) << "CreateAudioClock";
RTC_DCHECK(client);
// Get access to the IAudioClock interface. This interface enables us to
// monitor a stream's data rate and the current position in the stream.
ComPtr<IAudioClock> audio_clock;
_com_error error = client->GetService(IID_PPV_ARGS(&audio_clock));
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IAudioClient::GetService(IID_IAudioClock) failed: "
<< ErrorToString(error);
return ComPtr<IAudioClock>();
}
return audio_clock;
}
bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
IAudioRenderClient* render_client) {
RTC_DLOG(INFO) << "FillRenderEndpointBufferWithSilence";
RTC_DCHECK(client);
RTC_DCHECK(render_client);
UINT32 endpoint_buffer_size = 0;
_com_error error = client->GetBufferSize(&endpoint_buffer_size);
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IAudioClient::GetBufferSize failed: "
<< ErrorToString(error);
return false;
}
UINT32 num_queued_frames = 0;
// Get number of audio frames that are queued up to play in the endpoint
// buffer.
error = client->GetCurrentPadding(&num_queued_frames);
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IAudioClient::GetCurrentPadding failed: "
<< ErrorToString(error);
return false;
}
RTC_DLOG(INFO) << "num_queued_frames: " << num_queued_frames;
BYTE* data = nullptr;
int num_frames_to_fill = endpoint_buffer_size - num_queued_frames;
RTC_DLOG(INFO) << "num_frames_to_fill: " << num_frames_to_fill;
error = render_client->GetBuffer(num_frames_to_fill, &data);
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IAudioRenderClient::GetBuffer failed: "
<< ErrorToString(error);
return false;
}
// Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
// explicitly write silence data to the rendering buffer.
error = render_client->ReleaseBuffer(num_frames_to_fill,
AUDCLNT_BUFFERFLAGS_SILENT);
if (error.Error() != S_OK) {
RTC_LOG(LS_ERROR) << "IAudioRenderClient::ReleaseBuffer failed: "
<< ErrorToString(error);
return false;
}
return true;
}
std::string WaveFormatExToString(const WAVEFORMATEXTENSIBLE* format) {
RTC_DCHECK_EQ(format->Format.wFormatTag, WAVE_FORMAT_EXTENSIBLE);
char ss_buf[1024];
rtc::SimpleStringBuilder ss(ss_buf);
ss.AppendFormat("wFormatTag: WAVE_FORMAT_EXTENSIBLE");
ss.AppendFormat(", nChannels: %d", format->Format.nChannels);
ss.AppendFormat(", nSamplesPerSec: %d", format->Format.nSamplesPerSec);
ss.AppendFormat(", nAvgBytesPerSec: %d", format->Format.nAvgBytesPerSec);
ss.AppendFormat(", nBlockAlign: %d", format->Format.nBlockAlign);
ss.AppendFormat(", wBitsPerSample: %d", format->Format.wBitsPerSample);
ss.AppendFormat(", cbSize: %d", format->Format.cbSize);
ss.AppendFormat(", wValidBitsPerSample: %d",
format->Samples.wValidBitsPerSample);
ss.AppendFormat(", dwChannelMask: 0x%X", format->dwChannelMask);
if (format->SubFormat == KSDATAFORMAT_SUBTYPE_PCM) {
ss << ", SubFormat: KSDATAFORMAT_SUBTYPE_PCM";
} else if (format->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) {
ss << ", SubFormat: KSDATAFORMAT_SUBTYPE_IEEE_FLOAT";
} else {
ss << ", SubFormat: NOT_SUPPORTED";
}
return ss.str();
}
webrtc::TimeDelta ReferenceTimeToTimeDelta(REFERENCE_TIME time) {
// Each unit of reference time is 100 nanoseconds <=> 0.1 microsecond.
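// Example: 10 ms <=> 100000 reference-time units (10 ms / 100 ns).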
return webrtc::TimeDelta::us(0.1 * time + 0.5);
}
std::string ErrorToString(const _com_error& error) {
char ss_buf[1024];
rtc::SimpleStringBuilder ss(ss_buf);
ss.AppendFormat("%s (0x%08X)", rtc::ToUtf8(error.ErrorMessage()).c_str(),
error.Error());
return ss.str();
}
} // namespace core_audio_utility
} // namespace webrtc_win
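
To illustrate the enumeration path above, a hedged sketch of listing all capture devices (assumed call site; requires a COM-initialized thread, as everywhere in this file):

// Sketch: GetInputDeviceNames() prepends the "Default - " and
// "Communication - " entries at index 0 and 1; real devices follow.
webrtc::AudioDeviceNames names;
if (webrtc_win::core_audio_utility::GetInputDeviceNames(&names)) {
  for (const auto& name : names) {
    RTC_LOG(INFO) << name.device_name << " : " << name.unique_id;
  }
}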

modules/audio_device/win/core_audio_utility_win.h

@@ -0,0 +1,431 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_
#define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_
#include <Audioclient.h>
#include <Mmdeviceapi.h>
#include <avrt.h>
#include <comdef.h>
#include <objbase.h>
#include <propidl.h>
#include <wrl/client.h>
#include <string>
#include "api/units/time_delta.h"
#include "modules/audio_device/audio_device_name.h"
#include "modules/audio_device/include/audio_device_defines.h"
#include "rtc_base/logging.h"
namespace webrtc_win {
static const int64_t kNumMicrosecsPerSec = webrtc::TimeDelta::seconds(1).us();
// Utility class which registers a thread with MMCSS in the constructor and
// deregisters MMCSS in the destructor. The task name is given by |task_name|.
// The Multimedia Class Scheduler service (MMCSS) enables multimedia
// applications to ensure that their time-sensitive processing receives
// prioritized access to CPU resources without denying CPU resources to
// lower-priority applications.
class ScopedMMCSSRegistration {
public:
explicit ScopedMMCSSRegistration(const TCHAR* task_name) {
RTC_DLOG(INFO) << "ScopedMMCSSRegistration: " << rtc::ToUtf8(task_name);
// Register the calling thread with MMCSS for the supplied |task_name|.
DWORD mmcss_task_index = 0;
mmcss_handle_ = AvSetMmThreadCharacteristics(task_name, &mmcss_task_index);
if (mmcss_handle_ == nullptr) {
RTC_LOG(LS_ERROR) << "Failed to enable MMCSS on this thread: "
<< GetLastError();
}
}
~ScopedMMCSSRegistration() {
if (Succeeded()) {
// Deregister with MMCSS.
RTC_DLOG(INFO) << "~ScopedMMCSSRegistration";
AvRevertMmThreadCharacteristics(mmcss_handle_);
}
}
ScopedMMCSSRegistration(const ScopedMMCSSRegistration&) = delete;
ScopedMMCSSRegistration& operator=(const ScopedMMCSSRegistration&) = delete;
bool Succeeded() const { return mmcss_handle_ != nullptr; }
private:
HANDLE mmcss_handle_ = nullptr;
};
// Initializes COM in the constructor (STA or MTA), and uninitializes COM in the
// destructor. Taken from base::win::ScopedCOMInitializer.
//
// WARNING: This should only be used once per thread, ideally scoped to a
// similar lifetime as the thread itself. You should not be using this in
// random utility functions that make COM calls; instead ensure that these
// functions are running on a COM-supporting thread!
// See https://msdn.microsoft.com/en-us/library/ms809971.aspx for details.
class ScopedCOMInitializer {
public:
// Enum value provided to initialize the thread as an MTA instead of STA.
enum SelectMTA { kMTA };
// Constructor for STA initialization.
ScopedCOMInitializer() {
RTC_DLOG(INFO) << "Single-Threaded Apartment (STA) COM thread";
Initialize(COINIT_APARTMENTTHREADED);
}
// Constructor for MTA initialization.
explicit ScopedCOMInitializer(SelectMTA mta) {
RTC_DLOG(INFO) << "Multi-Threaded Apartment (MTA) COM thread";
Initialize(COINIT_MULTITHREADED);
}
~ScopedCOMInitializer() {
if (Succeeded()) {
CoUninitialize();
}
}
ScopedCOMInitializer(const ScopedCOMInitializer&) = delete;
ScopedCOMInitializer& operator=(const ScopedCOMInitializer&) = delete;
bool Succeeded() { return SUCCEEDED(hr_); }
private:
void Initialize(COINIT init) {
// Initializes the COM library for use by the calling thread, sets the
// thread's concurrency model, and creates a new apartment for the thread
// if one is required.
hr_ = CoInitializeEx(NULL, init);
RTC_CHECK_NE(RPC_E_CHANGED_MODE, hr_) << "Invalid COM thread model change";
}
HRESULT hr_;
};
// A PROPVARIANT that is automatically initialized and cleared upon respective
// construction and destruction of this class.
class ScopedPropVariant {
public:
ScopedPropVariant() { PropVariantInit(&pv_); }
~ScopedPropVariant() { Reset(); }
ScopedPropVariant(const ScopedPropVariant&) = delete;
ScopedPropVariant& operator=(const ScopedPropVariant&) = delete;
bool operator==(const ScopedPropVariant&) const = delete;
bool operator!=(const ScopedPropVariant&) const = delete;
// Returns a pointer to the underlying PROPVARIANT for use as an out param in
// a function call.
PROPVARIANT* Receive() {
RTC_DCHECK_EQ(pv_.vt, VT_EMPTY);
return &pv_;
}
// Clears the instance to prepare it for re-use (e.g., via Receive).
void Reset() {
if (pv_.vt != VT_EMPTY) {
HRESULT result = PropVariantClear(&pv_);
RTC_DCHECK_EQ(result, S_OK);
}
}
const PROPVARIANT& get() const { return pv_; }
const PROPVARIANT* ptr() const { return &pv_; }
private:
PROPVARIANT pv_;
};
// Simple scoped memory releaser class for COM allocated memory.
template <typename T>
class ScopedCoMem {
public:
ScopedCoMem() : mem_ptr_(nullptr) {}
~ScopedCoMem() { Reset(nullptr); }
ScopedCoMem(const ScopedCoMem&) = delete;
ScopedCoMem& operator=(const ScopedCoMem&) = delete;
T** operator&() { // NOLINT
RTC_DCHECK(mem_ptr_ == nullptr); // To catch memory leaks.
return &mem_ptr_;
}
operator T*() { return mem_ptr_; }
T* operator->() {
RTC_DCHECK(mem_ptr_ != nullptr);
return mem_ptr_;
}
const T* operator->() const {
RTC_DCHECK(mem_ptr_ != nullptr);
return mem_ptr_;
}
explicit operator bool() const { return mem_ptr_; }
friend bool operator==(const ScopedCoMem& lhs, std::nullptr_t) {
return lhs.Get() == nullptr;
}
friend bool operator==(std::nullptr_t, const ScopedCoMem& rhs) {
return rhs.Get() == nullptr;
}
friend bool operator!=(const ScopedCoMem& lhs, std::nullptr_t) {
return lhs.Get() != nullptr;
}
friend bool operator!=(std::nullptr_t, const ScopedCoMem& rhs) {
return rhs.Get() != nullptr;
}
void Reset(T* ptr) {
if (mem_ptr_)
CoTaskMemFree(mem_ptr_);
mem_ptr_ = ptr;
}
T* Get() const { return mem_ptr_; }
private:
T* mem_ptr_;
};
// A HANDLE that is automatically initialized and closed upon respective
// construction and destruction of this class.
class ScopedHandle {
public:
ScopedHandle() : handle_(nullptr) {}
explicit ScopedHandle(HANDLE h) : handle_(nullptr) { Set(h); }
~ScopedHandle() { Close(); }
ScopedHandle& operator=(const ScopedHandle&) = delete;
bool operator==(const ScopedHandle&) const = delete;
bool operator!=(const ScopedHandle&) const = delete;
// Use this instead of comparing to INVALID_HANDLE_VALUE.
bool IsValid() const { return handle_ != nullptr; }
void Set(HANDLE new_handle) {
Close();
// Windows is inconsistent about invalid handles.
// See https://blogs.msdn.microsoft.com/oldnewthing/20040302-00/?p=40443
// for details.
if (new_handle != INVALID_HANDLE_VALUE) {
handle_ = new_handle;
}
}
HANDLE Get() const { return handle_; }
operator HANDLE() const { return handle_; }
void Close() {
if (handle_) {
if (!::CloseHandle(handle_)) {
RTC_NOTREACHED();
}
handle_ = nullptr;
}
}
private:
HANDLE handle_;
};
// Utility methods for the Core Audio API on Windows.
// Always ensure that Core Audio is supported before using these methods.
// Use webrtc_win::core_audio_utility::IsSupported() for this purpose.
// Also, all methods must be called on a valid COM thread. This can be done
// by using the webrtc_win::ScopedCOMInitializer helper class.
// These methods are based on media::CoreAudioUtil in Chrome.
namespace core_audio_utility {
// Returns true if Windows Core Audio is supported.
// Always verify that this method returns true before using any of the
// other methods in this namespace.
bool IsSupported();
// The MMDevice API lets clients discover the audio endpoint devices in the
// system and determine which devices are suitable for the application to use.
// Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
// Number of active audio devices in the specified data flow direction.
// Set |data_flow| to eAll to retrieve the total number of active audio
// devices.
int NumberOfActiveDevices(EDataFlow data_flow);
// Creates an IMMDeviceEnumerator interface which provides methods for
// enumerating audio endpoint devices.
// TODO(henrika): IMMDeviceEnumerator::RegisterEndpointNotificationCallback.
Microsoft::WRL::ComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator();
// These functions return the unique device id of the default or
// communications input/output device, or an empty string if no such device
// exists or if the device has been disabled.
std::string GetDefaultInputDeviceID();
std::string GetDefaultOutputDeviceID();
std::string GetCommunicationsInputDeviceID();
std::string GetCommunicationsOutputDeviceID();
// Creates an IMMDevice interface corresponding to the unique device id in
// |device_id|, or by data-flow direction and role if |device_id| is set to
// AudioDeviceName::kDefaultDeviceId.
Microsoft::WRL::ComPtr<IMMDevice> CreateDevice(const std::string& device_id,
EDataFlow data_flow,
ERole role);
// Returns the unique ID and user-friendly name of a given endpoint device.
// Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}", and
// "Microphone (Realtek High Definition Audio)".
webrtc::AudioDeviceName GetDeviceName(IMMDevice* device);
// Gets the user-friendly name of the endpoint device which is represented
// by a unique id in |device_id|, or by data-flow direction and role if
// |device_id| is set to AudioDeviceName::kDefaultDeviceId.
std::string GetFriendlyName(const std::string& device_id,
EDataFlow data_flow,
ERole role);
// Query if the audio device is a rendering device or a capture device.
EDataFlow GetDataFlow(IMMDevice* device);
// Enumerates all input devices and adds the names (friendly name and unique
// device id) to the list in |device_names|.
bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names);
// Enumerates all output devices and adds the names (friendly name and unique
// device id) to the list in |device_names|.
bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names);
// The Windows Audio Session API (WASAPI) enables client applications to
// manage the flow of audio data between the application and an audio endpoint
// device. Header files Audioclient.h and Audiopolicy.h define the WASAPI
// interfaces.
// Create an IAudioClient instance for a specific device or the default
// device specified by data-flow direction and role.
Microsoft::WRL::ComPtr<IAudioClient> CreateClient(const std::string& device_id,
EDataFlow data_flow,
ERole role);
Microsoft::WRL::ComPtr<IAudioClient2>
CreateClient2(const std::string& device_id, EDataFlow data_flow, ERole role);
// Sets the AudioCategory_Communications category. Should be called before
// GetSharedModeMixFormat() and IsFormatSupported().
// Minimum supported client: Windows 8.
// TODO(henrika): evaluate effect (if any).
HRESULT SetClientProperties(IAudioClient2* client);
// Get the mix format that the audio engine uses internally for processing
// of shared-mode streams. The client can call this method before calling
// IAudioClient::Initialize. When creating a shared-mode stream for an audio
// endpoint device, the Initialize method always accepts the stream format
// obtained by this method.
HRESULT GetSharedModeMixFormat(IAudioClient* client,
WAVEFORMATEXTENSIBLE* format);
// Returns true if the specified |client| supports the format in |format|
// for the given |share_mode| (shared or exclusive). The client can call this
// method before calling IAudioClient::Initialize.
bool IsFormatSupported(IAudioClient* client,
AUDCLNT_SHAREMODE share_mode,
const WAVEFORMATEXTENSIBLE* format);
// For a shared-mode stream, the audio engine periodically processes the
// data in the endpoint buffer at the period obtained in |device_period|.
// For an exclusive mode stream, |device_period| corresponds to the minimum
// time interval between successive processing by the endpoint device.
// This period plus the stream latency between the buffer and endpoint device
// represents the minimum possible latency that an audio application can
// achieve. The time in |device_period| is expressed in 100-nanosecond units.
HRESULT GetDevicePeriod(IAudioClient* client,
AUDCLNT_SHAREMODE share_mode,
REFERENCE_TIME* device_period);
// Get the preferred audio parameters for the given |device_id|. The acquired
// values should only be utilized for shared-mode streams since there are no
// preferred settings for an exclusive-mode stream.
HRESULT GetPreferredAudioParameters(const std::string& device_id,
bool is_output_device,
webrtc::AudioParameters* params);
HRESULT GetPreferredAudioParameters(IAudioClient* client,
webrtc::AudioParameters* params);
// After activating an IAudioClient interface on an audio endpoint device,
// the client must initialize it once, and only once, to initialize the audio
// stream between the client and the device. In shared mode, the client
// connects indirectly through the audio engine which does the mixing.
// If a valid event is provided in |event_handle|, the client will be
// initialized for event-driven buffer handling. If |event_handle| is set to
// nullptr, event-driven buffer handling is not utilized.
// The output parameter |endpoint_buffer_size| contains the size of the
// endpoint buffer and it is expressed as the number of audio frames the
// buffer can hold.
// TODO(henrika):
// - use IAudioClient2::SetClientProperties before calling this method
// - IAudioClient::Initialize(your_format, AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM |
//   AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY)
HRESULT SharedModeInitialize(IAudioClient* client,
const WAVEFORMATEXTENSIBLE* format,
HANDLE event_handle,
uint32_t* endpoint_buffer_size);
// Creates an IAudioRenderClient client for an existing IAudioClient given by
// |client|. The IAudioRenderClient interface enables a client to write
// output data to a rendering endpoint buffer. The methods in this interface
// manage the movement of data packets that contain audio-rendering data.
Microsoft::WRL::ComPtr<IAudioRenderClient> CreateRenderClient(
IAudioClient* client);
// Creates an IAudioCaptureClient client for an existing IAudioClient given by
// |client|. The IAudioCaptureClient interface enables a client to read
// input data from a capture endpoint buffer. The methods in this interface
// manage the movement of data packets that contain capture data.
Microsoft::WRL::ComPtr<IAudioCaptureClient> CreateCaptureClient(
IAudioClient* client);
// Creates an IAudioClock interface for an existing IAudioClient given by
// |client|. The IAudioClock interface enables a client to monitor a stream's
// data rate and the current position in the stream.
Microsoft::WRL::ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client);
// Fills up the endpoint rendering buffer with silence for an existing
// IAudioClient given by |client| and a corresponding IAudioRenderClient
// given by |render_client|.
bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
IAudioRenderClient* render_client);
// Transforms a WAVEFORMATEXTENSIBLE struct to a human-readable string.
std::string WaveFormatExToString(const WAVEFORMATEXTENSIBLE* format);
// Converts Windows internal REFERENCE_TIME (100 nanosecond units) into
// generic webrtc::TimeDelta which then can be converted to any time unit.
webrtc::TimeDelta ReferenceTimeToTimeDelta(REFERENCE_TIME time);
// Converts a COM error into a human-readable string.
std::string ErrorToString(const _com_error& error);
} // namespace core_audio_utility
} // namespace webrtc_win
#endif // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_
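
Putting the declarations together, a sketch of the shared-mode, event-driven render setup this header supports (error handling and stream start omitted; the function name and event creation are assumed call-site details, all other names come from the declarations above):

// Sketch: create a default render client, initialize it in shared mode
// with event-driven buffering, and pre-fill the buffer with silence.
void InitializeDefaultRenderStream() {
  namespace cau = webrtc_win::core_audio_utility;
  Microsoft::WRL::ComPtr<IAudioClient> client = cau::CreateClient(
      webrtc::AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
  WAVEFORMATEXTENSIBLE format;
  if (!client.Get() ||
      FAILED(cau::GetSharedModeMixFormat(client.Get(), &format)))
    return;
  // A valid event handle selects AUDCLNT_STREAMFLAGS_EVENTCALLBACK mode.
  webrtc_win::ScopedHandle audio_event(
      CreateEvent(nullptr, false, false, nullptr));
  uint32_t endpoint_buffer_size = 0;
  if (FAILED(cau::SharedModeInitialize(client.Get(), &format,
                                       audio_event.Get(),
                                       &endpoint_buffer_size)))
    return;
  Microsoft::WRL::ComPtr<IAudioRenderClient> render_client =
      cau::CreateRenderClient(client.Get());
  if (render_client.Get()) {
    // AUDCLNT_BUFFERFLAGS_SILENT avoids writing explicit zeros.
    cau::FillRenderEndpointBufferWithSilence(client.Get(),
                                             render_client.Get());
  }
}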

modules/audio_device/win/core_audio_utility_win_unittest.cc

@@ -0,0 +1,563 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_device/win/core_audio_utility_win.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/logging.h"
#include "test/gtest.h"
using Microsoft::WRL::ComPtr;
using webrtc::AudioDeviceName;
namespace webrtc_win {
namespace {
#define ABORT_TEST_IF_NOT(requirements_satisfied) \
do { \
bool fail = false; \
if (ShouldAbortTest(requirements_satisfied, #requirements_satisfied, \
&fail)) { \
if (fail) \
FAIL(); \
else \
return; \
} \
} while (false)
bool ShouldAbortTest(bool requirements_satisfied,
const char* requirements_expression,
bool* should_fail) {
if (!requirements_satisfied) {
RTC_LOG(LS_ERROR) << "Requirement(s) not satisfied ("
<< requirements_expression << ")";
// TODO(henrika): improve hard-coded condition to determine if test should
// fail or be ignored. Could use e.g. a command-line argument here to
// determine if the test should fail or not.
*should_fail = false;
return true;
}
*should_fail = false;
return false;
}
} // namespace
// CoreAudioUtilityWinTest test fixture.
class CoreAudioUtilityWinTest : public ::testing::Test {
protected:
CoreAudioUtilityWinTest() {
// We must initialize the COM library on a thread before calling any of
// the library functions. All COM functions will return CO_E_NOTINITIALIZED
// otherwise.
EXPECT_TRUE(com_init_.Succeeded());
// Configure logging.
rtc::LogMessage::LogToDebug(rtc::LS_INFO);
rtc::LogMessage::LogTimestamps();
rtc::LogMessage::LogThreads();
}
virtual ~CoreAudioUtilityWinTest() {}
bool DevicesAvailable() {
return core_audio_utility::IsSupported() &&
core_audio_utility::NumberOfActiveDevices(eCapture) > 0 &&
core_audio_utility::NumberOfActiveDevices(eRender) > 0;
}
private:
ScopedCOMInitializer com_init_;
};
TEST_F(CoreAudioUtilityWinTest, NumberOfActiveDevices) {
ABORT_TEST_IF_NOT(DevicesAvailable());
int render_devices = core_audio_utility::NumberOfActiveDevices(eRender);
EXPECT_GT(render_devices, 0);
int capture_devices = core_audio_utility::NumberOfActiveDevices(eCapture);
EXPECT_GT(capture_devices, 0);
int total_devices = core_audio_utility::NumberOfActiveDevices(eAll);
EXPECT_EQ(total_devices, render_devices + capture_devices);
}
TEST_F(CoreAudioUtilityWinTest, CreateDeviceEnumerator) {
ABORT_TEST_IF_NOT(DevicesAvailable());
ComPtr<IMMDeviceEnumerator> enumerator =
core_audio_utility::CreateDeviceEnumerator();
EXPECT_TRUE(enumerator.Get());
}
TEST_F(CoreAudioUtilityWinTest, GetDefaultInputDeviceID) {
ABORT_TEST_IF_NOT(DevicesAvailable());
std::string default_device_id = core_audio_utility::GetDefaultInputDeviceID();
EXPECT_FALSE(default_device_id.empty());
}
TEST_F(CoreAudioUtilityWinTest, GetDefaultOutputDeviceID) {
ABORT_TEST_IF_NOT(DevicesAvailable());
std::string default_device_id =
core_audio_utility::GetDefaultOutputDeviceID();
EXPECT_FALSE(default_device_id.empty());
}
TEST_F(CoreAudioUtilityWinTest, GetCommunicationsInputDeviceID) {
ABORT_TEST_IF_NOT(DevicesAvailable());
std::string default_device_id =
core_audio_utility::GetCommunicationsInputDeviceID();
EXPECT_FALSE(default_device_id.empty());
}
TEST_F(CoreAudioUtilityWinTest, GetCommunicationsOutputDeviceID) {
ABORT_TEST_IF_NOT(DevicesAvailable());
std::string default_device_id =
core_audio_utility::GetCommunicationsOutputDeviceID();
EXPECT_FALSE(default_device_id.empty());
}
TEST_F(CoreAudioUtilityWinTest, CreateDefaultDevice) {
ABORT_TEST_IF_NOT(DevicesAvailable());
struct {
EDataFlow flow;
ERole role;
} data[] = {{eRender, eConsole}, {eRender, eCommunications},
{eRender, eMultimedia}, {eCapture, eConsole},
{eCapture, eCommunications}, {eCapture, eMultimedia}};
// Create default devices for all flow/role combinations above.
ComPtr<IMMDevice> audio_device;
for (size_t i = 0; i < arraysize(data); ++i) {
audio_device = core_audio_utility::CreateDevice(
AudioDeviceName::kDefaultDeviceId, data[i].flow, data[i].role);
EXPECT_TRUE(audio_device.Get());
EXPECT_EQ(data[i].flow,
core_audio_utility::GetDataFlow(audio_device.Get()));
}
// Only eRender and eCapture are allowed as flow parameter.
audio_device = core_audio_utility::CreateDevice(
AudioDeviceName::kDefaultDeviceId, eAll, eConsole);
EXPECT_FALSE(audio_device.Get());
}
TEST_F(CoreAudioUtilityWinTest, CreateDevice) {
ABORT_TEST_IF_NOT(DevicesAvailable());
// Get name and ID of default device used for playback.
ComPtr<IMMDevice> default_render_device = core_audio_utility::CreateDevice(
AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
AudioDeviceName default_render_name =
core_audio_utility::GetDeviceName(default_render_device.Get());
EXPECT_TRUE(default_render_name.IsValid());
// Use the unique ID as input to CreateDevice() and create a corresponding
// IMMDevice. The data-flow direction and role parameters are ignored for
// this scenario.
ComPtr<IMMDevice> audio_device = core_audio_utility::CreateDevice(
default_render_name.unique_id, EDataFlow(), ERole());
EXPECT_TRUE(audio_device.Get());
// Verify that the two IMMDevice interfaces represent the same endpoint
// by comparing their unique IDs.
AudioDeviceName device_name =
core_audio_utility::GetDeviceName(audio_device.Get());
EXPECT_EQ(default_render_name.unique_id, device_name.unique_id);
}
TEST_F(CoreAudioUtilityWinTest, GetDefaultDeviceName) {
ABORT_TEST_IF_NOT(DevicesAvailable());
struct {
EDataFlow flow;
ERole role;
} data[] = {{eRender, eConsole},
{eRender, eCommunications},
{eCapture, eConsole},
{eCapture, eCommunications}};
// Get name and ID of default devices for all flow/role combinations above.
ComPtr<IMMDevice> audio_device;
AudioDeviceName device_name;
for (size_t i = 0; i < arraysize(data); ++i) {
audio_device = core_audio_utility::CreateDevice(
AudioDeviceName::kDefaultDeviceId, data[i].flow, data[i].role);
device_name = core_audio_utility::GetDeviceName(audio_device.Get());
EXPECT_TRUE(device_name.IsValid());
}
}
TEST_F(CoreAudioUtilityWinTest, GetFriendlyName) {
ABORT_TEST_IF_NOT(DevicesAvailable());
// Get name and ID of default device used for recording.
ComPtr<IMMDevice> audio_device = core_audio_utility::CreateDevice(
AudioDeviceName::kDefaultDeviceId, eCapture, eConsole);
AudioDeviceName device_name =
core_audio_utility::GetDeviceName(audio_device.Get());
EXPECT_TRUE(device_name.IsValid());
// Use unique ID as input to GetFriendlyName() and compare the result
// with the already obtained friendly name for the default capture device.
std::string friendly_name = core_audio_utility::GetFriendlyName(
device_name.unique_id, eCapture, eConsole);
EXPECT_EQ(friendly_name, device_name.device_name);
// Same test as above but for playback.
audio_device = core_audio_utility::CreateDevice(
AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
device_name = core_audio_utility::GetDeviceName(audio_device.Get());
friendly_name = core_audio_utility::GetFriendlyName(device_name.unique_id,
eRender, eConsole);
EXPECT_EQ(friendly_name, device_name.device_name);
}
TEST_F(CoreAudioUtilityWinTest, GetInputDeviceNames) {
ABORT_TEST_IF_NOT(DevicesAvailable());
webrtc::AudioDeviceNames device_names;
EXPECT_TRUE(core_audio_utility::GetInputDeviceNames(&device_names));
// Number of elements in the list should be two more than the number of
// active devices since we always add the default and default communication
// devices at index 0 and 1.
EXPECT_EQ(static_cast<int>(device_names.size()),
2 + core_audio_utility::NumberOfActiveDevices(eCapture));
}
TEST_F(CoreAudioUtilityWinTest, GetOutputDeviceNames) {
ABORT_TEST_IF_NOT(DevicesAvailable());
webrtc::AudioDeviceNames device_names;
EXPECT_TRUE(core_audio_utility::GetOutputDeviceNames(&device_names));
// Number of elements in the list should be two more than the number of
// active devices since we always add the default and default communication
// devices at index 0 and 1.
EXPECT_EQ(static_cast<int>(device_names.size()),
2 + core_audio_utility::NumberOfActiveDevices(eRender));
}
TEST_F(CoreAudioUtilityWinTest, CreateClient) {
ABORT_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
// Obtain reference to an IAudioClient interface for a default audio endpoint
// device specified by two different data flows and the |eConsole| role.
for (size_t i = 0; i < arraysize(data); ++i) {
ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
AudioDeviceName::kDefaultDeviceId, data[i], eConsole);
EXPECT_TRUE(client.Get());
}
}

// TODO(henrika): enable when support for Windows version querying is added.
TEST_F(CoreAudioUtilityWinTest, DISABLED_CreateClient2) {
ABORT_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
// Obtain reference to an IAudioClient2 interface for a default audio endpoint
// device specified by two different data flows and the |eConsole| role.
for (size_t i = 0; i < arraysize(data); ++i) {
ComPtr<IAudioClient2> client = core_audio_utility::CreateClient2(
AudioDeviceName::kDefaultDeviceId, data[i], eConsole);
EXPECT_TRUE(client.Get());
}
}

TEST_F(CoreAudioUtilityWinTest, SetClientProperties) {
ABORT_TEST_IF_NOT(DevicesAvailable());
ComPtr<IAudioClient2> client = core_audio_utility::CreateClient2(
AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
EXPECT_TRUE(client.Get());
EXPECT_TRUE(SUCCEEDED(core_audio_utility::SetClientProperties(client.Get())));
}

TEST_F(CoreAudioUtilityWinTest, GetSharedModeMixFormat) {
ABORT_TEST_IF_NOT(DevicesAvailable());
ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
EXPECT_TRUE(client.Get());
// Perform a simple sanity test of the acquired format structure.
WAVEFORMATPCMEX format;
EXPECT_TRUE(SUCCEEDED(
core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
EXPECT_GE(format.Format.nChannels, 1);
EXPECT_GE(format.Format.nSamplesPerSec, 8000u);
EXPECT_GE(format.Format.wBitsPerSample, 16);
EXPECT_GE(format.Samples.wValidBitsPerSample, 16);
EXPECT_EQ(format.Format.wFormatTag, WAVE_FORMAT_EXTENSIBLE);
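  // The derived WAVEFORMATEX fields must also be mutually consistent; these
  // identities follow directly from the WAVEFORMATEX documentation for PCM
  // formats and should hold for any valid mix format.
  EXPECT_EQ(format.Format.nBlockAlign,
            format.Format.nChannels * format.Format.wBitsPerSample / 8);
  EXPECT_EQ(format.Format.nAvgBytesPerSec,
            format.Format.nSamplesPerSec * format.Format.nBlockAlign);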
}

TEST_F(CoreAudioUtilityWinTest, IsFormatSupported) {
ABORT_TEST_IF_NOT(DevicesAvailable());
// Create a default render client.
ComPtr<IAudioClient> client = core_audio_utility::CreateClient(
AudioDeviceName::kDefaultDeviceId, eRender, eConsole);
EXPECT_TRUE(client.Get());
// Get the default, shared mode, mixing format.
WAVEFORMATEXTENSIBLE format;
EXPECT_TRUE(SUCCEEDED(
core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
// In shared mode, the audio engine always supports the mix format.
EXPECT_TRUE(core_audio_utility::IsFormatSupported(
client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
// Use an invalid format and verify that it is not supported.
format.Format.nSamplesPerSec += 1;
EXPECT_FALSE(core_audio_utility::IsFormatSupported(
client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
}

TEST_F(CoreAudioUtilityWinTest, GetDevicePeriod) {
ABORT_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
// Verify that the device periods are valid for the default render and
// capture devices.
for (size_t i = 0; i < arraysize(data); ++i) {
ComPtr<IAudioClient> client;
REFERENCE_TIME shared_time_period = 0;
REFERENCE_TIME exclusive_time_period = 0;
client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
data[i], eConsole);
EXPECT_TRUE(client.Get());
EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetDevicePeriod(
client.Get(), AUDCLNT_SHAREMODE_SHARED, &shared_time_period)));
EXPECT_GT(shared_time_period, 0);
EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetDevicePeriod(
client.Get(), AUDCLNT_SHAREMODE_EXCLUSIVE, &exclusive_time_period)));
EXPECT_GT(exclusive_time_period, 0);
EXPECT_LE(exclusive_time_period, shared_time_period);
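    // REFERENCE_TIME is expressed in 100-nanosecond units. As an extra,
    // hedged illustration (the 500 ms bound is an assumption, not an API
    // guarantee), convert to milliseconds and check that the period is sane.
    const double shared_period_ms = shared_time_period / 10000.0;
    EXPECT_LT(shared_period_ms, 500.0);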
}
}

TEST_F(CoreAudioUtilityWinTest, GetPreferredAudioParameters) {
ABORT_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
// Verify that the preferred audio parameters are OK for the default render
// and capture devices.
for (size_t i = 0; i < arraysize(data); ++i) {
webrtc::AudioParameters params;
EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetPreferredAudioParameters(
AudioDeviceName::kDefaultDeviceId, data[i] == eRender, &params)));
EXPECT_TRUE(params.is_valid());
EXPECT_TRUE(params.is_complete());
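    // Loose extra bounds, assumed (not API-guaranteed) to hold for any
    // active endpoint; the contract above only promises a valid and
    // complete parameter set.
    EXPECT_GE(params.sample_rate(), 8000);
    EXPECT_GE(params.channels(), 1u);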
}
// Verify that the preferred audio parameters are OK for the default
// communication devices.
for (size_t i = 0; i < arraysize(data); ++i) {
webrtc::AudioParameters params;
EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetPreferredAudioParameters(
AudioDeviceName::kDefaultCommunicationsDeviceId, data[i] == eRender,
&params)));
EXPECT_TRUE(params.is_valid());
EXPECT_TRUE(params.is_complete());
}
}

TEST_F(CoreAudioUtilityWinTest, SharedModeInitialize) {
ABORT_TEST_IF_NOT(DevicesAvailable());
ComPtr<IAudioClient> client;
client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
eRender, eConsole);
EXPECT_TRUE(client.Get());
WAVEFORMATPCMEX format;
EXPECT_TRUE(SUCCEEDED(
core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
// Perform a shared-mode initialization without event-driven buffer handling.
uint32_t endpoint_buffer_size = 0;
HRESULT hr = core_audio_utility::SharedModeInitialize(
client.Get(), &format, nullptr, &endpoint_buffer_size);
EXPECT_TRUE(SUCCEEDED(hr));
EXPECT_GT(endpoint_buffer_size, 0u);
  // A client can only be initialized once; a second attempt must fail.
hr = core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
&endpoint_buffer_size);
EXPECT_FALSE(SUCCEEDED(hr));
EXPECT_EQ(hr, AUDCLNT_E_ALREADY_INITIALIZED);
  // Verify that initialization succeeds again after releasing the client and
  // creating a new one.
client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
eRender, eConsole);
EXPECT_TRUE(client.Get());
hr = core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
&endpoint_buffer_size);
EXPECT_TRUE(SUCCEEDED(hr));
EXPECT_GT(endpoint_buffer_size, 0u);
  // Use an unsupported format and verify that initialization fails.
// A simple way to emulate an invalid format is to use the shared-mode
// mixing format and modify the preferred sample rate.
client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
eRender, eConsole);
EXPECT_TRUE(client.Get());
format.Format.nSamplesPerSec = format.Format.nSamplesPerSec + 1;
EXPECT_FALSE(core_audio_utility::IsFormatSupported(
client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
hr = core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
&endpoint_buffer_size);
EXPECT_TRUE(FAILED(hr));
EXPECT_EQ(hr, E_INVALIDARG);
// Finally, perform a shared-mode initialization using event-driven buffer
// handling. The event handle will be signaled when an audio buffer is ready
// to be processed by the client (not verified here). The event handle should
// be in the non-signaled state.
ScopedHandle event_handle(::CreateEvent(nullptr, TRUE, FALSE, nullptr));
client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
eRender, eConsole);
EXPECT_TRUE(client.Get());
EXPECT_TRUE(SUCCEEDED(
core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
EXPECT_TRUE(core_audio_utility::IsFormatSupported(
client.Get(), AUDCLNT_SHAREMODE_SHARED, &format));
hr = core_audio_utility::SharedModeInitialize(
client.Get(), &format, event_handle, &endpoint_buffer_size);
EXPECT_TRUE(SUCCEEDED(hr));
EXPECT_GT(endpoint_buffer_size, 0u);
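  // As stated above, the event should still be in the non-signaled state
  // since no stream has been started. Sketch only; assumes ScopedHandle
  // converts implicitly to HANDLE, as in the SharedModeInitialize() call.
  EXPECT_EQ(::WaitForSingleObject(event_handle, 0),
            static_cast<DWORD>(WAIT_TIMEOUT));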
}

TEST_F(CoreAudioUtilityWinTest, CreateRenderAndCaptureClients) {
ABORT_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
WAVEFORMATPCMEX format;
uint32_t endpoint_buffer_size = 0;
for (size_t i = 0; i < arraysize(data); ++i) {
ComPtr<IAudioClient> client;
ComPtr<IAudioRenderClient> render_client;
ComPtr<IAudioCaptureClient> capture_client;
// Create a default client for the given data-flow direction.
client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
data[i], eConsole);
EXPECT_TRUE(client.Get());
EXPECT_TRUE(SUCCEEDED(
core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
if (data[i] == eRender) {
      // It is not possible to create a render client using an uninitialized
      // client interface.
render_client = core_audio_utility::CreateRenderClient(client.Get());
EXPECT_FALSE(render_client.Get());
// Do a proper initialization and verify that it works this time.
core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
&endpoint_buffer_size);
render_client = core_audio_utility::CreateRenderClient(client.Get());
EXPECT_TRUE(render_client.Get());
EXPECT_GT(endpoint_buffer_size, 0u);
} else if (data[i] == eCapture) {
      // It is not possible to create a capture client using an uninitialized
      // client interface.
capture_client = core_audio_utility::CreateCaptureClient(client.Get());
EXPECT_FALSE(capture_client.Get());
// Do a proper initialization and verify that it works this time.
core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
&endpoint_buffer_size);
capture_client = core_audio_utility::CreateCaptureClient(client.Get());
EXPECT_TRUE(capture_client.Get());
EXPECT_GT(endpoint_buffer_size, 0u);
}
}
}

TEST_F(CoreAudioUtilityWinTest, CreateAudioClock) {
ABORT_TEST_IF_NOT(DevicesAvailable());
EDataFlow data[] = {eRender, eCapture};
WAVEFORMATPCMEX format;
uint32_t endpoint_buffer_size = 0;
for (size_t i = 0; i < arraysize(data); ++i) {
ComPtr<IAudioClient> client;
ComPtr<IAudioClock> audio_clock;
// Create a default client for the given data-flow direction.
client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
data[i], eConsole);
EXPECT_TRUE(client.Get());
EXPECT_TRUE(SUCCEEDED(
core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
    // It is not possible to create an audio clock using an uninitialized
    // client interface.
audio_clock = core_audio_utility::CreateAudioClock(client.Get());
EXPECT_FALSE(audio_clock.Get());
// Do a proper initialization and verify that it works this time.
core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
&endpoint_buffer_size);
audio_clock = core_audio_utility::CreateAudioClock(client.Get());
EXPECT_TRUE(audio_clock.Get());
EXPECT_GT(endpoint_buffer_size, 0u);
// Use the audio clock and verify that querying the device frequency works.
UINT64 frequency = 0;
EXPECT_TRUE(SUCCEEDED(audio_clock->GetFrequency(&frequency)));
EXPECT_GT(frequency, 0u);
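    // The stream has not been started, so the device position should, by
    // assumption (this is not an explicit API guarantee), still be zero.
    UINT64 position = 0;
    EXPECT_TRUE(SUCCEEDED(audio_clock->GetPosition(&position, nullptr)));
    EXPECT_EQ(position, 0u);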
}
}

TEST_F(CoreAudioUtilityWinTest, FillRenderEndpointBufferWithSilence) {
ABORT_TEST_IF_NOT(DevicesAvailable());
// Create default clients using the default mixing format for shared mode.
ComPtr<IAudioClient> client(core_audio_utility::CreateClient(
AudioDeviceName::kDefaultDeviceId, eRender, eConsole));
EXPECT_TRUE(client.Get());
WAVEFORMATPCMEX format;
uint32_t endpoint_buffer_size = 0;
EXPECT_TRUE(SUCCEEDED(
core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
core_audio_utility::SharedModeInitialize(client.Get(), &format, nullptr,
&endpoint_buffer_size);
EXPECT_GT(endpoint_buffer_size, 0u);
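  // Cross-check, assuming (implementation detail, not verified elsewhere)
  // that SharedModeInitialize() reports the same size in audio frames as
  // IAudioClient::GetBufferSize().
  UINT32 buffer_size_in_frames = 0;
  EXPECT_TRUE(SUCCEEDED(client->GetBufferSize(&buffer_size_in_frames)));
  EXPECT_EQ(buffer_size_in_frames, endpoint_buffer_size);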
ComPtr<IAudioRenderClient> render_client(
core_audio_utility::CreateRenderClient(client.Get()));
EXPECT_TRUE(render_client.Get());
  // The endpoint audio buffer should be empty right after creation, i.e.,
  // no audio frames are queued yet.
  UINT32 num_queued_frames = 0;
  EXPECT_TRUE(SUCCEEDED(client->GetCurrentPadding(&num_queued_frames)));
  EXPECT_EQ(num_queued_frames, 0u);
// Fill it up with zeros and verify that the buffer is full.
// It is not possible to verify that the actual data consists of zeros
// since we can't access data that has already been sent to the endpoint
// buffer.
EXPECT_TRUE(core_audio_utility::FillRenderEndpointBufferWithSilence(
client.Get(), render_client.Get()));
  EXPECT_TRUE(SUCCEEDED(client->GetCurrentPadding(&num_queued_frames)));
EXPECT_EQ(num_queued_frames, endpoint_buffer_size);
}

}  // namespace webrtc_win