Clarifies identification of default communication device in ADM2

ADM2 for Windows is based on the CoreAudioUtil class in Chrome.
CoreAudioUtil in Chrome does not use a special string to identify the
Default Communication device; instead it uses a combination of the
default-device string and a role parameter [1].

When CoreAudioUtil was ported to WebRTC, I accidentally added a
separate unique string to identify the default communications device.
This can lead to errors, since there are then two different ways to
identify the same device, and it also complicates merging future
changes from Chrome into WebRTC.

This CL removes usage of AudioDeviceName::kDefaultCommunicationsDeviceId
in WebRTC to reduce the risk of errors.

[1] https://cs.chromium.org/chromium/src/media/audio/win/core_audio_util_win.cc?q=core_audio_ut&sq=package:chromium&g=0&l=464
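
For context, this is just the standard WASAPI pattern: the same generic
default id resolves to either the Default or the Default Communication
endpoint purely through the ERole value passed to
IMMDeviceEnumerator::GetDefaultAudioEndpoint. A minimal sketch (the helper
name and error handling are illustrative and not part of this CL):

  // Sketch only: resolves a default endpoint from (flow, role).
  // Assumes CoInitializeEx has already been called on this thread.
  #include <objbase.h>
  #include <mmdeviceapi.h>
  #include <wrl/client.h>

  Microsoft::WRL::ComPtr<IMMDevice> GetDefaultEndpoint(EDataFlow flow,
                                                       ERole role) {
    Microsoft::WRL::ComPtr<IMMDeviceEnumerator> enumerator;
    if (FAILED(CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr,
                                CLSCTX_ALL, IID_PPV_ARGS(&enumerator)))) {
      return nullptr;
    }
    Microsoft::WRL::ComPtr<IMMDevice> endpoint;
    // The role, not a special device-id string, selects between the Default
    // (eConsole) and Default Communication (eCommunications) device.
    if (FAILED(enumerator->GetDefaultAudioEndpoint(flow, role, &endpoint))) {
      return nullptr;
    }
    return endpoint;
  }

  // Usage: one default-id concept, two roles.
  // auto render_default = GetDefaultEndpoint(eRender, eConsole);
  // auto render_comms = GetDefaultEndpoint(eRender, eCommunications);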

Excluding flaky bot win_x86_msvc_dbg and using Tbr.

Tbr: thaloun@chromium.org
No-Try: True
Bug: webrtc:11107
Change-Id: Ie6687adbe9c3940a217456e4025967f71d86214c
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/160047
Commit-Queue: Henrik Andreasson <henrika@webrtc.org>
Reviewed-by: Henrik Andreasson <henrika@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#29848}
henrika 2019-11-20 14:40:33 +01:00 committed by Commit Bot
parent eec1d04aa9
commit bb55e0bc72
6 changed files with 62 additions and 61 deletions


@@ -15,7 +15,6 @@
 namespace webrtc {
 const char AudioDeviceName::kDefaultDeviceId[] = "default";
-const char AudioDeviceName::kDefaultCommunicationsDeviceId[] = "communications";
 AudioDeviceName::AudioDeviceName(std::string device_name, std::string unique_id)
     : device_name(std::move(device_name)), unique_id(std::move(unique_id)) {}


@@ -17,12 +17,13 @@
 namespace webrtc {
 struct AudioDeviceName {
-  // Unique ID of the generic default device.
+  // Represents a default device. Note that, on Windows there are two different
+  // types of default devices (Default and Default Communication). They can
+  // either be two different physical devices or be two different roles for one
+  // single device. Hence, this id must be combined with a "role parameter" on
+  // Windows to uniquely identify a default device.
   static const char kDefaultDeviceId[];
-  // Unique ID of the generic default communications device.
-  static const char kDefaultCommunicationsDeviceId[];
   AudioDeviceName() = default;
   AudioDeviceName(std::string device_name, std::string unique_id);

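The new header comment above is the conceptual core of the CL: on Windows the
single id AudioDeviceName::kDefaultDeviceId is only half of the key, and the
ERole supplies the other half. Purely as a reading aid (this helper is
hypothetical and not part of the CL; it assumes webrtc::AudioDeviceName and
the Windows ERole enum are in scope), the pairing used later in
CoreAudioBase::Init() amounts to:

  // Illustrative only; mirrors the (id, role) mapping in CoreAudioBase::Init().
  #include <string>
  #include <mmdeviceapi.h>

  struct DefaultDeviceSelection {
    std::string device_id;  // Always AudioDeviceName::kDefaultDeviceId here.
    ERole role;             // eConsole or eCommunications.
  };

  DefaultDeviceSelection SelectDefaultDevice(bool communications) {
    return {AudioDeviceName::kDefaultDeviceId,
            communications ? eCommunications : eConsole};
  }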

@@ -291,7 +291,7 @@ int CoreAudioBase::DeviceName(int index,
   RTC_DLOG(INFO) << "name: " << *name;
   if (guid != nullptr) {
     *guid = device_names[index].unique_id;
-    RTC_DLOG(INFO) << "guid: " << guid;
+    RTC_DLOG(INFO) << "guid: " << *guid;
   }
   return 0;
 }
@@ -306,13 +306,16 @@ bool CoreAudioBase::Init() {
   // Use an existing |device_id_| and set parameters which are required to
   // create an audio client. It is up to the parent class to set |device_id_|.
+  // TODO(henrika): add unique information about device role since |device_id_|
+  // does not uniquely identify the device and role if there is only one
+  // physical device.
   std::string device_id = device_id_;
   ERole role = eConsole;
   if (IsDefaultDevice(device_id)) {
     device_id = AudioDeviceName::kDefaultDeviceId;
     role = eConsole;
   } else if (IsDefaultCommunicationsDevice(device_id)) {
-    device_id = AudioDeviceName::kDefaultCommunicationsDeviceId;
+    device_id = AudioDeviceName::kDefaultDeviceId;
     role = eCommunications;
   } else {
     RTC_DLOG(LS_WARNING) << "Not using a default device";


@@ -174,6 +174,32 @@ const char* WaveFormatTagToString(WORD format_tag) {
   }
 }
+const char* RoleToString(const ERole role) {
+  switch (role) {
+    case eConsole:
+      return "Console";
+    case eMultimedia:
+      return "Multimedia";
+    case eCommunications:
+      return "Communications";
+    default:
+      return "Unsupported";
+  }
+}
+const char* FlowToString(const EDataFlow flow) {
+  switch (flow) {
+    case eRender:
+      return "Render";
+    case eCapture:
+      return "Capture";
+    case eAll:
+      return "Render or Capture";
+    default:
+      return "Unsupported";
+  }
+}
 bool LoadAudiosesDll() {
   static const wchar_t* const kAudiosesDLL =
       L"%WINDIR%\\system32\\audioses.dll";
@@ -257,7 +283,9 @@ bool IsDeviceActive(IMMDevice* device) {
 ComPtr<IMMDevice> CreateDeviceInternal(const std::string& device_id,
                                        EDataFlow data_flow,
                                        ERole role) {
-  RTC_DLOG(INFO) << "CreateDeviceInternal: " << role;
+  RTC_DLOG(INFO) << "CreateDeviceInternal: "
+                 << "id=" << device_id << ", flow=" << FlowToString(data_flow)
+                 << ", role=" << RoleToString(role);
   ComPtr<IMMDevice> audio_endpoint_device;
   // Create the IMMDeviceEnumerator interface.
@@ -266,8 +294,7 @@ ComPtr<IMMDevice> CreateDeviceInternal(const std::string& device_id,
     return audio_endpoint_device;
   _com_error error(S_FALSE);
-  if (device_id == AudioDeviceName::kDefaultDeviceId ||
-      device_id == AudioDeviceName::kDefaultCommunicationsDeviceId) {
+  if (device_id == AudioDeviceName::kDefaultDeviceId) {
     error = device_enum->GetDefaultAudioEndpoint(
         data_flow, role, audio_endpoint_device.GetAddressOf());
     if (FAILED(error.Error())) {
@@ -1055,31 +1082,6 @@ HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3,
   return error.Error();
 }
-HRESULT GetPreferredAudioParameters(const std::string& device_id,
-                                    bool is_output_device,
-                                    AudioParameters* params) {
-  RTC_DLOG(INFO) << "GetPreferredAudioParameters: " << is_output_device;
-  EDataFlow data_flow = is_output_device ? eRender : eCapture;
-  ComPtr<IMMDevice> device;
-  if (device_id == AudioDeviceName::kDefaultCommunicationsDeviceId) {
-    device = CreateDeviceInternal(AudioDeviceName::kDefaultDeviceId, data_flow,
-                                  eCommunications);
-  } else {
-    // If |device_id| equals AudioDeviceName::kDefaultDeviceId, a default
-    // device will be created.
-    device = CreateDeviceInternal(device_id, data_flow, eConsole);
-  }
-  if (!device.Get()) {
-    return E_FAIL;
-  }
-  ComPtr<IAudioClient> client(CreateClientInternal(device.Get()));
-  if (!client.Get())
-    return E_FAIL;
-  return GetPreferredAudioParametersInternal(client.Get(), params, -1);
-}
 HRESULT GetPreferredAudioParameters(IAudioClient* client,
                                     AudioParameters* params) {
   RTC_DLOG(INFO) << "GetPreferredAudioParameters";


@@ -508,14 +508,11 @@ HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3,
                                   uint32_t* min_period_in_frames,
                                   uint32_t* max_period_in_frames);
-// Get the preferred audio parameters for the given |device_id| or |client|
-// corresponding to the stream format that the audio engine uses for its
-// internal processing of shared-mode streams. The acquired values should only
-// be utilized for shared mode streamed since there are no preferred settings
-// for an exclusive mode stream.
-HRESULT GetPreferredAudioParameters(const std::string& device_id,
-                                    bool is_output_device,
-                                    webrtc::AudioParameters* params);
+// Get the preferred audio parameters for the given |client| corresponding to
+// the stream format that the audio engine uses for its internal processing of
+// shared-mode streams. The acquired values should only be utilized for shared
+// mode streamed since there are no preferred settings for an exclusive mode
+// stream.
 HRESULT GetPreferredAudioParameters(IAudioClient* client,
                                     webrtc::AudioParameters* params);
 // As above but override the preferred sample rate and use |sample_rate|

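With the string-id overload removed, the calling pattern is: create an
IAudioClient for (kDefaultDeviceId, role) and then query that client, as the
updated unit test below does. A condensed sketch of the post-CL pattern
(namespace qualification as in the unit tests; enclosing namespaces, test
macros and error handling trimmed):

  // Post-CL pattern: the role is resolved at client-creation time.
  webrtc::AudioParameters params;
  Microsoft::WRL::ComPtr<IAudioClient> client =
      core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
                                       eRender, eCommunications);
  if (client.Get() &&
      SUCCEEDED(core_audio_utility::GetPreferredAudioParameters(client.Get(),
                                                                &params))) {
    // |params| now holds the shared-mode engine format for the default
    // communications render device.
  }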

@@ -546,8 +546,8 @@ TEST_F(CoreAudioUtilityWinTest, GetDevicePeriod) {
   // Verify that the device periods are valid for the default render and
   // capture devices.
+  ComPtr<IAudioClient> client;
   for (size_t i = 0; i < arraysize(data_flow); ++i) {
-    ComPtr<IAudioClient> client;
     REFERENCE_TIME shared_time_period = 0;
     REFERENCE_TIME exclusive_time_period = 0;
     client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
@@ -566,25 +566,24 @@ TEST_F(CoreAudioUtilityWinTest, GetDevicePeriod) {
 TEST_F(CoreAudioUtilityWinTest, GetPreferredAudioParameters) {
   ABORT_TEST_IF_NOT(DevicesAvailable());
-  EDataFlow data_flow[] = {eRender, eCapture};
+  struct {
+    EDataFlow flow;
+    ERole role;
+  } data[] = {{eRender, eConsole},
+              {eRender, eCommunications},
+              {eCapture, eConsole},
+              {eCapture, eCommunications}};
-  // Verify that the preferred audio parameters are OK for the default render
-  // and capture devices.
-  for (size_t i = 0; i < arraysize(data_flow); ++i) {
-    webrtc::AudioParameters params;
+  // Verify that the preferred audio parameters are OK for all flow/role
+  // combinations above.
+  ComPtr<IAudioClient> client;
+  webrtc::AudioParameters params;
+  for (size_t i = 0; i < arraysize(data); ++i) {
+    client = core_audio_utility::CreateClient(AudioDeviceName::kDefaultDeviceId,
+                                              data[i].flow, data[i].role);
+    EXPECT_TRUE(client.Get());
-    EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetPreferredAudioParameters(
-        AudioDeviceName::kDefaultDeviceId, data_flow[i] == eRender, &params)));
-    EXPECT_TRUE(params.is_valid());
-    EXPECT_TRUE(params.is_complete());
-  }
-  // Verify that the preferred audio parameters are OK for the default
-  // communication devices.
-  for (size_t i = 0; i < arraysize(data_flow); ++i) {
-    webrtc::AudioParameters params;
     EXPECT_TRUE(SUCCEEDED(core_audio_utility::GetPreferredAudioParameters(
-        AudioDeviceName::kDefaultCommunicationsDeviceId,
-        data_flow[i] == eRender, &params)));
+        client.Get(), &params)));
     EXPECT_TRUE(params.is_valid());
     EXPECT_TRUE(params.is_complete());
   }